From 099f53cb50e45ef617a9f1d63ceec799e489418b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Apr 2009 14:28:37 -0700 Subject: async_tx: rename zero_sum to val 'zero_sum' does not properly describe the operation of generating parity and checking that it validates against an existing buffer. Change the name of the operation to 'val' (for 'validate'). This is in anticipation of the p+q case where it is a requirement to identify the target parity buffers separately from the source buffers, because the target parity buffers will not have corresponding pq coefficients. Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- include/linux/async_tx.h | 2 +- include/linux/dmaengine.h | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 5fc2ef8d97f..513150d8c25 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -117,7 +117,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, dma_async_tx_callback cb_fn, void *cb_fn_param); struct dma_async_tx_descriptor * -async_xor_zero_sum(struct page *dest, struct page **src_list, +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, u32 *result, enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 2e2aa3df170..6768727d00d 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -55,8 +55,8 @@ enum dma_transaction_type { DMA_PQ_XOR, DMA_DUAL_XOR, DMA_PQ_UPDATE, - DMA_ZERO_SUM, - DMA_PQ_ZERO_SUM, + DMA_XOR_VAL, + DMA_PQ_VAL, DMA_MEMSET, DMA_MEMCPY_CRC32C, DMA_INTERRUPT, @@ -214,7 +214,7 @@ struct dma_async_tx_descriptor { * @device_free_chan_resources: release DMA channel's resources * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation - * @device_prep_dma_zero_sum: prepares a zero_sum operation + * @device_prep_dma_xor_val: prepares a xor validation operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_slave_sg: prepares a slave dma operation @@ -243,7 +243,7 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_xor)( struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); - struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( + struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, u32 *result, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( -- cgit v1.2.3-70-g09d2 From 88ba2aa586c874681c072101287e15d40de7e6e2 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 9 Apr 2009 16:16:18 -0700 Subject: async_tx: kill ASYNC_TX_DEP_ACK flag In support of inter-channel chaining async_tx utilizes an ack flag to gate whether a dependent operation can be chained to another. While the flag is not set the chain can be considered open for appending. Setting the ack flag closes the chain and flags the descriptor for garbage collection. The ASYNC_TX_DEP_ACK flag essentially means "close the chain after adding this dependency". Since each operation can only have one child the api now implicitly sets the ack flag at dependency submission time. 
This removes an unnecessary management burden from clients of the api. [ Impact: clean up and enforce one dependency per operation ] Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- Documentation/crypto/async-tx-api.txt | 9 ++++----- crypto/async_tx/async_memcpy.c | 2 +- crypto/async_tx/async_memset.c | 2 +- crypto/async_tx/async_tx.c | 4 ++-- crypto/async_tx/async_xor.c | 6 ++---- drivers/md/raid5.c | 25 +++++++++++-------------- include/linux/async_tx.h | 4 +--- 7 files changed, 22 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt index 4af12180d19..76feda8541d 100644 --- a/Documentation/crypto/async-tx-api.txt +++ b/Documentation/crypto/async-tx-api.txt @@ -80,8 +80,8 @@ acknowledged by the application before the offload engine driver is allowed to recycle (or free) the descriptor. A descriptor can be acked by one of the following methods: 1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted -2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent - descriptor of a new operation. +2/ submitting an unacknowledged descriptor as a dependency to another + async_tx call will implicitly set the acknowledged state. 3/ calling async_tx_ack() on the descriptor. 3.4 When does the operation execute? @@ -136,10 +136,9 @@ int run_xor_copy_xor(struct page **xor_srcs, tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL); - tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, - ASYNC_TX_DEP_ACK, tx, NULL, NULL); + tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, tx, NULL, NULL); tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, - ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, + ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx, complete_xor_copy_xor, NULL); async_tx_issue_pending_all(); diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index ddccfb01c41..7117ec6f1b7 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -35,7 +35,7 @@ * @src: src page * @offset: offset in pages to start transaction * @len: length in bytes - * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK, + * @flags: ASYNC_TX_ACK * @depend_tx: memcpy depends on the result of this transaction * @cb_fn: function to call when the memcpy completes * @cb_param: parameter to pass to the callback routine diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index 5b5eb99bb24..b2f133885b7 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -35,7 +35,7 @@ * @val: fill value * @offset: offset in pages to start transaction * @len: length in bytes - * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK + * @flags: ASYNC_TX_ACK * @depend_tx: memset depends on the result of this transaction * @cb_fn: function to call when the memcpy completes * @cb_param: parameter to pass to the callback routine diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 06eb6cc09fe..3766bc3d7d8 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -223,7 +223,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, if (flags & ASYNC_TX_ACK) async_tx_ack(tx); - if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) + if (depend_tx) async_tx_ack(depend_tx); } EXPORT_SYMBOL_GPL(async_tx_submit); @@ -231,7 +231,7 @@ EXPORT_SYMBOL_GPL(async_tx_submit); /** * async_trigger_callback - schedules the callback 
function to be run after * any dependent operations have been completed. - * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK + * @flags: ASYNC_TX_ACK * @depend_tx: 'callback' requires the completion of this transaction * @cb_fn: function to call after depend_tx completes * @cb_param: parameter to pass to the callback routine diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index e0580b0ea53..3cc5dc763b5 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -105,7 +105,6 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, _cb_param); depend_tx = tx; - flags |= ASYNC_TX_DEP_ACK; if (src_cnt > xor_src_cnt) { /* drop completed sources */ @@ -168,8 +167,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, * @offset: offset in pages to start transaction * @src_cnt: number of source pages * @len: length in bytes - * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, - * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK + * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, ASYNC_TX_ACK * @depend_tx: xor depends on the result of this transaction. * @cb_fn: function to call when the xor completes * @cb_param: parameter to pass to the callback routine @@ -230,7 +228,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) * @src_cnt: number of source pages * @len: length in bytes * @result: 0 if sum == 0 else non-zero - * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK + * @flags: ASYNC_TX_ACK * @depend_tx: xor depends on the result of this transaction. * @cb_fn: function to call when the xor completes * @cb_param: parameter to pass to the callback routine diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f8d2d35ed29..0ef5362c8d0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -525,14 +525,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, bio_page = bio_iovec_idx(bio, i)->bv_page; if (frombio) tx = async_memcpy(page, bio_page, page_offset, - b_offset, clen, - ASYNC_TX_DEP_ACK, - tx, NULL, NULL); + b_offset, clen, 0, + tx, NULL, NULL); else tx = async_memcpy(bio_page, page, b_offset, - page_offset, clen, - ASYNC_TX_DEP_ACK, - tx, NULL, NULL); + page_offset, clen, 0, + tx, NULL, NULL); } if (clen < len) /* hit end of page */ break; @@ -615,8 +613,7 @@ static void ops_run_biofill(struct stripe_head *sh) } atomic_inc(&sh->count); - async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, - ops_complete_biofill, sh); + async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_biofill, sh); } static void ops_complete_compute5(void *stripe_head_ref) @@ -701,8 +698,8 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) } tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, - ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, - ops_complete_prexor, sh); + ASYNC_TX_XOR_DROP_DST, tx, + ops_complete_prexor, sh); return tx; } @@ -809,7 +806,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST * for the synchronous xor case */ - flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | + flags = ASYNC_TX_ACK | (prexor ? 
ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); atomic_inc(&sh->count); @@ -858,7 +855,7 @@ static void ops_run_check(struct stripe_head *sh) &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); atomic_inc(&sh->count); - tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, + tx = async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_check, sh); } @@ -2687,8 +2684,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, /* place all the copies on one channel */ tx = async_memcpy(sh2->dev[dd_idx].page, - sh->dev[i].page, 0, 0, STRIPE_SIZE, - ASYNC_TX_DEP_ACK, tx, NULL, NULL); + sh->dev[i].page, 0, 0, STRIPE_SIZE, + 0, tx, NULL, NULL); set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 513150d8c25..9f14cd540cd 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -58,13 +58,11 @@ struct dma_chan_ref { * array. * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a * dependency chain - * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining. */ enum async_tx_flags { ASYNC_TX_XOR_ZERO_DST = (1 << 0), ASYNC_TX_XOR_DROP_DST = (1 << 1), - ASYNC_TX_ACK = (1 << 3), - ASYNC_TX_DEP_ACK = (1 << 4), + ASYNC_TX_ACK = (1 << 2), }; #ifdef CONFIG_DMA_ENGINE -- cgit v1.2.3-70-g09d2 From a08abd8ca890a377521d65d493d174bebcaf694b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Jun 2009 11:43:59 -0700 Subject: async_tx: structify submission arguments, add scribble Prepare the api for the arrival of a new parameter, 'scribble'. This will allow callers to identify scratchpad memory for dma address or page address conversions. As this adds yet another parameter, take this opportunity to convert the common submission parameters (flags, dependency, callback, and callback argument) into an object that is passed by reference. Also, take this opportunity to fix up the kerneldoc and add notes about the relevant ASYNC_TX_* flags for each routine. [ Impact: moves api pass-by-value parameters to a pass-by-reference struct ] Signed-off-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- Documentation/crypto/async-tx-api.txt | 6 +- crypto/async_tx/async_memcpy.c | 26 +++---- crypto/async_tx/async_memset.c | 25 +++---- crypto/async_tx/async_tx.c | 51 +++++++------- crypto/async_tx/async_xor.c | 123 +++++++++++++++++----------------- drivers/md/raid5.c | 59 +++++++++------- include/linux/async_tx.h | 84 ++++++++++++++--------- 7 files changed, 200 insertions(+), 174 deletions(-) (limited to 'include') diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt index 76feda8541d..dfe0475f791 100644 --- a/Documentation/crypto/async-tx-api.txt +++ b/Documentation/crypto/async-tx-api.txt @@ -54,11 +54,7 @@ features surfaced as a result: 3.1 General format of the API: struct dma_async_tx_descriptor * -async_(, - enum async_tx_flags flags, - struct dma_async_tx_descriptor *dependency, - dma_async_tx_callback callback_routine, - void *callback_parameter); +async_(, struct async_submit ctl *submit) 3.2 Supported operations: memcpy - memory copy between a source and a destination buffer diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 7117ec6f1b7..89e05556f3d 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -33,28 +33,28 @@ * async_memcpy - attempt to copy memory with a dma engine. 
* @dest: destination page * @src: src page - * @offset: offset in pages to start transaction + * @dest_offset: offset into 'dest' to start transaction + * @src_offset: offset into 'src' to start transaction * @len: length in bytes - * @flags: ASYNC_TX_ACK - * @depend_tx: memcpy depends on the result of this transaction - * @cb_fn: function to call when the memcpy completes - * @cb_param: parameter to pass to the callback routine + * @submit: submission / completion modifiers + * + * honored flags: ASYNC_TX_ACK */ struct dma_async_tx_descriptor * async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, - unsigned int src_offset, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) + unsigned int src_offset, size_t len, + struct async_submit_ctl *submit) { - struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY, + struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, &dest, 1, &src, 1, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; if (device) { dma_addr_t dma_dest, dma_src; - unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; + unsigned long dma_prep_flags; + dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; dma_dest = dma_map_page(device->dev, dest, dest_offset, len, DMA_FROM_DEVICE); @@ -67,13 +67,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, if (tx) { pr_debug("%s: (async) len: %zu\n", __func__, len); - async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); + async_tx_submit(chan, tx, submit); } else { void *dest_buf, *src_buf; pr_debug("%s: (sync) len: %zu\n", __func__, len); /* wait for any prerequisite operations */ - async_tx_quiesce(&depend_tx); + async_tx_quiesce(&submit->depend_tx); dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; src_buf = kmap_atomic(src, KM_USER1) + src_offset; @@ -83,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, kunmap_atomic(dest_buf, KM_USER0); kunmap_atomic(src_buf, KM_USER1); - async_tx_sync_epilog(cb_fn, cb_param); + async_tx_sync_epilog(submit); } return tx; diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index b2f133885b7..c14437238f4 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -35,26 +35,23 @@ * @val: fill value * @offset: offset in pages to start transaction * @len: length in bytes - * @flags: ASYNC_TX_ACK - * @depend_tx: memset depends on the result of this transaction - * @cb_fn: function to call when the memcpy completes - * @cb_param: parameter to pass to the callback routine + * + * honored flags: ASYNC_TX_ACK */ struct dma_async_tx_descriptor * -async_memset(struct page *dest, int val, unsigned int offset, - size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) +async_memset(struct page *dest, int val, unsigned int offset, size_t len, + struct async_submit_ctl *submit) { - struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET, + struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET, &dest, 1, NULL, 0, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; if (device) { dma_addr_t dma_dest; - unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; + unsigned long dma_prep_flags; + dma_prep_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; dma_dest = dma_map_page(device->dev, dest, offset, len, DMA_FROM_DEVICE); @@ -64,19 +61,19 @@ async_memset(struct page *dest, int val, unsigned int offset, if (tx) { pr_debug("%s: (async) len: %zu\n", __func__, len); - async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); + async_tx_submit(chan, tx, submit); } else { /* run the memset synchronously */ void *dest_buf; pr_debug("%s: (sync) len: %zu\n", __func__, len); - dest_buf = (void *) (((char *) page_address(dest)) + offset); + dest_buf = page_address(dest) + offset; /* wait for any prerequisite operations */ - async_tx_quiesce(&depend_tx); + async_tx_quiesce(&submit->depend_tx); memset(dest_buf, val, len); - async_tx_sync_epilog(cb_fn, cb_param); + async_tx_sync_epilog(submit); } return tx; diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 3766bc3d7d8..802a5ce437d 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -45,13 +45,15 @@ static void __exit async_tx_exit(void) /** * __async_tx_find_channel - find a channel to carry out the operation or let * the transaction execute synchronously - * @depend_tx: transaction dependency + * @submit: transaction dependency and submission modifiers * @tx_type: transaction type */ struct dma_chan * -__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, - enum dma_transaction_type tx_type) +__async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type) { + struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; + /* see if we can keep the chain on one channel */ if (depend_tx && dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) @@ -144,13 +146,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, /** - * submit_disposition - while holding depend_tx->lock we must avoid submitting - * new operations to prevent a circular locking dependency with - * drivers that already hold a channel lock when calling - * async_tx_run_dependencies. + * submit_disposition - flags for routing an incoming operation * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly + * + * while holding depend_tx->lock we must avoid submitting new operations + * to prevent a circular locking dependency with drivers that already + * hold a channel lock when calling async_tx_run_dependencies. 
*/ enum submit_disposition { ASYNC_TX_SUBMITTED, @@ -160,11 +163,12 @@ enum submit_disposition { void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, - enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) + struct async_submit_ctl *submit) { - tx->callback = cb_fn; - tx->callback_param = cb_param; + struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; + + tx->callback = submit->cb_fn; + tx->callback_param = submit->cb_param; if (depend_tx) { enum submit_disposition s; @@ -220,7 +224,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, tx->tx_submit(tx); } - if (flags & ASYNC_TX_ACK) + if (submit->flags & ASYNC_TX_ACK) async_tx_ack(tx); if (depend_tx) @@ -229,21 +233,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, EXPORT_SYMBOL_GPL(async_tx_submit); /** - * async_trigger_callback - schedules the callback function to be run after - * any dependent operations have been completed. - * @flags: ASYNC_TX_ACK - * @depend_tx: 'callback' requires the completion of this transaction - * @cb_fn: function to call after depend_tx completes - * @cb_param: parameter to pass to the callback routine + * async_trigger_callback - schedules the callback function to be run + * @submit: submission and completion parameters + * + * honored flags: ASYNC_TX_ACK + * + * The callback is run after any dependent operations have completed. */ struct dma_async_tx_descriptor * -async_trigger_callback(enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) +async_trigger_callback(struct async_submit_ctl *submit) { struct dma_chan *chan; struct dma_device *device; struct dma_async_tx_descriptor *tx; + struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; if (depend_tx) { chan = depend_tx->chan; @@ -262,14 +265,14 @@ async_trigger_callback(enum async_tx_flags flags, if (tx) { pr_debug("%s: (async)\n", __func__); - async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); + async_tx_submit(chan, tx, submit); } else { pr_debug("%s: (sync)\n", __func__); /* wait for any prerequisite operations */ - async_tx_quiesce(&depend_tx); + async_tx_quiesce(&submit->depend_tx); - async_tx_sync_epilog(cb_fn, cb_param); + async_tx_sync_epilog(submit); } return tx; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 3cc5dc763b5..691fa98a18c 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -34,18 +34,16 @@ static __async_inline struct dma_async_tx_descriptor * do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, - enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) + struct async_submit_ctl *submit) { struct dma_device *dma = chan->device; dma_addr_t *dma_src = (dma_addr_t *) src_list; struct dma_async_tx_descriptor *tx = NULL; int src_off = 0; int i; - dma_async_tx_callback _cb_fn; - void *_cb_param; - enum async_tx_flags async_flags; + dma_async_tx_callback cb_fn_orig = submit->cb_fn; + void *cb_param_orig = submit->cb_param; + enum async_tx_flags flags_orig = submit->flags; enum dma_ctrl_flags dma_flags; int xor_src_cnt; dma_addr_t dma_dest; @@ -63,7 +61,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, } while (src_cnt) { - async_flags = flags; + submit->flags = flags_orig; dma_flags 
= 0; xor_src_cnt = min(src_cnt, dma->max_xor); /* if we are submitting additional xors, leave the chain open, @@ -71,15 +69,15 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, * buffer mapped */ if (src_cnt > xor_src_cnt) { - async_flags &= ~ASYNC_TX_ACK; + submit->flags &= ~ASYNC_TX_ACK; dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; - _cb_fn = NULL; - _cb_param = NULL; + submit->cb_fn = NULL; + submit->cb_param = NULL; } else { - _cb_fn = cb_fn; - _cb_param = cb_param; + submit->cb_fn = cb_fn_orig; + submit->cb_param = cb_param_orig; } - if (_cb_fn) + if (submit->cb_fn) dma_flags |= DMA_PREP_INTERRUPT; /* Since we have clobbered the src_list we are committed @@ -90,7 +88,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, xor_src_cnt, len, dma_flags); if (unlikely(!tx)) - async_tx_quiesce(&depend_tx); + async_tx_quiesce(&submit->depend_tx); /* spin wait for the preceeding transactions to complete */ while (unlikely(!tx)) { @@ -101,10 +99,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, dma_flags); } - async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, - _cb_param); - - depend_tx = tx; + async_tx_submit(chan, tx, submit); + submit->depend_tx = tx; if (src_cnt > xor_src_cnt) { /* drop completed sources */ @@ -123,8 +119,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, static void do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum async_tx_flags flags, - dma_async_tx_callback cb_fn, void *cb_param) + int src_cnt, size_t len, struct async_submit_ctl *submit) { int i; int xor_src_cnt; @@ -139,7 +134,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, /* set destination address */ dest_buf = page_address(dest) + offset; - if (flags & ASYNC_TX_XOR_ZERO_DST) + if (submit->flags & ASYNC_TX_XOR_ZERO_DST) memset(dest_buf, 0, len); while (src_cnt > 0) { @@ -152,33 +147,35 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, src_off += xor_src_cnt; } - async_tx_sync_epilog(cb_fn, cb_param); + async_tx_sync_epilog(submit); } /** * async_xor - attempt to xor a set of blocks with a dma engine. - * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST - * flag must be set to not include dest data in the calculation. The - * assumption with dma eninges is that they only use the destination - * buffer as a source when it is explicity specified in the source list. * @dest: destination page - * @src_list: array of source pages (if the dest is also a source it must be - * at index zero). The contents of this array may be overwritten. - * @offset: offset in pages to start transaction + * @src_list: array of source pages + * @offset: common src/dst offset to start transaction * @src_cnt: number of source pages * @len: length in bytes - * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, ASYNC_TX_ACK - * @depend_tx: xor depends on the result of this transaction. - * @cb_fn: function to call when the xor completes - * @cb_param: parameter to pass to the callback routine + * @submit: submission / completion modifiers + * + * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST + * + * xor_blocks always uses the dest as a source so the + * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in + * the calculation. 
The assumption with dma eninges is that they only + * use the destination buffer as a source when it is explicity specified + * in the source list. + * + * src_list note: if the dest is also a source it must be at index zero. + * The contents of this array will be overwritten if a scribble region + * is not specified. */ struct dma_async_tx_descriptor * async_xor(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) + int src_cnt, size_t len, struct async_submit_ctl *submit) { - struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, + struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, &dest, 1, src_list, src_cnt, len); BUG_ON(src_cnt <= 1); @@ -188,7 +185,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, pr_debug("%s (async): len: %zu\n", __func__, len); return do_async_xor(chan, dest, src_list, offset, src_cnt, len, - flags, depend_tx, cb_fn, cb_param); + submit); } else { /* run the xor synchronously */ pr_debug("%s (sync): len: %zu\n", __func__, len); @@ -196,16 +193,15 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, /* in the sync case the dest is an implied source * (assumes the dest is the first source) */ - if (flags & ASYNC_TX_XOR_DROP_DST) { + if (submit->flags & ASYNC_TX_XOR_DROP_DST) { src_cnt--; src_list++; } /* wait for any prerequisite operations */ - async_tx_quiesce(&depend_tx); + async_tx_quiesce(&submit->depend_tx); - do_sync_xor(dest, src_list, offset, src_cnt, len, - flags, cb_fn, cb_param); + do_sync_xor(dest, src_list, offset, src_cnt, len, submit); return NULL; } @@ -222,25 +218,25 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) /** * async_xor_val - attempt a xor parity check with a dma engine. * @dest: destination page used if the xor is performed synchronously - * @src_list: array of source pages. The dest page must be listed as a source - * at index zero. The contents of this array may be overwritten. + * @src_list: array of source pages * @offset: offset in pages to start transaction * @src_cnt: number of source pages * @len: length in bytes * @result: 0 if sum == 0 else non-zero - * @flags: ASYNC_TX_ACK - * @depend_tx: xor depends on the result of this transaction. - * @cb_fn: function to call when the xor completes - * @cb_param: parameter to pass to the callback routine + * @submit: submission / completion modifiers + * + * honored flags: ASYNC_TX_ACK + * + * src_list note: if the dest is also a source it must be at index zero. + * The contents of this array will be overwritten if a scribble region + * is not specified. */ struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, - unsigned int offset, int src_cnt, size_t len, - u32 *result, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, u32 *result, + struct async_submit_ctl *submit) { - struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR_VAL, + struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list, src_cnt, len); struct dma_device *device = chan ? 
chan->device : NULL; @@ -250,11 +246,12 @@ async_xor_val(struct page *dest, struct page **src_list, if (device && src_cnt <= device->max_xor) { dma_addr_t *dma_src = (dma_addr_t *) src_list; - unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; + unsigned long dma_prep_flags; int i; pr_debug("%s: (async) len: %zu\n", __func__, len); + dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; for (i = 0; i < src_cnt; i++) dma_src[i] = dma_map_page(device->dev, src_list[i], offset, len, DMA_TO_DEVICE); @@ -263,7 +260,7 @@ async_xor_val(struct page *dest, struct page **src_list, len, result, dma_prep_flags); if (unlikely(!tx)) { - async_tx_quiesce(&depend_tx); + async_tx_quiesce(&submit->depend_tx); while (!tx) { dma_async_issue_pending(chan); @@ -273,23 +270,23 @@ async_xor_val(struct page *dest, struct page **src_list, } } - async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); + async_tx_submit(chan, tx, submit); } else { - unsigned long xor_flags = flags; + enum async_tx_flags flags_orig = submit->flags; pr_debug("%s: (sync) len: %zu\n", __func__, len); - xor_flags |= ASYNC_TX_XOR_DROP_DST; - xor_flags &= ~ASYNC_TX_ACK; + submit->flags |= ASYNC_TX_XOR_DROP_DST; + submit->flags &= ~ASYNC_TX_ACK; - tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, - depend_tx, NULL, NULL); + tx = async_xor(dest, src_list, offset, src_cnt, len, submit); async_tx_quiesce(&tx); *result = page_is_zero(dest, offset, len) ? 0 : 1; - async_tx_sync_epilog(cb_fn, cb_param); + async_tx_sync_epilog(submit); + submit->flags = flags_orig; } return tx; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0ef5362c8d0..e1920f23579 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -499,11 +499,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, struct page *bio_page; int i; int page_offset; + struct async_submit_ctl submit; if (bio->bi_sector >= sector) page_offset = (signed)(bio->bi_sector - sector) * 512; else page_offset = (signed)(sector - bio->bi_sector) * -512; + + init_async_submit(&submit, 0, tx, NULL, NULL, NULL); bio_for_each_segment(bvl, bio, i) { int len = bio_iovec_idx(bio, i)->bv_len; int clen; @@ -525,13 +528,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, bio_page = bio_iovec_idx(bio, i)->bv_page; if (frombio) tx = async_memcpy(page, bio_page, page_offset, - b_offset, clen, 0, - tx, NULL, NULL); + b_offset, clen, &submit); else tx = async_memcpy(bio_page, page, b_offset, - page_offset, clen, 0, - tx, NULL, NULL); + page_offset, clen, &submit); } + /* chain the operations */ + submit.depend_tx = tx; + if (clen < len) /* hit end of page */ break; page_offset += len; @@ -590,6 +594,7 @@ static void ops_run_biofill(struct stripe_head *sh) { struct dma_async_tx_descriptor *tx = NULL; raid5_conf_t *conf = sh->raid_conf; + struct async_submit_ctl submit; int i; pr_debug("%s: stripe %llu\n", __func__, @@ -613,7 +618,8 @@ static void ops_run_biofill(struct stripe_head *sh) } atomic_inc(&sh->count); - async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_biofill, sh); + init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); + async_trigger_callback(&submit); } static void ops_complete_compute5(void *stripe_head_ref) @@ -645,6 +651,7 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) struct page *xor_dest = tgt->page; int count = 0; struct dma_async_tx_descriptor *tx; + struct async_submit_ctl submit; int i; pr_debug("%s: stripe %llu block: %d\n", @@ -657,13 +664,12 @@ static 
struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) atomic_inc(&sh->count); + init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, + ops_complete_compute5, sh, NULL); if (unlikely(count == 1)) - tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, - 0, NULL, ops_complete_compute5, sh); + tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); else - tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, - ASYNC_TX_XOR_ZERO_DST, NULL, - ops_complete_compute5, sh); + tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); return tx; } @@ -683,6 +689,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) int disks = sh->disks; struct page *xor_srcs[disks]; int count = 0, pd_idx = sh->pd_idx, i; + struct async_submit_ctl submit; /* existing parity data subtracted */ struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; @@ -697,9 +704,9 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) xor_srcs[count++] = dev->page; } - tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, - ASYNC_TX_XOR_DROP_DST, tx, - ops_complete_prexor, sh); + init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx, + ops_complete_prexor, sh, NULL); + tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); return tx; } @@ -772,7 +779,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) /* kernel stack size limits the total number of disks */ int disks = sh->disks; struct page *xor_srcs[disks]; - + struct async_submit_ctl submit; int count = 0, pd_idx = sh->pd_idx, i; struct page *xor_dest; int prexor = 0; @@ -811,13 +818,11 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) atomic_inc(&sh->count); - if (unlikely(count == 1)) { - flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); - tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, - flags, tx, ops_complete_postxor, sh); - } else - tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, - flags, tx, ops_complete_postxor, sh); + init_async_submit(&submit, flags, tx, ops_complete_postxor, sh, NULL); + if (unlikely(count == 1)) + tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); + else + tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); } static void ops_complete_check(void *stripe_head_ref) @@ -838,6 +843,7 @@ static void ops_run_check(struct stripe_head *sh) int disks = sh->disks; struct page *xor_srcs[disks]; struct dma_async_tx_descriptor *tx; + struct async_submit_ctl submit; int count = 0, pd_idx = sh->pd_idx, i; struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; @@ -851,12 +857,13 @@ static void ops_run_check(struct stripe_head *sh) xor_srcs[count++] = dev->page; } + init_async_submit(&submit, 0, NULL, NULL, NULL, NULL); tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, - &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); + &sh->ops.zero_sum_result, &submit); atomic_inc(&sh->count); - tx = async_trigger_callback(ASYNC_TX_ACK, tx, - ops_complete_check, sh); + init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); + tx = async_trigger_callback(&submit); } static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) @@ -2664,6 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, if (i != sh->pd_idx && i != sh->qd_idx) { int dd_idx, j; struct stripe_head *sh2; + struct async_submit_ctl submit; sector_t bn = compute_blocknr(sh, i, 
1); sector_t s = raid5_compute_sector(conf, bn, 0, @@ -2683,9 +2691,10 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, } /* place all the copies on one channel */ + init_async_submit(&submit, 0, tx, NULL, NULL, NULL); tx = async_memcpy(sh2->dev[dd_idx].page, sh->dev[i].page, 0, 0, STRIPE_SIZE, - 0, tx, NULL, NULL); + &submit); set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 9f14cd540cd..00cfb637ddf 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -65,6 +65,22 @@ enum async_tx_flags { ASYNC_TX_ACK = (1 << 2), }; +/** + * struct async_submit_ctl - async_tx submission/completion modifiers + * @flags: submission modifiers + * @depend_tx: parent dependency of the current operation being submitted + * @cb_fn: callback routine to run at operation completion + * @cb_param: parameter for the callback routine + * @scribble: caller provided space for dma/page address conversions + */ +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; +}; + #ifdef CONFIG_DMA_ENGINE #define async_tx_issue_pending_all dma_issue_pending_all #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL @@ -73,8 +89,8 @@ enum async_tx_flags { #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ __async_tx_find_channel(dep, type) struct dma_chan * -__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, - enum dma_transaction_type tx_type); +__async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type); #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ #else static inline void async_tx_issue_pending_all(void) @@ -83,9 +99,10 @@ static inline void async_tx_issue_pending_all(void) } static inline struct dma_chan * -async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, - enum dma_transaction_type tx_type, struct page **dst, int dst_count, - struct page **src, int src_count, size_t len) +async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type, struct page **dst, + int dst_count, struct page **src, int src_count, + size_t len) { return NULL; } @@ -97,46 +114,53 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, * @cb_fn_param: parameter to pass to the callback routine */ static inline void -async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param) +async_tx_sync_epilog(struct async_submit_ctl *submit) +{ + if (submit->cb_fn) + submit->cb_fn(submit->cb_param); +} + +typedef union { + unsigned long addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +static inline void +init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags, + struct dma_async_tx_descriptor *tx, + dma_async_tx_callback cb_fn, void *cb_param, + addr_conv_t *scribble) { - if (cb_fn) - cb_fn(cb_fn_param); + args->flags = flags; + args->depend_tx = tx; + args->cb_fn = cb_fn; + args->cb_param = cb_param; + args->scribble = scribble; } -void -async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, - enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_xor(struct page *dest, struct page 
**src_list, unsigned int offset, - int src_cnt, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + int src_cnt, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, - unsigned int offset, int src_cnt, size_t len, - u32 *result, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, u32 *result, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, - unsigned int src_offset, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + unsigned int src_offset, size_t len, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_memset(struct page *dest, int val, unsigned int offset, - size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + size_t len, struct async_submit_ctl *submit); -struct dma_async_tx_descriptor * -async_trigger_callback(enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ -- cgit v1.2.3-70-g09d2 From dcf52fb71d988ba945054308f661bddf9b2455fb Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 22 Jun 2009 20:41:45 +0000 Subject: ACPI: remove unused acpi_device_ops .stop method No drivers use the .stop method, so remove it. 
Signed-off-by: Bjorn Helgaas Reviewed-by: Alex Chiang Signed-off-by: Len Brown --- drivers/acpi/scan.c | 5 ----- include/acpi/acpi_bus.h | 2 -- 2 files changed, 7 deletions(-) (limited to 'include') diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 781435d7e36..4a89f081160 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -426,9 +426,6 @@ static int acpi_device_probe(struct device * dev) if (acpi_drv->ops.notify) { ret = acpi_device_install_notify_handler(acpi_dev); if (ret) { - if (acpi_drv->ops.stop) - acpi_drv->ops.stop(acpi_dev, - acpi_dev->removal_type); if (acpi_drv->ops.remove) acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type); @@ -452,8 +449,6 @@ static int acpi_device_remove(struct device * dev) if (acpi_drv) { if (acpi_drv->ops.notify) acpi_device_remove_notify_handler(acpi_dev); - if (acpi_drv->ops.stop) - acpi_drv->ops.stop(acpi_dev, acpi_dev->removal_type); if (acpi_drv->ops.remove) acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type); } diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c65e4ce6c3a..79a6c5ebe90 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -89,7 +89,6 @@ struct acpi_device; typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); -typedef int (*acpi_op_stop) (struct acpi_device * device, int type); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); @@ -106,7 +105,6 @@ struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_start start; - acpi_op_stop stop; acpi_op_suspend suspend; acpi_op_resume resume; acpi_op_bind bind; -- cgit v1.2.3-70-g09d2 From b294a290d24d1196d68399cc3a9b8c50bfb55abd Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Tue, 30 Jun 2009 02:13:01 -0400 Subject: Revert "power: remove POWER_SUPPLY_PROP_CAPACITY_LEVEL" This reverts commit 8efe444038a205e79b38b7ad03878824901849a8 and 4cbc76eadf56399cd11fb736b33c53aec9caab8c. Richard@laptop.org was apparently using CAPACITY_LEVEL for debugging battery/EC problems, and was upset that it was removed. This readds it. Conflicts: Documentation/power_supply_class.txt Signed-off-by: Andres Salomon Signed-off-by: Anton Vorontsov --- Documentation/power/power_supply_class.txt | 2 ++ drivers/power/olpc_battery.c | 9 +++++++++ drivers/power/power_supply_sysfs.c | 6 ++++++ include/linux/power_supply.h | 10 ++++++++++ 4 files changed, 27 insertions(+) (limited to 'include') diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt index c6cd4956047..709d95571d7 100644 --- a/Documentation/power/power_supply_class.txt +++ b/Documentation/power/power_supply_class.txt @@ -108,6 +108,8 @@ relative, time-based measurements. ENERGY_FULL, ENERGY_EMPTY - same as above but for energy. CAPACITY - capacity in percents. +CAPACITY_LEVEL - capacity level. This corresponds to +POWER_SUPPLY_CAPACITY_LEVEL_*. TEMP - temperature of the power supply. TEMP_AMBIENT - ambient temperature. 
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 58e419299cd..3a589df0937 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -276,6 +276,14 @@ static int olpc_bat_get_property(struct power_supply *psy, return ret; val->intval = ec_byte; break; + case POWER_SUPPLY_PROP_CAPACITY_LEVEL: + if (ec_byte & BAT_STAT_FULL) + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + else if (ec_byte & BAT_STAT_LOW) + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW; + else + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + break; case POWER_SUPPLY_PROP_TEMP: ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2); if (ret) @@ -321,6 +329,7 @@ static enum power_supply_property olpc_bat_props[] = { POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_MANUFACTURER, diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index da73591017f..9deabbde6fd 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -51,6 +51,9 @@ static ssize_t power_supply_show_property(struct device *dev, "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd", "LiMn" }; + static char *capacity_level_text[] = { + "Unknown", "Critical", "Low", "Normal", "High", "Full" + }; ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); const ptrdiff_t off = attr - power_supply_attrs; @@ -71,6 +74,8 @@ static ssize_t power_supply_show_property(struct device *dev, return sprintf(buf, "%s\n", health_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TECHNOLOGY) return sprintf(buf, "%s\n", technology_text[value.intval]); + else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL) + return sprintf(buf, "%s\n", capacity_level_text[value.intval]); else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) return sprintf(buf, "%s\n", value.strval); @@ -109,6 +114,7 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(energy_now), POWER_SUPPLY_ATTR(energy_avg), POWER_SUPPLY_ATTR(capacity), + POWER_SUPPLY_ATTR(capacity_level), POWER_SUPPLY_ATTR(temp), POWER_SUPPLY_ATTR(temp_ambient), POWER_SUPPLY_ATTR(time_to_empty_now), diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 594c494ac3f..0ab6aa17124 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -58,6 +58,15 @@ enum { POWER_SUPPLY_TECHNOLOGY_LiMn, }; +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL, + POWER_SUPPLY_CAPACITY_LEVEL_LOW, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH, + POWER_SUPPLY_CAPACITY_LEVEL_FULL, +}; + enum power_supply_property { /* Properties of type `int' */ POWER_SUPPLY_PROP_STATUS = 0, @@ -89,6 +98,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_ENERGY_NOW, POWER_SUPPLY_PROP_ENERGY_AVG, POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, -- cgit v1.2.3-70-g09d2 From ee8076ed3e1cdd0cd1e61318386932669c90b92f Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Thu, 2 Jul 2009 09:45:18 -0400 Subject: power_supply: Add a charge_type property, and use it for olpc driver This adds a new sysfs file called 'charge_type' which displays the type of charging (unknown, n/a, trickle charge, or fast charging). 
This allows things like battery diagnostics to determine what the battery/EC is doing without resorting to changing the 'status' sysfs output. Signed-off-by: Andres Salomon Acked-by: Mark Brown Signed-off-by: Anton Vorontsov --- Documentation/power/power_supply_class.txt | 5 +++++ drivers/power/olpc_battery.c | 9 +++++++++ drivers/power/power_supply_sysfs.c | 6 ++++++ include/linux/power_supply.h | 8 ++++++++ 4 files changed, 28 insertions(+) (limited to 'include') diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt index 709d95571d7..9f16c5178b6 100644 --- a/Documentation/power/power_supply_class.txt +++ b/Documentation/power/power_supply_class.txt @@ -76,6 +76,11 @@ STATUS - this attribute represents operating status (charging, full, discharging (i.e. powering a load), etc.). This corresponds to BATTERY_STATUS_* values, as defined in battery.h. +CHARGE_TYPE - batteries can typically charge at different rates. +This defines trickle and fast charges. For batteries that +are already charged or discharging, 'n/a' can be displayed (or +'unknown', if the status is not known). + HEALTH - represents health of the battery, values corresponds to POWER_SUPPLY_HEALTH_*, defined in battery.h. diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 602bbd008f7..8fefe5a7355 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -233,6 +233,14 @@ static int olpc_bat_get_property(struct power_supply *psy, if (ret) return ret; break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + if (ec_byte & BAT_STAT_TRICKLE) + val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + else if (ec_byte & BAT_STAT_CHARGING) + val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; + else + val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE; + break; case POWER_SUPPLY_PROP_PRESENT: val->intval = !!(ec_byte & (BAT_STAT_PRESENT | BAT_STAT_TRICKLE)); @@ -325,6 +333,7 @@ static int olpc_bat_get_property(struct power_supply *psy, static enum power_supply_property olpc_bat_props[] = { POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TECHNOLOGY, diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index 9deabbde6fd..08144393d64 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -43,6 +43,9 @@ static ssize_t power_supply_show_property(struct device *dev, static char *status_text[] = { "Unknown", "Charging", "Discharging", "Not charging", "Full" }; + static char *charge_type[] = { + "Unknown", "N/A", "Trickle", "Fast" + }; static char *health_text[] = { "Unknown", "Good", "Overheat", "Dead", "Over voltage", "Unspecified failure", "Cold", @@ -70,6 +73,8 @@ static ssize_t power_supply_show_property(struct device *dev, if (off == POWER_SUPPLY_PROP_STATUS) return sprintf(buf, "%s\n", status_text[value.intval]); + else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE) + return sprintf(buf, "%s\n", charge_type[value.intval]); else if (off == POWER_SUPPLY_PROP_HEALTH) return sprintf(buf, "%s\n", health_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TECHNOLOGY) @@ -86,6 +91,7 @@ static ssize_t power_supply_show_property(struct device *dev, static struct device_attribute power_supply_attrs[] = { /* Properties of type `int' */ POWER_SUPPLY_ATTR(status), + POWER_SUPPLY_ATTR(charge_type), POWER_SUPPLY_ATTR(health), POWER_SUPPLY_ATTR(present), POWER_SUPPLY_ATTR(online), diff --git a/include/linux/power_supply.h 
b/include/linux/power_supply.h index 0ab6aa17124..4c7c6fc3548 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -38,6 +38,13 @@ enum { POWER_SUPPLY_STATUS_FULL, }; +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE, + POWER_SUPPLY_CHARGE_TYPE_FAST, +}; + enum { POWER_SUPPLY_HEALTH_UNKNOWN = 0, POWER_SUPPLY_HEALTH_GOOD, @@ -70,6 +77,7 @@ enum { enum power_supply_property { /* Properties of type `int' */ POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, -- cgit v1.2.3-70-g09d2 From 7a6d3c8b3049d07123628f2bf57127bba2cc878f Mon Sep 17 00:00:00 2001 From: Csaba Henk Date: Wed, 1 Jul 2009 17:28:41 -0700 Subject: fuse: make the number of max background requests and congestion threshold tunable The practical values for these limits depend on the design of the filesystem server so let userspace set them at initialization time. Signed-off-by: Csaba Henk Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 10 +++++----- fs/fuse/fuse_i.h | 12 ++++++------ fs/fuse/inode.c | 14 ++++++++++++++ include/linux/fuse.h | 9 +++++++-- 4 files changed, 32 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index f58ecbc416c..b152761c1bf 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -250,7 +250,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req) static void flush_bg_queue(struct fuse_conn *fc) { - while (fc->active_background < FUSE_MAX_BACKGROUND && + while (fc->active_background < fc->max_background && !list_empty(&fc->bg_queue)) { struct fuse_req *req; @@ -280,11 +280,11 @@ __releases(&fc->lock) list_del(&req->intr_entry); req->state = FUSE_REQ_FINISHED; if (req->background) { - if (fc->num_background == FUSE_MAX_BACKGROUND) { + if (fc->num_background == fc->max_background) { fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } - if (fc->num_background == FUSE_CONGESTION_THRESHOLD && + if (fc->num_background == fc->congestion_threshold && fc->connected && fc->bdi_initialized) { clear_bdi_congested(&fc->bdi, READ); clear_bdi_congested(&fc->bdi, WRITE); @@ -410,9 +410,9 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc, { req->background = 1; fc->num_background++; - if (fc->num_background == FUSE_MAX_BACKGROUND) + if (fc->num_background == fc->max_background) fc->blocked = 1; - if (fc->num_background == FUSE_CONGESTION_THRESHOLD && + if (fc->num_background == fc->congestion_threshold && fc->bdi_initialized) { set_bdi_congested(&fc->bdi, READ); set_bdi_congested(&fc->bdi, WRITE); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 52b641fc0fa..6bcfab04396 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -25,12 +25,6 @@ /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 -/** Maximum number of outstanding background requests */ -#define FUSE_MAX_BACKGROUND 12 - -/** Congestion starts at 75% of maximum */ -#define FUSE_CONGESTION_THRESHOLD (FUSE_MAX_BACKGROUND * 75 / 100) - /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN @@ -349,6 +343,12 @@ struct fuse_conn { /** rbtree of fuse_files waiting for poll events indexed by ph */ struct rb_root polled_files; + /** Maximum number of outstanding background requests */ + unsigned max_background; + + /** Number of background requests at which congestion starts */ + unsigned congestion_threshold; 
+ /** Number of requests currently in the background */ unsigned num_background; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index f91ccc4a189..9aa6f46d0c3 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -32,6 +32,12 @@ DEFINE_MUTEX(fuse_mutex); #define FUSE_DEFAULT_BLKSIZE 512 +/** Maximum number of outstanding background requests */ +#define FUSE_DEFAULT_MAX_BACKGROUND 12 + +/** Congestion starts at 75% of maximum */ +#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4) + struct fuse_mount_data { int fd; unsigned rootmode; @@ -517,6 +523,8 @@ void fuse_conn_init(struct fuse_conn *fc) INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); atomic_set(&fc->num_waiting, 0); + fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; + fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; fc->khctr = 0; fc->polled_files = RB_ROOT; fc->reqctr = 0; @@ -736,6 +744,12 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) else { unsigned long ra_pages; + if (arg->minor >= 13) { + if (arg->max_background) + fc->max_background = arg->max_background; + if (arg->congestion_threshold) + fc->congestion_threshold = arg->congestion_threshold; + } if (arg->minor >= 6) { ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; if (arg->flags & FUSE_ASYNC_READ) diff --git a/include/linux/fuse.h b/include/linux/fuse.h index cf593bf9fd3..b3700f0ac26 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -30,6 +30,10 @@ * - add umask flag to input argument of open, mknod and mkdir * - add notification messages for invalidation of inodes and * directory entries + * + * 7.13 + * - make max number of background requests and congestion threshold + * tunables */ #ifndef _LINUX_FUSE_H @@ -41,7 +45,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 12 +#define FUSE_KERNEL_MINOR_VERSION 13 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -427,7 +431,8 @@ struct fuse_init_out { __u32 minor; __u32 max_readahead; __u32 flags; - __u32 unused; + __u16 max_background; + __u16 congestion_threshold; __u32 max_write; }; -- cgit v1.2.3-70-g09d2 From 37d217f029a56a6d385f99773fb27dfcb51f9a46 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 8 Jul 2009 18:17:58 +0200 Subject: fuse: document protocol version negotiation Clarify how the protocol version should be negotiated between kernel and userspace. Notably libfuse didn't correctly handle the case when the supported major versions didn't match. Signed-off-by: Miklos Szeredi --- include/linux/fuse.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include') diff --git a/include/linux/fuse.h b/include/linux/fuse.h index b3700f0ac26..3e2925a34bf 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -41,6 +41,26 @@ #include +/* + * Version negotiation: + * + * Both the kernel and userspace send the version they support in the + * INIT request and reply respectively. + * + * If the major versions match then both shall use the smallest + * of the two minor versions for communication. + * + * If the kernel supports a larger major version, then userspace shall + * reply with the major version it supports, ignore the rest of the + * INIT message and expect a new INIT message from the kernel with a + * matching major version. 
+ * + * If the library supports a larger major version, then it shall fall + * back to the major protocol version sent by the kernel for + * communication and reply with that major version (and an arbitrary + * supported minor version). + */ + /** Version number of this interface */ #define FUSE_KERNEL_VERSION 7 -- cgit v1.2.3-70-g09d2 From 5a421ce3c062a87db0a9e7f2a0a7ee0a5b869aab Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Fri, 10 Jul 2009 12:37:40 +0300 Subject: nfsd41: gather and report statistics also for v4.1 ops Signed-off-by: Benny Halevy Signed-off-by: J. Bruce Fields --- include/linux/nfs4.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index bd2eba53066..aff924a24ab 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -234,7 +234,7 @@ enum nfs_opnum4 { Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS -#define LAST_NFS4_OP OP_RELEASE_LOCKOWNER +#define LAST_NFS4_OP OP_RECLAIM_COMPLETE enum nfsstat4 { NFS4_OK = 0, -- cgit v1.2.3-70-g09d2 From 4bd9b0f4afc76cf972578c702e1bc1b6f2d10ba5 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Wed, 24 Jun 2009 15:37:45 -0400 Subject: nfsd41: use globals for DRC limits The version 4.1 DRC memory limit and tracking variables are server wide and session specific. Replace struct svc_serv fields with globals. Stop using the svc_serv sv_lock. Add a spinlock to serialize access to the DRC limit management variables which change on session creation and deletion (usage counter) or (future) administrative action to adjust the total DRC memory limit. Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy --- fs/nfsd/nfs4state.c | 10 +++++----- fs/nfsd/nfssvc.c | 19 +++++++++++++++---- include/linux/nfsd/nfsd.h | 3 +++ include/linux/sunrpc/svc.h | 2 -- 4 files changed, 23 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 980a216a48c..2e6a44e3d2f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -430,11 +430,11 @@ static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan) else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; - spin_lock(&nfsd_serv->sv_lock); - if (np + nfsd_serv->sv_drc_pages_used > nfsd_serv->sv_drc_max_pages) - np = nfsd_serv->sv_drc_max_pages - nfsd_serv->sv_drc_pages_used; - nfsd_serv->sv_drc_pages_used += np; - spin_unlock(&nfsd_serv->sv_lock); + spin_lock(&nfsd_drc_lock); + if (np + nfsd_drc_pages_used > nfsd_drc_max_pages) + np = nfsd_drc_max_pages - nfsd_drc_pages_used; + nfsd_drc_pages_used += np; + spin_unlock(&nfsd_drc_lock); if (np <= 0) { status = nfserr_resource; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index d4c9884cd54..78d8fcd883f 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -67,6 +67,16 @@ struct timeval nfssvc_boot; DEFINE_MUTEX(nfsd_mutex); struct svc_serv *nfsd_serv; +/* + * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used. + * nfsd_drc_max_pages limits the total amount of memory available for + * version 4.1 DRC caches. + * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage. 
+ */ +spinlock_t nfsd_drc_lock; +unsigned int nfsd_drc_max_pages; +unsigned int nfsd_drc_pages_used; + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) static struct svc_stat nfsd_acl_svcstats; static struct svc_version * nfsd_acl_version[] = { @@ -238,11 +248,12 @@ static void set_max_drc(void) { /* The percent of nr_free_buffer_pages used by the V4.1 server DRC */ #define NFSD_DRC_SIZE_SHIFT 7 - nfsd_serv->sv_drc_max_pages = nr_free_buffer_pages() + nfsd_drc_max_pages = nr_free_buffer_pages() >> NFSD_DRC_SIZE_SHIFT; - nfsd_serv->sv_drc_pages_used = 0; - dprintk("%s svc_drc_max_pages %u\n", __func__, - nfsd_serv->sv_drc_max_pages); + nfsd_drc_pages_used = 0; + spin_lock_init(&nfsd_drc_lock); + dprintk("%s nfsd_drc_max_pages %u\n", __func__, + nfsd_drc_max_pages); } int nfsd_create_serv(void) diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 2b49d676d0c..2571f856908 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -56,6 +56,9 @@ extern struct svc_version nfsd_version2, nfsd_version3, extern u32 nfsd_supported_minorversion; extern struct mutex nfsd_mutex; extern struct svc_serv *nfsd_serv; +extern spinlock_t nfsd_drc_lock; +extern unsigned int nfsd_drc_max_pages; +extern unsigned int nfsd_drc_pages_used; extern struct seq_operations nfs_exports_op; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index ea8009695c6..52e8cb0a756 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -94,8 +94,6 @@ struct svc_serv { struct module * sv_module; /* optional module to count when * adding threads */ svc_thread_fn sv_function; /* main function for threads */ - unsigned int sv_drc_max_pages; /* Total pages for DRC */ - unsigned int sv_drc_pages_used;/* DRC pages used */ #if defined(CONFIG_NFS_V4_1) struct list_head sv_cb_list; /* queue for callback requests * that arrive over the same -- cgit v1.2.3-70-g09d2 From d782c3f95c9263dc0b98e7115f75f1e18b9600b3 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Mon, 22 Jun 2009 13:17:08 +0800 Subject: drm/mode: add the CVT algorithm in kernel space Add the CVT algorithm in kernel space. And this function can be called to generate the required modeline. I copied it from the file of xserver/hw/xfree86/modes/xf86cvt.c. What I have done is to translate it by using integer calculation. This is to avoid the float-point calculation in kernel space. 
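A minimal usage sketch (not part of the patch): only the drm_cvt_mode() prototype added below and the existing drm_mode_debug_printmodeline() helper are taken from the source; the wrapper function name and the 1280x1024@60 example values are invented for illustration.

/* Build a CVT mode for 1280x1024 at 60Hz, full blanking, progressive
 * scan, and dump the resulting timings to the kernel log. */
static void example_dump_cvt_mode(struct drm_device *dev)
{
	struct drm_display_mode *mode;

	/* reduced = false, interlaced = false */
	mode = drm_cvt_mode(dev, 1280, 1024, 60, false, false);
	if (!mode)
		return;	/* drm_mode_create() failed */

	drm_mode_debug_printmodeline(mode);
}
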
[airlied:- cleaned up some bits] Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 219 ++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 3 + 2 files changed, 222 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 54f492a488a..0dbc7e4f864 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -8,6 +8,7 @@ * Copyright © 2007 Dave Airlie * Copyright © 2007-2008 Intel Corporation * Jesse Barnes + * Copyright 2005-2006 Luc Verhaegen * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -61,6 +62,224 @@ void drm_mode_debug_printmodeline(struct drm_display_mode *mode) } EXPORT_SYMBOL(drm_mode_debug_printmodeline); +/** + * drm_cvt_mode -create a modeline based on CVT algorithm + * @dev: DRM device + * @hdisplay: hdisplay size + * @vdisplay: vdisplay size + * @vrefresh : vrefresh rate + * @reduced : Whether the GTF calculation is simplified + * @interlaced:Whether the interlace is supported + * + * LOCKING: + * none. + * + * return the modeline based on CVT algorithm + * + * This function is called to generate the modeline based on CVT algorithm + * according to the hdisplay, vdisplay, vrefresh. + * It is based from the VESA(TM) Coordinated Video Timing Generator by + * Graham Loveridge April 9, 2003 available at + * http://www.vesa.org/public/CVT/CVTd6r1.xls + * + * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c. + * What I have done is to translate it by using integer calculation. + */ +#define HV_FACTOR 1000 +struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, + int vdisplay, int vrefresh, + bool reduced, bool interlaced) +{ + /* 1) top/bottom margin size (% of height) - default: 1.8, */ +#define CVT_MARGIN_PERCENTAGE 18 + /* 2) character cell horizontal granularity (pixels) - default 8 */ +#define CVT_H_GRANULARITY 8 + /* 3) Minimum vertical porch (lines) - default 3 */ +#define CVT_MIN_V_PORCH 3 + /* 4) Minimum number of vertical back porch lines - default 6 */ +#define CVT_MIN_V_BPORCH 6 + /* Pixel Clock step (kHz) */ +#define CVT_CLOCK_STEP 250 + struct drm_display_mode *drm_mode; + bool margins = false; + unsigned int vfieldrate, hperiod; + int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync; + int interlace; + + /* allocate the drm_display_mode structure. 
If failure, we will + * return directly + */ + drm_mode = drm_mode_create(dev); + if (!drm_mode) + return NULL; + + /* the CVT default refresh rate is 60Hz */ + if (!vrefresh) + vrefresh = 60; + + /* the required field fresh rate */ + if (interlaced) + vfieldrate = vrefresh * 2; + else + vfieldrate = vrefresh; + + /* horizontal pixels */ + hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY); + + /* determine the left&right borders */ + hmargin = 0; + if (margins) { + hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000; + hmargin -= hmargin % CVT_H_GRANULARITY; + } + /* find the total active pixels */ + drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin; + + /* find the number of lines per field */ + if (interlaced) + vdisplay_rnd = vdisplay / 2; + else + vdisplay_rnd = vdisplay; + + /* find the top & bottom borders */ + vmargin = 0; + if (margins) + vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000; + + drm_mode->vdisplay = vdisplay_rnd + 2 * vmargin; + + /* Interlaced */ + if (interlaced) + interlace = 1; + else + interlace = 0; + + /* Determine VSync Width from aspect ratio */ + if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay)) + vsync = 4; + else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay)) + vsync = 5; + else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay)) + vsync = 6; + else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay)) + vsync = 7; + else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay)) + vsync = 7; + else /* custom */ + vsync = 10; + + if (!reduced) { + /* simplify the GTF calculation */ + /* 4) Minimum time of vertical sync + back porch interval (µs) + * default 550.0 + */ + int tmp1, tmp2; +#define CVT_MIN_VSYNC_BP 550 + /* 3) Nominal HSync width (% of line period) - default 8 */ +#define CVT_HSYNC_PERCENTAGE 8 + unsigned int hblank_percentage; + int vsyncandback_porch, vback_porch, hblank; + + /* estimated the horizontal period */ + tmp1 = HV_FACTOR * 1000000 - + CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate; + tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 + + interlace; + hperiod = tmp1 * 2 / (tmp2 * vfieldrate); + + tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1; + /* 9. Find number of lines in sync + backporch */ + if (tmp1 < (vsync + CVT_MIN_V_PORCH)) + vsyncandback_porch = vsync + CVT_MIN_V_PORCH; + else + vsyncandback_porch = tmp1; + /* 10. Find number of lines in back porch */ + vback_porch = vsyncandback_porch - vsync; + drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + + vsyncandback_porch + CVT_MIN_V_PORCH; + /* 5) Definition of Horizontal blanking time limitation */ + /* Gradient (%/kHz) - default 600 */ +#define CVT_M_FACTOR 600 + /* Offset (%) - default 40 */ +#define CVT_C_FACTOR 40 + /* Blanking time scaling factor - default 128 */ +#define CVT_K_FACTOR 128 + /* Scaling factor weighting - default 20 */ +#define CVT_J_FACTOR 20 +#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256) +#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \ + CVT_J_FACTOR) + /* 12. Find ideal blanking duty cycle from formula */ + hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME * + hperiod / 1000; + /* 13. Blanking time */ + if (hblank_percentage < 20 * HV_FACTOR) + hblank_percentage = 20 * HV_FACTOR; + hblank = drm_mode->hdisplay * hblank_percentage / + (100 * HV_FACTOR - hblank_percentage); + hblank -= hblank % (2 * CVT_H_GRANULARITY); + /* 14. 
find the total pixes per line */ + drm_mode->htotal = drm_mode->hdisplay + hblank; + drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2; + drm_mode->hsync_start = drm_mode->hsync_end - + (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100; + drm_mode->hsync_start += CVT_H_GRANULARITY - + drm_mode->hsync_start % CVT_H_GRANULARITY; + /* fill the Vsync values */ + drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH; + drm_mode->vsync_end = drm_mode->vsync_start + vsync; + } else { + /* Reduced blanking */ + /* Minimum vertical blanking interval time (µs)- default 460 */ +#define CVT_RB_MIN_VBLANK 460 + /* Fixed number of clocks for horizontal sync */ +#define CVT_RB_H_SYNC 32 + /* Fixed number of clocks for horizontal blanking */ +#define CVT_RB_H_BLANK 160 + /* Fixed number of lines for vertical front porch - default 3*/ +#define CVT_RB_VFPORCH 3 + int vbilines; + int tmp1, tmp2; + /* 8. Estimate Horizontal period. */ + tmp1 = HV_FACTOR * 1000000 - + CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate; + tmp2 = vdisplay_rnd + 2 * vmargin; + hperiod = tmp1 / (tmp2 * vfieldrate); + /* 9. Find number of lines in vertical blanking */ + vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1; + /* 10. Check if vertical blanking is sufficient */ + if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH)) + vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH; + /* 11. Find total number of lines in vertical field */ + drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines; + /* 12. Find total number of pixels in a line */ + drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK; + /* Fill in HSync values */ + drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2; + drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC; + } + /* 15/13. Find pixel clock frequency (kHz for xf86) */ + drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod; + drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; + /* 18/16. Find actual vertical frame frequency */ + /* ignore - just set the mode flag for interlaced */ + if (interlaced) + drm_mode->vtotal *= 2; + /* Fill the mode line name */ + drm_mode_set_name(drm_mode); + if (reduced) + drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC | + DRM_MODE_FLAG_NVSYNC); + else + drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_NHSYNC); + if (interlaced) + drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; + + return drm_mode; +} +EXPORT_SYMBOL(drm_cvt_mode); + /** * drm_mode_set_name - set the name on a mode * @mode: name will be set in this mode diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 7300fb86676..820bc0977e5 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -736,4 +736,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern bool drm_detect_hdmi_monitor(struct edid *edid); +extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool reduced, bool interlaced); #endif /* __DRM_CRTC_H__ */ -- cgit v1.2.3-70-g09d2 From 26bbdadad356ec02d33657858d91675f3e9aca94 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Mon, 22 Jun 2009 13:17:09 +0800 Subject: drm/mode: add the GTF algorithm in kernel space Add the GTF algorithm in kernel space. And this function can be called to generate the required modeline. I copied it from the file of xserver/hw/xfree86/modes/xf86gtf.c. What I have done is to translate it by using integer calculation. 
This is to avoid the float-point calculation in kernel space. At the same tie I also refer to the function of fb_get_mode in drivers/video/fbmon.c Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 197 ++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 3 + 2 files changed, 200 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 0dbc7e4f864..fd489d76fbb 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -9,6 +9,7 @@ * Copyright © 2007-2008 Intel Corporation * Jesse Barnes * Copyright 2005-2006 Luc Verhaegen + * Copyright (c) 2001, Andy Ritger aritger@nvidia.com * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -280,6 +281,202 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, } EXPORT_SYMBOL(drm_cvt_mode); +/** + * drm_gtf_mode - create the modeline based on GTF algorithm + * + * @dev :drm device + * @hdisplay :hdisplay size + * @vdisplay :vdisplay size + * @vrefresh :vrefresh rate. + * @interlaced :whether the interlace is supported + * @margins :whether the margin is supported + * + * LOCKING. + * none. + * + * return the modeline based on GTF algorithm + * + * This function is to create the modeline based on the GTF algorithm. + * Generalized Timing Formula is derived from: + * GTF Spreadsheet by Andy Morrish (1/5/97) + * available at http://www.vesa.org + * + * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. + * What I have done is to translate it by using integer calculation. + * I also refer to the function of fb_get_mode in the file of + * drivers/video/fbmon.c + */ +struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, + int vdisplay, int vrefresh, + bool interlaced, int margins) +{ + /* 1) top/bottom margin size (% of height) - default: 1.8, */ +#define GTF_MARGIN_PERCENTAGE 18 + /* 2) character cell horizontal granularity (pixels) - default 8 */ +#define GTF_CELL_GRAN 8 + /* 3) Minimum vertical porch (lines) - default 3 */ +#define GTF_MIN_V_PORCH 1 + /* width of vsync in lines */ +#define V_SYNC_RQD 3 + /* width of hsync as % of total line */ +#define H_SYNC_PERCENT 8 + /* min time of vsync + back porch (microsec) */ +#define MIN_VSYNC_PLUS_BP 550 + /* blanking formula gradient */ +#define GTF_M 600 + /* blanking formula offset */ +#define GTF_C 40 + /* blanking formula scaling factor */ +#define GTF_K 128 + /* blanking formula scaling factor */ +#define GTF_J 20 + /* C' and M' are part of the Blanking Duty Cycle computation */ +#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J) +#define GTF_M_PRIME (GTF_K * GTF_M / 256) + struct drm_display_mode *drm_mode; + unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; + int top_margin, bottom_margin; + int interlace; + unsigned int hfreq_est; + int vsync_plus_bp, vback_porch; + unsigned int vtotal_lines, vfieldrate_est, hperiod; + unsigned int vfield_rate, vframe_rate; + int left_margin, right_margin; + unsigned int total_active_pixels, ideal_duty_cycle; + unsigned int hblank, total_pixels, pixel_freq; + int hsync, hfront_porch, vodd_front_porch_lines; + unsigned int tmp1, tmp2; + + drm_mode = drm_mode_create(dev); + if (!drm_mode) + return NULL; + + /* 1. 
In order to give correct results, the number of horizontal + * pixels requested is first processed to ensure that it is divisible + * by the character size, by rounding it to the nearest character + * cell boundary: + */ + hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN; + hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN; + + /* 2. If interlace is requested, the number of vertical lines assumed + * by the calculation must be halved, as the computation calculates + * the number of vertical lines per field. + */ + if (interlaced) + vdisplay_rnd = vdisplay / 2; + else + vdisplay_rnd = vdisplay; + + /* 3. Find the frame rate required: */ + if (interlaced) + vfieldrate_rqd = vrefresh * 2; + else + vfieldrate_rqd = vrefresh; + + /* 4. Find number of lines in Top margin: */ + top_margin = 0; + if (margins) + top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) / + 1000; + /* 5. Find number of lines in bottom margin: */ + bottom_margin = top_margin; + + /* 6. If interlace is required, then set variable interlace: */ + if (interlaced) + interlace = 1; + else + interlace = 0; + + /* 7. Estimate the Horizontal frequency */ + { + tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500; + tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) * + 2 + interlace; + hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1; + } + + /* 8. Find the number of lines in V sync + back porch */ + /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */ + vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000; + vsync_plus_bp = (vsync_plus_bp + 500) / 1000; + /* 9. Find the number of lines in V back porch alone: */ + vback_porch = vsync_plus_bp - V_SYNC_RQD; + /* 10. Find the total number of lines in Vertical field period: */ + vtotal_lines = vdisplay_rnd + top_margin + bottom_margin + + vsync_plus_bp + GTF_MIN_V_PORCH; + /* 11. Estimate the Vertical field frequency: */ + vfieldrate_est = hfreq_est / vtotal_lines; + /* 12. Find the actual horizontal period: */ + hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines); + + /* 13. Find the actual Vertical field frequency: */ + vfield_rate = hfreq_est / vtotal_lines; + /* 14. Find the Vertical frame frequency: */ + if (interlaced) + vframe_rate = vfield_rate / 2; + else + vframe_rate = vfield_rate; + /* 15. Find number of pixels in left margin: */ + if (margins) + left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) / + 1000; + else + left_margin = 0; + + /* 16.Find number of pixels in right margin: */ + right_margin = left_margin; + /* 17.Find total number of active pixels in image and left and right */ + total_active_pixels = hdisplay_rnd + left_margin + right_margin; + /* 18.Find the ideal blanking duty cycle from blanking duty cycle */ + ideal_duty_cycle = GTF_C_PRIME * 1000 - + (GTF_M_PRIME * 1000000 / hfreq_est); + /* 19.Find the number of pixels in the blanking time to the nearest + * double character cell: */ + hblank = total_active_pixels * ideal_duty_cycle / + (100000 - ideal_duty_cycle); + hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN); + hblank = hblank * 2 * GTF_CELL_GRAN; + /* 20.Find total number of pixels: */ + total_pixels = total_active_pixels + hblank; + /* 21.Find pixel clock frequency: */ + pixel_freq = total_pixels * hfreq_est / 1000; + /* Stage 1 computations are now complete; I should really pass + * the results to another function and do the Stage 2 computations, + * but I only need a few more values so I'll just append the + * computations here for now */ + /* 17. 
Find the number of pixels in the horizontal sync period: */ + hsync = H_SYNC_PERCENT * total_pixels / 100; + hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN; + hsync = hsync * GTF_CELL_GRAN; + /* 18. Find the number of pixels in horizontal front porch period */ + hfront_porch = hblank / 2 - hsync; + /* 36. Find the number of lines in the odd front porch period: */ + vodd_front_porch_lines = GTF_MIN_V_PORCH ; + + /* finally, pack the results in the mode struct */ + drm_mode->hdisplay = hdisplay_rnd; + drm_mode->hsync_start = hdisplay_rnd + hfront_porch; + drm_mode->hsync_end = drm_mode->hsync_start + hsync; + drm_mode->htotal = total_pixels; + drm_mode->vdisplay = vdisplay_rnd; + drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines; + drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD; + drm_mode->vtotal = vtotal_lines; + + drm_mode->clock = pixel_freq; + + drm_mode_set_name(drm_mode); + drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; + + if (interlaced) { + drm_mode->vtotal *= 2; + drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; + } + + return drm_mode; +} +EXPORT_SYMBOL(drm_gtf_mode); /** * drm_mode_set_name - set the name on a mode * @mode: name will be set in this mode diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 820bc0977e5..125994d8ac0 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -739,4 +739,7 @@ extern bool drm_detect_hdmi_monitor(struct edid *edid); extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool reduced, bool interlaced); +extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool interlaced, int margins); #endif /* __DRM_CRTC_H__ */ -- cgit v1.2.3-70-g09d2 From 3d39cecc4841e8d4c4abdb401d10180f5faaded0 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 8 Jul 2009 15:23:30 +0100 Subject: intel-iommu: Remove superfluous iova_alloc_lock from IOVA code We only ever obtain this lock immediately before the iova_rbtree_lock, and release it immediately after the iova_rbtree_lock. So ditch it and just use iova_rbtree_lock. 
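To make the reasoning concrete, here is a self-contained sketch of the pattern (identifiers are illustrative, not taken from the driver): an outer lock that is only ever acquired immediately before, and released immediately after, an inner lock serializes nothing that the inner lock does not already serialize, so it can be dropped.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(inner_lock);	/* plays the role of iova_rbtree_lock */
static DEFINE_SPINLOCK(outer_lock);	/* plays the role of iova_alloc_lock */
static int shared_state;

/* Old pattern: outer_lock is held strictly around inner_lock. */
static void update_old(void)
{
	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);
	spin_lock(&inner_lock);
	shared_state++;
	spin_unlock(&inner_lock);
	spin_unlock_irqrestore(&outer_lock, flags);
}

/* New pattern: the same mutual exclusion with the inner lock alone. */
static void update_new(void)
{
	unsigned long flags;

	spin_lock_irqsave(&inner_lock, flags);
	shared_state++;
	spin_unlock_irqrestore(&inner_lock, flags);
}
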
[v2: Remove the lockdep bits this time too] Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 3 --- drivers/pci/iova.c | 16 ++++------------ include/linux/iova.h | 1 - 3 files changed, 4 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index c5f7c73cbb5..d6a857397ec 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1309,7 +1309,6 @@ static void iommu_detach_domain(struct dmar_domain *domain, } static struct iova_domain reserved_iova_list; -static struct lock_class_key reserved_alloc_key; static struct lock_class_key reserved_rbtree_key; static void dmar_init_reserved_ranges(void) @@ -1320,8 +1319,6 @@ static void dmar_init_reserved_ranges(void) init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); - lockdep_set_class(&reserved_iova_list.iova_alloc_lock, - &reserved_alloc_key); lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, &reserved_rbtree_key); diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index 46dd440e231..7914951ef29 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c @@ -22,7 +22,6 @@ void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) { - spin_lock_init(&iovad->iova_alloc_lock); spin_lock_init(&iovad->iova_rbtree_lock); iovad->rbroot = RB_ROOT; iovad->cached32_node = NULL; @@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned) { - unsigned long flags; struct iova *new_iova; int ret; @@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (size_aligned) size = __roundup_pow_of_two(size); - spin_lock_irqsave(&iovad->iova_alloc_lock, flags); ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, new_iova, size_aligned); - spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags); if (ret) { free_iova_mem(new_iova); return NULL; @@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad, struct iova *iova; unsigned int overlap = 0; - spin_lock_irqsave(&iovad->iova_alloc_lock, flags); - spin_lock(&iovad->iova_rbtree_lock); + spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { if (__is_range_overlap(node, pfn_lo, pfn_hi)) { iova = container_of(node, struct iova, node); @@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad, iova = __insert_new_range(iovad, pfn_lo, pfn_hi); finish: - spin_unlock(&iovad->iova_rbtree_lock); - spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags); + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova; } @@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) unsigned long flags; struct rb_node *node; - spin_lock_irqsave(&from->iova_alloc_lock, flags); - spin_lock(&from->iova_rbtree_lock); + spin_lock_irqsave(&from->iova_rbtree_lock, flags); for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { struct iova *iova = container_of(node, struct iova, node); struct iova *new_iova; @@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) printk(KERN_ERR "Reserve iova range %lx@%lx failed\n", iova->pfn_lo, iova->pfn_lo); } - spin_unlock(&from->iova_rbtree_lock); - spin_unlock_irqrestore(&from->iova_alloc_lock, flags); + spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); } diff --git a/include/linux/iova.h b/include/linux/iova.h index 228f6c94b69..76a0759e88e 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -28,7 +28,6 @@ struct iova { /* 
holds all the iova translations for a domain */ struct iova_domain { - spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */ spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ struct rb_root rbroot; /* iova domain rbtree root */ struct rb_node *cached32_node; /* Save last alloced node */ -- cgit v1.2.3-70-g09d2 From 719a72b7c75bb239ca6184190ab994b71a31c6dc Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 17 Jul 2009 14:59:55 +0000 Subject: usb: r8a66597-hcd platform data on_chip support Convert the r8a66597-hcd driver to use the on_chip flag from platform data to enable on chip behaviour instead of relying on CONFIG_SUPERH_ON_CHIP_R8A66597 ugliness. This makes the code cleaner and also allows us to support both external and internal r8a66597 with the same kernel. It also makes the Kconfig part more future proof since we with this patch can add support for new processors with on-chip r8a66597 without modifying the Kconfig. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-se/7724/setup.c | 1 + arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 2 +- drivers/usb/host/Kconfig | 7 -- drivers/usb/host/r8a66597-hcd.c | 187 +++++++++++++++++++-------------- drivers/usb/host/r8a66597.h | 76 ++++++-------- include/linux/usb/r8a66597.h | 3 + 7 files changed, 147 insertions(+), 131 deletions(-) (limited to 'include') diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 8fed45a2fb8..4fb7e48e284 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -304,6 +304,7 @@ static struct platform_device sh_eth_device = { }; static struct r8a66597_platdata sh7724_usb0_host_data = { + .on_chip = 1, }; static struct resource sh7724_usb0_host_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index c18f7d09281..f6d20881356 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -40,7 +40,7 @@ static struct platform_device iic_device = { }; static struct r8a66597_platdata r8a66597_data = { - /* This set zero to all members */ + .on_chip = 1, }; static struct resource usb_host_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index e1bb80b2a27..28516499a2c 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -398,7 +398,7 @@ static struct platform_device rtc_device = { }; static struct r8a66597_platdata r8a66597_data = { - /* This set zero to all members */ + .on_chip = 1, }; static struct resource sh7723_usb_host_resources[] = { diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 1a920c70b5a..f21ca7d27a4 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -336,13 +336,6 @@ config USB_R8A66597_HCD To compile this driver as a module, choose M here: the module will be called r8a66597-hcd. -config SUPERH_ON_CHIP_R8A66597 - boolean "Enable SuperH on-chip R8A66597 USB" - depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724) - help - This driver enables support for the on-chip R8A66597 in the - SH7366, SH7723 and SH7724 processors. 
- config USB_WHCI_HCD tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" depends on EXPERIMENTAL diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 09895a97c10..82dce3e0d4d 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -91,43 +91,43 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597) u16 tmp; int i = 0; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#if defined(CONFIG_HAVE_CLK) - clk_enable(r8a66597->clk); + if (r8a66597->pdata->on_chip) { +#ifdef CONFIG_HAVE_CLK + clk_enable(r8a66597->clk); #endif - do { - r8a66597_write(r8a66597, SCKE, SYSCFG0); - tmp = r8a66597_read(r8a66597, SYSCFG0); - if (i++ > 1000) { - printk(KERN_ERR "r8a66597: register access fail.\n"); - return -ENXIO; - } - } while ((tmp & SCKE) != SCKE); - r8a66597_write(r8a66597, 0x04, 0x02); -#else - do { - r8a66597_write(r8a66597, USBE, SYSCFG0); - tmp = r8a66597_read(r8a66597, SYSCFG0); - if (i++ > 1000) { - printk(KERN_ERR "r8a66597: register access fail.\n"); - return -ENXIO; - } - } while ((tmp & USBE) != USBE); - r8a66597_bclr(r8a66597, USBE, SYSCFG0); - r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL, - SYSCFG0); + do { + r8a66597_write(r8a66597, SCKE, SYSCFG0); + tmp = r8a66597_read(r8a66597, SYSCFG0); + if (i++ > 1000) { + printk(KERN_ERR "r8a66597: reg access fail.\n"); + return -ENXIO; + } + } while ((tmp & SCKE) != SCKE); + r8a66597_write(r8a66597, 0x04, 0x02); + } else { + do { + r8a66597_write(r8a66597, USBE, SYSCFG0); + tmp = r8a66597_read(r8a66597, SYSCFG0); + if (i++ > 1000) { + printk(KERN_ERR "r8a66597: reg access fail.\n"); + return -ENXIO; + } + } while ((tmp & USBE) != USBE); + r8a66597_bclr(r8a66597, USBE, SYSCFG0); + r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), + XTAL, SYSCFG0); - i = 0; - r8a66597_bset(r8a66597, XCKE, SYSCFG0); - do { - msleep(1); - tmp = r8a66597_read(r8a66597, SYSCFG0); - if (i++ > 500) { - printk(KERN_ERR "r8a66597: register access fail.\n"); - return -ENXIO; - } - } while ((tmp & SCKE) != SCKE); -#endif /* #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) */ + i = 0; + r8a66597_bset(r8a66597, XCKE, SYSCFG0); + do { + msleep(1); + tmp = r8a66597_read(r8a66597, SYSCFG0); + if (i++ > 500) { + printk(KERN_ERR "r8a66597: reg access fail.\n"); + return -ENXIO; + } + } while ((tmp & SCKE) != SCKE); + } return 0; } @@ -136,15 +136,16 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597) { r8a66597_bclr(r8a66597, SCKE, SYSCFG0); udelay(1); -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#if defined(CONFIG_HAVE_CLK) - clk_disable(r8a66597->clk); -#endif -#else - r8a66597_bclr(r8a66597, PLLC, SYSCFG0); - r8a66597_bclr(r8a66597, XCKE, SYSCFG0); - r8a66597_bclr(r8a66597, USBE, SYSCFG0); + + if (r8a66597->pdata->on_chip) { +#ifdef CONFIG_HAVE_CLK + clk_disable(r8a66597->clk); #endif + } else { + r8a66597_bclr(r8a66597, PLLC, SYSCFG0); + r8a66597_bclr(r8a66597, XCKE, SYSCFG0); + r8a66597_bclr(r8a66597, USBE, SYSCFG0); + } } static void r8a66597_enable_port(struct r8a66597 *r8a66597, int port) @@ -205,7 +206,7 @@ static int enable_controller(struct r8a66597 *r8a66597) r8a66597_bset(r8a66597, SIGNE | SACKE, INTENB1); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) + for (port = 0; port < r8a66597->max_root_hub; port++) r8a66597_enable_port(r8a66597, port); return 0; @@ -218,7 +219,7 @@ static void disable_controller(struct r8a66597 *r8a66597) r8a66597_write(r8a66597, 0, INTENB0); r8a66597_write(r8a66597, 0, INTSTS0); - for (port = 0; port < 
R8A66597_MAX_ROOT_HUB; port++) + for (port = 0; port < r8a66597->max_root_hub; port++) r8a66597_disable_port(r8a66597, port); r8a66597_clock_disable(r8a66597); @@ -249,11 +250,12 @@ static int is_hub_limit(char *devpath) return ((strlen(devpath) >= 4) ? 1 : 0); } -static void get_port_number(char *devpath, u16 *root_port, u16 *hub_port) +static void get_port_number(struct r8a66597 *r8a66597, + char *devpath, u16 *root_port, u16 *hub_port) { if (root_port) { *root_port = (devpath[0] & 0x0F) - 1; - if (*root_port >= R8A66597_MAX_ROOT_HUB) + if (*root_port >= r8a66597->max_root_hub) printk(KERN_ERR "r8a66597: Illegal root port number.\n"); } if (hub_port) @@ -355,7 +357,8 @@ static int make_r8a66597_device(struct r8a66597 *r8a66597, INIT_LIST_HEAD(&dev->device_list); list_add_tail(&dev->device_list, &r8a66597->child_device); - get_port_number(urb->dev->devpath, &dev->root_port, &dev->hub_port); + get_port_number(r8a66597, urb->dev->devpath, + &dev->root_port, &dev->hub_port); if (!is_child_device(urb->dev->devpath)) r8a66597->root_hub[dev->root_port].dev = dev; @@ -420,7 +423,7 @@ static void free_usb_address(struct r8a66597 *r8a66597, list_del(&dev->device_list); kfree(dev); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { if (r8a66597->root_hub[port].dev == dev) { r8a66597->root_hub[port].dev = NULL; break; @@ -495,10 +498,20 @@ static void r8a66597_pipe_toggle(struct r8a66597 *r8a66597, r8a66597_bset(r8a66597, SQCLR, pipe->pipectr); } +static inline unsigned short mbw_value(struct r8a66597 *r8a66597) +{ + if (r8a66597->pdata->on_chip) + return MBW_32; + else + return MBW_16; +} + /* this function must be called with interrupt disabled */ static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum) { - r8a66597_mdfy(r8a66597, MBW | pipenum, MBW | CURPIPE, CFIFOSEL); + unsigned short mbw = mbw_value(r8a66597); + + r8a66597_mdfy(r8a66597, mbw | pipenum, mbw | CURPIPE, CFIFOSEL); r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, pipenum); } @@ -506,11 +519,13 @@ static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum) static inline void fifo_change_from_pipe(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe) { + unsigned short mbw = mbw_value(r8a66597); + cfifo_change(r8a66597, 0); - r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D0FIFOSEL); - r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D1FIFOSEL); + r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D0FIFOSEL); + r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D1FIFOSEL); - r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum, MBW | CURPIPE, + r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, mbw | CURPIPE, pipe->fifosel); r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum); } @@ -742,9 +757,13 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe, struct urb *urb) { -#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597) int i; struct r8a66597_pipe_info *info = &pipe->info; + unsigned short mbw = mbw_value(r8a66597); + + /* pipe dma is only for external controlles */ + if (r8a66597->pdata->on_chip) + return; if ((pipe->info.pipenum != 0) && (info->type != R8A66597_INT)) { for (i = 0; i < R8A66597_MAX_DMA_CHANNEL; i++) { @@ -763,8 +782,8 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, set_pipe_reg_addr(pipe, i); cfifo_change(r8a66597, 0); - r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum, - MBW | CURPIPE, pipe->fifosel); + r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, + mbw | 
CURPIPE, pipe->fifosel); r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum); @@ -772,7 +791,6 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, break; } } -#endif /* #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) */ } /* this function must be called with interrupt disabled */ @@ -1769,7 +1787,7 @@ static void r8a66597_timer(unsigned long _r8a66597) spin_lock_irqsave(&r8a66597->lock, flags); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) + for (port = 0; port < r8a66597->max_root_hub; port++) r8a66597_root_hub_control(r8a66597, port); spin_unlock_irqrestore(&r8a66597->lock, flags); @@ -1807,7 +1825,7 @@ static void set_address_zero(struct r8a66597 *r8a66597, struct urb *urb) u16 root_port, hub_port; if (usb_address == 0) { - get_port_number(urb->dev->devpath, + get_port_number(r8a66597, urb->dev->devpath, &root_port, &hub_port); set_devadd_reg(r8a66597, 0, get_r8a66597_usb_speed(urb->dev->speed), @@ -2082,7 +2100,7 @@ static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf) *buf = 0; /* initialize (no change) */ - for (i = 0; i < R8A66597_MAX_ROOT_HUB; i++) { + for (i = 0; i < r8a66597->max_root_hub; i++) { if (r8a66597->root_hub[i].port & 0xffff0000) *buf |= 1 << (i + 1); } @@ -2097,11 +2115,11 @@ static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597, { desc->bDescriptorType = 0x29; desc->bHubContrCurrent = 0; - desc->bNbrPorts = R8A66597_MAX_ROOT_HUB; + desc->bNbrPorts = r8a66597->max_root_hub; desc->bDescLength = 9; desc->bPwrOn2PwrGood = 0; desc->wHubCharacteristics = cpu_to_le16(0x0011); - desc->bitmap[0] = ((1 << R8A66597_MAX_ROOT_HUB) - 1) << 1; + desc->bitmap[0] = ((1 << r8a66597->max_root_hub) - 1) << 1; desc->bitmap[1] = ~0; } @@ -2129,7 +2147,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } break; case ClearPortFeature: - if (wIndex > R8A66597_MAX_ROOT_HUB) + if (wIndex > r8a66597->max_root_hub) goto error; if (wLength != 0) goto error; @@ -2162,12 +2180,12 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, *buf = 0x00; break; case GetPortStatus: - if (wIndex > R8A66597_MAX_ROOT_HUB) + if (wIndex > r8a66597->max_root_hub) goto error; *(__le32 *)buf = cpu_to_le32(rh->port); break; case SetPortFeature: - if (wIndex > R8A66597_MAX_ROOT_HUB) + if (wIndex > r8a66597->max_root_hub) goto error; if (wLength != 0) goto error; @@ -2216,7 +2234,7 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd) dbg("%s", __func__); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; unsigned long dvstctr_reg = get_dvstctr_reg(port); @@ -2247,7 +2265,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd) dbg("%s", __func__); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; unsigned long dvstctr_reg = get_dvstctr_reg(port); @@ -2314,7 +2332,7 @@ static int r8a66597_suspend(struct device *dev) disable_controller(r8a66597); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; rh->port = 0x00000000; @@ -2354,8 +2372,9 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) del_timer_sync(&r8a66597->rh_timer); usb_remove_hcd(hcd); iounmap((void *)r8a66597->reg); -#if 
defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) - clk_put(r8a66597->clk); +#ifdef CONFIG_HAVE_CLK + if (r8a66597->pdata->on_chip) + clk_put(r8a66597->clk); #endif usb_put_hcd(hcd); return 0; @@ -2363,7 +2382,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) static int __devinit r8a66597_probe(struct platform_device *pdev) { -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK char clk_name[8]; #endif struct resource *res = NULL, *ires; @@ -2425,15 +2444,20 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) r8a66597->pdata = pdev->dev.platform_data; r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) - snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); - r8a66597->clk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(r8a66597->clk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); - ret = PTR_ERR(r8a66597->clk); - goto clean_up2; - } + if (r8a66597->pdata->on_chip) { +#ifdef CONFIG_HAVE_CLK + snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); + r8a66597->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(r8a66597->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", + clk_name); + ret = PTR_ERR(r8a66597->clk); + goto clean_up2; + } #endif + r8a66597->max_root_hub = 1; + } else + r8a66597->max_root_hub = 2; spin_lock_init(&r8a66597->lock); init_timer(&r8a66597->rh_timer); @@ -2463,8 +2487,9 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) return 0; clean_up3: -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) - clk_put(r8a66597->clk); +#ifdef CONFIG_HAVE_CLK + if (r8a66597->pdata->on_chip) + clk_put(r8a66597->clk); clean_up2: #endif usb_put_hcd(hcd); diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index d72680b433f..eecbd917bc8 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h @@ -26,7 +26,7 @@ #ifndef __R8A66597_H__ #define __R8A66597_H__ -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK #include #endif @@ -193,13 +193,9 @@ #define REW 0x4000 /* b14: Buffer rewind */ #define DCLRM 0x2000 /* b13: DMA buffer clear mode */ #define DREQE 0x1000 /* b12: DREQ output enable */ -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#define MBW 0x0800 -#else -#define MBW 0x0400 /* b10: Maximum bit width for FIFO access */ -#endif #define MBW_8 0x0000 /* 8bit */ #define MBW_16 0x0400 /* 16bit */ +#define MBW_32 0x0800 /* 32bit */ #define BIGEND 0x0100 /* b8: Big endian mode */ #define BYTE_LITTLE 0x0000 /* little dendian */ #define BYTE_BIG 0x0100 /* big endifan */ @@ -405,11 +401,7 @@ #define R8A66597_MAX_NUM_PIPE 10 #define R8A66597_BUF_BSIZE 8 #define R8A66597_MAX_DEVICE 10 -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#define R8A66597_MAX_ROOT_HUB 1 -#else #define R8A66597_MAX_ROOT_HUB 2 -#endif #define R8A66597_MAX_SAMPLING 5 #define R8A66597_RH_POLL_TIME 10 #define R8A66597_MAX_DMA_CHANNEL 2 @@ -487,7 +479,7 @@ struct r8a66597_root_hub { struct r8a66597 { spinlock_t lock; unsigned long reg; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK struct clk *clk; #endif struct r8a66597_platdata *pdata; @@ -504,6 +496,7 @@ struct r8a66597 { unsigned short interval_map; unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE]; unsigned char dma_map; + unsigned int max_root_hub; struct list_head child_device; unsigned long 
child_connect_map[4]; @@ -550,21 +543,22 @@ static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597, unsigned long offset, u16 *buf, int len) { -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) unsigned long fifoaddr = r8a66597->reg + offset; unsigned long count; - count = len / 4; - insl(fifoaddr, buf, count); + if (r8a66597->pdata->on_chip) { + count = len / 4; + insl(fifoaddr, buf, count); - if (len & 0x00000003) { - unsigned long tmp = inl(fifoaddr); - memcpy((unsigned char *)buf + count * 4, &tmp, len & 0x03); + if (len & 0x00000003) { + unsigned long tmp = inl(fifoaddr); + memcpy((unsigned char *)buf + count * 4, &tmp, + len & 0x03); + } + } else { + len = (len + 1) / 2; + insw(fifoaddr, buf, len); } -#else - len = (len + 1) / 2; - insw(r8a66597->reg + offset, buf, len); -#endif } static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val, @@ -578,33 +572,33 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597, int len) { unsigned long fifoaddr = r8a66597->reg + offset; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) unsigned long count; unsigned char *pb; int i; - count = len / 4; - outsl(fifoaddr, buf, count); + if (r8a66597->pdata->on_chip) { + count = len / 4; + outsl(fifoaddr, buf, count); + + if (len & 0x00000003) { + pb = (unsigned char *)buf + count * 4; + for (i = 0; i < (len & 0x00000003); i++) { + if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND) + outb(pb[i], fifoaddr + i); + else + outb(pb[i], fifoaddr + 3 - i); + } + } + } else { + int odd = len & 0x0001; - if (len & 0x00000003) { - pb = (unsigned char *)buf + count * 4; - for (i = 0; i < (len & 0x00000003); i++) { - if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND) - outb(pb[i], fifoaddr + i); - else - outb(pb[i], fifoaddr + 3 - i); + len = len / 2; + outsw(fifoaddr, buf, len); + if (unlikely(odd)) { + buf = &buf[len]; + outb((unsigned char)*buf, fifoaddr); } } -#else - int odd = len & 0x0001; - - len = len / 2; - outsw(fifoaddr, buf, len); - if (unlikely(odd)) { - buf = &buf[len]; - outb((unsigned char)*buf, fifoaddr); - } -#endif } static inline void r8a66597_mdfy(struct r8a66597 *r8a66597, diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h index e9f0384fa20..460ee3f6a2c 100644 --- a/include/linux/usb/r8a66597.h +++ b/include/linux/usb/r8a66597.h @@ -31,6 +31,9 @@ struct r8a66597_platdata { /* This ops can controll port power instead of DVSTCTR register. */ void (*port_power)(int port, int power); + /* set one = on chip controller, set zero = external controller */ + unsigned on_chip:1; + /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */ unsigned xtal:2; -- cgit v1.2.3-70-g09d2 From fbd90375d7531927d312766b548376d909811b4d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 22 Jul 2009 13:40:14 +0200 Subject: hrtimer: Remove cb_entry from struct hrtimer It's unused, remove it. 
Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner LKML-Reference: --- include/linux/hrtimer.h | 2 -- kernel/hrtimer.c | 1 - 2 files changed, 3 deletions(-) (limited to 'include') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 54648e625ef..40e7d54fc42 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -91,7 +91,6 @@ enum hrtimer_restart { * @function: timer expiry callback function * @base: pointer to the timer base (per cpu and per clock) * @state: state information (See bit values above) - * @cb_entry: list head to enqueue an expired timer into the callback list * @start_site: timer statistics field to store the site where the timer * was started * @start_comm: timer statistics field to store the name of the process which @@ -108,7 +107,6 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; - struct list_head cb_entry; #ifdef CONFIG_TIMER_STATS int start_pid; void *start_site; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 43d151f185b..052a0f53e4e 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1092,7 +1092,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, clock_id = CLOCK_MONOTONIC; timer->base = &cpu_base->clock_base[clock_id]; - INIT_LIST_HEAD(&timer->cb_entry); hrtimer_init_timer_hres(timer); #ifdef CONFIG_TIMER_STATS -- cgit v1.2.3-70-g09d2 From cf4f1e76c49dacfde0680b170b9a9b6a42f296bb Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 14:32:03 +0000 Subject: usb: move r8a66597 register defines Move r8a66597 hardware register definitions from the host controller header file to the platform data header file. With this change in place we can easily share register definitions between the host controller driver and a future gadget driver. 
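As a usage illustration for the platform-data approach in the two r8a66597 patches above, a board file registering an external controller could look roughly like the sketch below. The .on_chip and .xtal fields come from include/linux/usb/r8a66597.h; the device name, the exact XTAL constant spelling and the omitted MEM/IRQ resources are assumptions, not taken from these patches.

#include <linux/platform_device.h>
#include <linux/usb/r8a66597.h>

/* Hypothetical external (off-chip) controller: .on_chip stays 0 and
 * the crystal frequency is passed via .xtal, following the
 * R8A66597_PLATDATA_XTAL_nnMHZ pattern mentioned in the header. */
static struct r8a66597_platdata example_usb_host_data = {
	.on_chip = 0,
	.xtal	 = R8A66597_PLATDATA_XTAL_12MHZ,
};

static struct platform_device example_usb_host_device = {
	.name	= "r8a66597_hcd",	/* assumed to match the hcd driver */
	.id	= 0,
	.dev	= {
		.platform_data = &example_usb_host_data,
	},
	/* a real board would also fill .resource with the register
	 * window and the (level-triggered) interrupt line */
};
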
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- drivers/usb/host/r8a66597.h | 366 ------------------------------------------ include/linux/usb/r8a66597.h | 372 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 370 insertions(+), 368 deletions(-) (limited to 'include') diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index eecbd917bc8..228e3fb2385 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h @@ -32,372 +32,6 @@ #include -#define SYSCFG0 0x00 -#define SYSCFG1 0x02 -#define SYSSTS0 0x04 -#define SYSSTS1 0x06 -#define DVSTCTR0 0x08 -#define DVSTCTR1 0x0A -#define TESTMODE 0x0C -#define PINCFG 0x0E -#define DMA0CFG 0x10 -#define DMA1CFG 0x12 -#define CFIFO 0x14 -#define D0FIFO 0x18 -#define D1FIFO 0x1C -#define CFIFOSEL 0x20 -#define CFIFOCTR 0x22 -#define CFIFOSIE 0x24 -#define D0FIFOSEL 0x28 -#define D0FIFOCTR 0x2A -#define D1FIFOSEL 0x2C -#define D1FIFOCTR 0x2E -#define INTENB0 0x30 -#define INTENB1 0x32 -#define INTENB2 0x34 -#define BRDYENB 0x36 -#define NRDYENB 0x38 -#define BEMPENB 0x3A -#define SOFCFG 0x3C -#define INTSTS0 0x40 -#define INTSTS1 0x42 -#define INTSTS2 0x44 -#define BRDYSTS 0x46 -#define NRDYSTS 0x48 -#define BEMPSTS 0x4A -#define FRMNUM 0x4C -#define UFRMNUM 0x4E -#define USBADDR 0x50 -#define USBREQ 0x54 -#define USBVAL 0x56 -#define USBINDX 0x58 -#define USBLENG 0x5A -#define DCPCFG 0x5C -#define DCPMAXP 0x5E -#define DCPCTR 0x60 -#define PIPESEL 0x64 -#define PIPECFG 0x68 -#define PIPEBUF 0x6A -#define PIPEMAXP 0x6C -#define PIPEPERI 0x6E -#define PIPE1CTR 0x70 -#define PIPE2CTR 0x72 -#define PIPE3CTR 0x74 -#define PIPE4CTR 0x76 -#define PIPE5CTR 0x78 -#define PIPE6CTR 0x7A -#define PIPE7CTR 0x7C -#define PIPE8CTR 0x7E -#define PIPE9CTR 0x80 -#define PIPE1TRE 0x90 -#define PIPE1TRN 0x92 -#define PIPE2TRE 0x94 -#define PIPE2TRN 0x96 -#define PIPE3TRE 0x98 -#define PIPE3TRN 0x9A -#define PIPE4TRE 0x9C -#define PIPE4TRN 0x9E -#define PIPE5TRE 0xA0 -#define PIPE5TRN 0xA2 -#define DEVADD0 0xD0 -#define DEVADD1 0xD2 -#define DEVADD2 0xD4 -#define DEVADD3 0xD6 -#define DEVADD4 0xD8 -#define DEVADD5 0xDA -#define DEVADD6 0xDC -#define DEVADD7 0xDE -#define DEVADD8 0xE0 -#define DEVADD9 0xE2 -#define DEVADDA 0xE4 - -/* System Configuration Control Register */ -#define XTAL 0xC000 /* b15-14: Crystal selection */ -#define XTAL48 0x8000 /* 48MHz */ -#define XTAL24 0x4000 /* 24MHz */ -#define XTAL12 0x0000 /* 12MHz */ -#define XCKE 0x2000 /* b13: External clock enable */ -#define PLLC 0x0800 /* b11: PLL control */ -#define SCKE 0x0400 /* b10: USB clock enable */ -#define PCSDIS 0x0200 /* b9: not CS wakeup */ -#define LPSME 0x0100 /* b8: Low power sleep mode */ -#define HSE 0x0080 /* b7: Hi-speed enable */ -#define DCFM 0x0040 /* b6: Controller function select */ -#define DRPD 0x0020 /* b5: D+/- pull down control */ -#define DPRPU 0x0010 /* b4: D+ pull up control */ -#define USBE 0x0001 /* b0: USB module operation enable */ - -/* System Configuration Status Register */ -#define OVCBIT 0x8000 /* b15-14: Over-current bit */ -#define OVCMON 0xC000 /* b15-14: Over-current monitor */ -#define SOFEA 0x0020 /* b5: SOF monitor */ -#define IDMON 0x0004 /* b3: ID-pin monitor */ -#define LNST 0x0003 /* b1-0: D+, D- line status */ -#define SE1 0x0003 /* SE1 */ -#define FS_KSTS 0x0002 /* Full-Speed K State */ -#define FS_JSTS 0x0001 /* Full-Speed J State */ -#define LS_JSTS 0x0002 /* Low-Speed J State */ -#define LS_KSTS 0x0001 /* Low-Speed K State */ -#define SE0 0x0000 /* SE0 */ - -/* Device State Control Register */ 
-#define EXTLP0 0x0400 /* b10: External port */ -#define VBOUT 0x0200 /* b9: VBUS output */ -#define WKUP 0x0100 /* b8: Remote wakeup */ -#define RWUPE 0x0080 /* b7: Remote wakeup sense */ -#define USBRST 0x0040 /* b6: USB reset enable */ -#define RESUME 0x0020 /* b5: Resume enable */ -#define UACT 0x0010 /* b4: USB bus enable */ -#define RHST 0x0007 /* b1-0: Reset handshake status */ -#define HSPROC 0x0004 /* HS handshake is processing */ -#define HSMODE 0x0003 /* Hi-Speed mode */ -#define FSMODE 0x0002 /* Full-Speed mode */ -#define LSMODE 0x0001 /* Low-Speed mode */ -#define UNDECID 0x0000 /* Undecided */ - -/* Test Mode Register */ -#define UTST 0x000F /* b3-0: Test select */ -#define H_TST_PACKET 0x000C /* HOST TEST Packet */ -#define H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ -#define H_TST_K 0x000A /* HOST TEST K */ -#define H_TST_J 0x0009 /* HOST TEST J */ -#define H_TST_NORMAL 0x0000 /* HOST Normal Mode */ -#define P_TST_PACKET 0x0004 /* PERI TEST Packet */ -#define P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ -#define P_TST_K 0x0002 /* PERI TEST K */ -#define P_TST_J 0x0001 /* PERI TEST J */ -#define P_TST_NORMAL 0x0000 /* PERI Normal Mode */ - -/* Data Pin Configuration Register */ -#define LDRV 0x8000 /* b15: Drive Current Adjust */ -#define VIF1 0x0000 /* VIF = 1.8V */ -#define VIF3 0x8000 /* VIF = 3.3V */ -#define INTA 0x0001 /* b1: USB INT-pin active */ - -/* DMAx Pin Configuration Register */ -#define DREQA 0x4000 /* b14: Dreq active select */ -#define BURST 0x2000 /* b13: Burst mode */ -#define DACKA 0x0400 /* b10: Dack active select */ -#define DFORM 0x0380 /* b9-7: DMA mode select */ -#define CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ -#define CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ -#define CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ -#define SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ -#define DENDA 0x0040 /* b6: Dend active select */ -#define PKTM 0x0020 /* b5: Packet mode */ -#define DENDE 0x0010 /* b4: Dend enable */ -#define OBUS 0x0004 /* b2: OUTbus mode */ - -/* CFIFO/DxFIFO Port Select Register */ -#define RCNT 0x8000 /* b15: Read count mode */ -#define REW 0x4000 /* b14: Buffer rewind */ -#define DCLRM 0x2000 /* b13: DMA buffer clear mode */ -#define DREQE 0x1000 /* b12: DREQ output enable */ -#define MBW_8 0x0000 /* 8bit */ -#define MBW_16 0x0400 /* 16bit */ -#define MBW_32 0x0800 /* 32bit */ -#define BIGEND 0x0100 /* b8: Big endian mode */ -#define BYTE_LITTLE 0x0000 /* little dendian */ -#define BYTE_BIG 0x0100 /* big endifan */ -#define ISEL 0x0020 /* b5: DCP FIFO port direction select */ -#define CURPIPE 0x000F /* b2-0: PIPE select */ - -/* CFIFO/DxFIFO Port Control Register */ -#define BVAL 0x8000 /* b15: Buffer valid flag */ -#define BCLR 0x4000 /* b14: Buffer clear */ -#define FRDY 0x2000 /* b13: FIFO ready */ -#define DTLN 0x0FFF /* b11-0: FIFO received data length */ - -/* Interrupt Enable Register 0 */ -#define VBSE 0x8000 /* b15: VBUS interrupt */ -#define RSME 0x4000 /* b14: Resume interrupt */ -#define SOFE 0x2000 /* b13: Frame update interrupt */ -#define DVSE 0x1000 /* b12: Device state transition interrupt */ -#define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ -#define BEMPE 0x0400 /* b10: Buffer empty interrupt */ -#define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ -#define BRDYE 0x0100 /* b8: Buffer ready interrupt */ - -/* Interrupt Enable Register 1 */ -#define OVRCRE 0x8000 /* b15: Over-current interrupt */ -#define BCHGE 0x4000 /* b14: USB us chenge 
interrupt */ -#define DTCHE 0x1000 /* b12: Detach sense interrupt */ -#define ATTCHE 0x0800 /* b11: Attach sense interrupt */ -#define EOFERRE 0x0040 /* b6: EOF error interrupt */ -#define SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ -#define SACKE 0x0010 /* b4: SETUP ACK interrupt */ - -/* BRDY Interrupt Enable/Status Register */ -#define BRDY9 0x0200 /* b9: PIPE9 */ -#define BRDY8 0x0100 /* b8: PIPE8 */ -#define BRDY7 0x0080 /* b7: PIPE7 */ -#define BRDY6 0x0040 /* b6: PIPE6 */ -#define BRDY5 0x0020 /* b5: PIPE5 */ -#define BRDY4 0x0010 /* b4: PIPE4 */ -#define BRDY3 0x0008 /* b3: PIPE3 */ -#define BRDY2 0x0004 /* b2: PIPE2 */ -#define BRDY1 0x0002 /* b1: PIPE1 */ -#define BRDY0 0x0001 /* b1: PIPE0 */ - -/* NRDY Interrupt Enable/Status Register */ -#define NRDY9 0x0200 /* b9: PIPE9 */ -#define NRDY8 0x0100 /* b8: PIPE8 */ -#define NRDY7 0x0080 /* b7: PIPE7 */ -#define NRDY6 0x0040 /* b6: PIPE6 */ -#define NRDY5 0x0020 /* b5: PIPE5 */ -#define NRDY4 0x0010 /* b4: PIPE4 */ -#define NRDY3 0x0008 /* b3: PIPE3 */ -#define NRDY2 0x0004 /* b2: PIPE2 */ -#define NRDY1 0x0002 /* b1: PIPE1 */ -#define NRDY0 0x0001 /* b1: PIPE0 */ - -/* BEMP Interrupt Enable/Status Register */ -#define BEMP9 0x0200 /* b9: PIPE9 */ -#define BEMP8 0x0100 /* b8: PIPE8 */ -#define BEMP7 0x0080 /* b7: PIPE7 */ -#define BEMP6 0x0040 /* b6: PIPE6 */ -#define BEMP5 0x0020 /* b5: PIPE5 */ -#define BEMP4 0x0010 /* b4: PIPE4 */ -#define BEMP3 0x0008 /* b3: PIPE3 */ -#define BEMP2 0x0004 /* b2: PIPE2 */ -#define BEMP1 0x0002 /* b1: PIPE1 */ -#define BEMP0 0x0001 /* b0: PIPE0 */ - -/* SOF Pin Configuration Register */ -#define TRNENSEL 0x0100 /* b8: Select transaction enable period */ -#define BRDYM 0x0040 /* b6: BRDY clear timing */ -#define INTL 0x0020 /* b5: Interrupt sense select */ -#define EDGESTS 0x0010 /* b4: */ -#define SOFMODE 0x000C /* b3-2: SOF pin select */ -#define SOF_125US 0x0008 /* SOF OUT 125us Frame Signal */ -#define SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ -#define SOF_DISABLE 0x0000 /* SOF OUT Disable */ - -/* Interrupt Status Register 0 */ -#define VBINT 0x8000 /* b15: VBUS interrupt */ -#define RESM 0x4000 /* b14: Resume interrupt */ -#define SOFR 0x2000 /* b13: SOF frame update interrupt */ -#define DVST 0x1000 /* b12: Device state transition interrupt */ -#define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ -#define BEMP 0x0400 /* b10: Buffer empty interrupt */ -#define NRDY 0x0200 /* b9: Buffer not ready interrupt */ -#define BRDY 0x0100 /* b8: Buffer ready interrupt */ -#define VBSTS 0x0080 /* b7: VBUS input port */ -#define DVSQ 0x0070 /* b6-4: Device state */ -#define DS_SPD_CNFG 0x0070 /* Suspend Configured */ -#define DS_SPD_ADDR 0x0060 /* Suspend Address */ -#define DS_SPD_DFLT 0x0050 /* Suspend Default */ -#define DS_SPD_POWR 0x0040 /* Suspend Powered */ -#define DS_SUSP 0x0040 /* Suspend */ -#define DS_CNFG 0x0030 /* Configured */ -#define DS_ADDS 0x0020 /* Address */ -#define DS_DFLT 0x0010 /* Default */ -#define DS_POWR 0x0000 /* Powered */ -#define DVSQS 0x0030 /* b5-4: Device state */ -#define VALID 0x0008 /* b3: Setup packet detected flag */ -#define CTSQ 0x0007 /* b2-0: Control transfer stage */ -#define CS_SQER 0x0006 /* Sequence error */ -#define CS_WRND 0x0005 /* Control write nodata status stage */ -#define CS_WRSS 0x0004 /* Control write status stage */ -#define CS_WRDS 0x0003 /* Control write data stage */ -#define CS_RDSS 0x0002 /* Control read status stage */ -#define CS_RDDS 0x0001 /* Control read data stage */ -#define CS_IDST 0x0000 /* Idle or setup 
stage */ - -/* Interrupt Status Register 1 */ -#define OVRCR 0x8000 /* b15: Over-current interrupt */ -#define BCHG 0x4000 /* b14: USB bus chenge interrupt */ -#define DTCH 0x1000 /* b12: Detach sense interrupt */ -#define ATTCH 0x0800 /* b11: Attach sense interrupt */ -#define EOFERR 0x0040 /* b6: EOF-error interrupt */ -#define SIGN 0x0020 /* b5: Setup ignore interrupt */ -#define SACK 0x0010 /* b4: Setup acknowledge interrupt */ - -/* Frame Number Register */ -#define OVRN 0x8000 /* b15: Overrun error */ -#define CRCE 0x4000 /* b14: Received data error */ -#define FRNM 0x07FF /* b10-0: Frame number */ - -/* Micro Frame Number Register */ -#define UFRNM 0x0007 /* b2-0: Micro frame number */ - -/* Default Control Pipe Maxpacket Size Register */ -/* Pipe Maxpacket Size Register */ -#define DEVSEL 0xF000 /* b15-14: Device address select */ -#define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ - -/* Default Control Pipe Control Register */ -#define BSTS 0x8000 /* b15: Buffer status */ -#define SUREQ 0x4000 /* b14: Send USB request */ -#define CSCLR 0x2000 /* b13: complete-split status clear */ -#define CSSTS 0x1000 /* b12: complete-split status */ -#define SUREQCLR 0x0800 /* b11: stop setup request */ -#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ -#define SQSET 0x0080 /* b7: Sequence toggle bit set */ -#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ -#define PBUSY 0x0020 /* b5: pipe busy */ -#define PINGE 0x0010 /* b4: ping enable */ -#define CCPL 0x0004 /* b2: Enable control transfer complete */ -#define PID 0x0003 /* b1-0: Response PID */ -#define PID_STALL11 0x0003 /* STALL */ -#define PID_STALL 0x0002 /* STALL */ -#define PID_BUF 0x0001 /* BUF */ -#define PID_NAK 0x0000 /* NAK */ - -/* Pipe Window Select Register */ -#define PIPENM 0x0007 /* b2-0: Pipe select */ - -/* Pipe Configuration Register */ -#define R8A66597_TYP 0xC000 /* b15-14: Transfer type */ -#define R8A66597_ISO 0xC000 /* Isochronous */ -#define R8A66597_INT 0x8000 /* Interrupt */ -#define R8A66597_BULK 0x4000 /* Bulk */ -#define R8A66597_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ -#define R8A66597_DBLB 0x0200 /* b9: Double buffer mode select */ -#define R8A66597_CNTMD 0x0100 /* b8: Continuous transfer mode select */ -#define R8A66597_SHTNAK 0x0080 /* b7: Transfer end NAK */ -#define R8A66597_DIR 0x0010 /* b4: Transfer direction select */ -#define R8A66597_EPNUM 0x000F /* b3-0: Eendpoint number select */ - -/* Pipe Buffer Configuration Register */ -#define BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ -#define BUFNMB 0x007F /* b6-0: Pipe buffer number */ -#define PIPE0BUF 256 -#define PIPExBUF 64 - -/* Pipe Maxpacket Size Register */ -#define MXPS 0x07FF /* b10-0: Maxpacket size */ - -/* Pipe Cycle Configuration Register */ -#define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ -#define IITV 0x0007 /* b2-0: Isochronous interval */ - -/* Pipex Control Register */ -#define BSTS 0x8000 /* b15: Buffer status */ -#define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ -#define CSCLR 0x2000 /* b13: complete-split status clear */ -#define CSSTS 0x1000 /* b12: complete-split status */ -#define ATREPM 0x0400 /* b10: Auto repeat mode */ -#define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ -#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ -#define SQSET 0x0080 /* b7: Sequence toggle bit set */ -#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ -#define PBUSY 0x0020 /* b5: pipe busy */ -#define PID 0x0003 /* b1-0: Response 
PID */ - -/* PIPExTRE */ -#define TRENB 0x0200 /* b9: Transaction counter enable */ -#define TRCLR 0x0100 /* b8: Transaction counter clear */ - -/* PIPExTRN */ -#define TRNCNT 0xFFFF /* b15-0: Transaction counter */ - -/* DEVADDx */ -#define UPPHUB 0x7800 -#define HUBPORT 0x0700 -#define USBSPD 0x00C0 -#define RTPORT 0x0001 - #define R8A66597_MAX_NUM_PIPE 10 #define R8A66597_BUF_BSIZE 8 #define R8A66597_MAX_DEVICE 10 diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h index 460ee3f6a2c..26d21673405 100644 --- a/include/linux/usb/r8a66597.h +++ b/include/linux/usb/r8a66597.h @@ -28,7 +28,7 @@ #define R8A66597_PLATDATA_XTAL_48MHZ 0x03 struct r8a66597_platdata { - /* This ops can controll port power instead of DVSTCTR register. */ + /* This callback can control port power instead of DVSTCTR register. */ void (*port_power)(int port, int power); /* set one = on chip controller, set zero = external controller */ @@ -43,5 +43,373 @@ struct r8a66597_platdata { /* set one = big endian, set zero = little endian */ unsigned endian:1; }; -#endif + +/* Register definitions */ +#define SYSCFG0 0x00 +#define SYSCFG1 0x02 +#define SYSSTS0 0x04 +#define SYSSTS1 0x06 +#define DVSTCTR0 0x08 +#define DVSTCTR1 0x0A +#define TESTMODE 0x0C +#define PINCFG 0x0E +#define DMA0CFG 0x10 +#define DMA1CFG 0x12 +#define CFIFO 0x14 +#define D0FIFO 0x18 +#define D1FIFO 0x1C +#define CFIFOSEL 0x20 +#define CFIFOCTR 0x22 +#define CFIFOSIE 0x24 +#define D0FIFOSEL 0x28 +#define D0FIFOCTR 0x2A +#define D1FIFOSEL 0x2C +#define D1FIFOCTR 0x2E +#define INTENB0 0x30 +#define INTENB1 0x32 +#define INTENB2 0x34 +#define BRDYENB 0x36 +#define NRDYENB 0x38 +#define BEMPENB 0x3A +#define SOFCFG 0x3C +#define INTSTS0 0x40 +#define INTSTS1 0x42 +#define INTSTS2 0x44 +#define BRDYSTS 0x46 +#define NRDYSTS 0x48 +#define BEMPSTS 0x4A +#define FRMNUM 0x4C +#define UFRMNUM 0x4E +#define USBADDR 0x50 +#define USBREQ 0x54 +#define USBVAL 0x56 +#define USBINDX 0x58 +#define USBLENG 0x5A +#define DCPCFG 0x5C +#define DCPMAXP 0x5E +#define DCPCTR 0x60 +#define PIPESEL 0x64 +#define PIPECFG 0x68 +#define PIPEBUF 0x6A +#define PIPEMAXP 0x6C +#define PIPEPERI 0x6E +#define PIPE1CTR 0x70 +#define PIPE2CTR 0x72 +#define PIPE3CTR 0x74 +#define PIPE4CTR 0x76 +#define PIPE5CTR 0x78 +#define PIPE6CTR 0x7A +#define PIPE7CTR 0x7C +#define PIPE8CTR 0x7E +#define PIPE9CTR 0x80 +#define PIPE1TRE 0x90 +#define PIPE1TRN 0x92 +#define PIPE2TRE 0x94 +#define PIPE2TRN 0x96 +#define PIPE3TRE 0x98 +#define PIPE3TRN 0x9A +#define PIPE4TRE 0x9C +#define PIPE4TRN 0x9E +#define PIPE5TRE 0xA0 +#define PIPE5TRN 0xA2 +#define DEVADD0 0xD0 +#define DEVADD1 0xD2 +#define DEVADD2 0xD4 +#define DEVADD3 0xD6 +#define DEVADD4 0xD8 +#define DEVADD5 0xDA +#define DEVADD6 0xDC +#define DEVADD7 0xDE +#define DEVADD8 0xE0 +#define DEVADD9 0xE2 +#define DEVADDA 0xE4 + +/* System Configuration Control Register */ +#define XTAL 0xC000 /* b15-14: Crystal selection */ +#define XTAL48 0x8000 /* 48MHz */ +#define XTAL24 0x4000 /* 24MHz */ +#define XTAL12 0x0000 /* 12MHz */ +#define XCKE 0x2000 /* b13: External clock enable */ +#define PLLC 0x0800 /* b11: PLL control */ +#define SCKE 0x0400 /* b10: USB clock enable */ +#define PCSDIS 0x0200 /* b9: not CS wakeup */ +#define LPSME 0x0100 /* b8: Low power sleep mode */ +#define HSE 0x0080 /* b7: Hi-speed enable */ +#define DCFM 0x0040 /* b6: Controller function select */ +#define DRPD 0x0020 /* b5: D+/- pull down control */ +#define DPRPU 0x0010 /* b4: D+ pull up control */ +#define USBE 0x0001 /* b0: USB module operation 
enable */ + +/* System Configuration Status Register */ +#define OVCBIT 0x8000 /* b15-14: Over-current bit */ +#define OVCMON 0xC000 /* b15-14: Over-current monitor */ +#define SOFEA 0x0020 /* b5: SOF monitor */ +#define IDMON 0x0004 /* b3: ID-pin monitor */ +#define LNST 0x0003 /* b1-0: D+, D- line status */ +#define SE1 0x0003 /* SE1 */ +#define FS_KSTS 0x0002 /* Full-Speed K State */ +#define FS_JSTS 0x0001 /* Full-Speed J State */ +#define LS_JSTS 0x0002 /* Low-Speed J State */ +#define LS_KSTS 0x0001 /* Low-Speed K State */ +#define SE0 0x0000 /* SE0 */ + +/* Device State Control Register */ +#define EXTLP0 0x0400 /* b10: External port */ +#define VBOUT 0x0200 /* b9: VBUS output */ +#define WKUP 0x0100 /* b8: Remote wakeup */ +#define RWUPE 0x0080 /* b7: Remote wakeup sense */ +#define USBRST 0x0040 /* b6: USB reset enable */ +#define RESUME 0x0020 /* b5: Resume enable */ +#define UACT 0x0010 /* b4: USB bus enable */ +#define RHST 0x0007 /* b1-0: Reset handshake status */ +#define HSPROC 0x0004 /* HS handshake is processing */ +#define HSMODE 0x0003 /* Hi-Speed mode */ +#define FSMODE 0x0002 /* Full-Speed mode */ +#define LSMODE 0x0001 /* Low-Speed mode */ +#define UNDECID 0x0000 /* Undecided */ + +/* Test Mode Register */ +#define UTST 0x000F /* b3-0: Test select */ +#define H_TST_PACKET 0x000C /* HOST TEST Packet */ +#define H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ +#define H_TST_K 0x000A /* HOST TEST K */ +#define H_TST_J 0x0009 /* HOST TEST J */ +#define H_TST_NORMAL 0x0000 /* HOST Normal Mode */ +#define P_TST_PACKET 0x0004 /* PERI TEST Packet */ +#define P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ +#define P_TST_K 0x0002 /* PERI TEST K */ +#define P_TST_J 0x0001 /* PERI TEST J */ +#define P_TST_NORMAL 0x0000 /* PERI Normal Mode */ + +/* Data Pin Configuration Register */ +#define LDRV 0x8000 /* b15: Drive Current Adjust */ +#define VIF1 0x0000 /* VIF = 1.8V */ +#define VIF3 0x8000 /* VIF = 3.3V */ +#define INTA 0x0001 /* b1: USB INT-pin active */ + +/* DMAx Pin Configuration Register */ +#define DREQA 0x4000 /* b14: Dreq active select */ +#define BURST 0x2000 /* b13: Burst mode */ +#define DACKA 0x0400 /* b10: Dack active select */ +#define DFORM 0x0380 /* b9-7: DMA mode select */ +#define CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ +#define CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ +#define CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ +#define SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ +#define DENDA 0x0040 /* b6: Dend active select */ +#define PKTM 0x0020 /* b5: Packet mode */ +#define DENDE 0x0010 /* b4: Dend enable */ +#define OBUS 0x0004 /* b2: OUTbus mode */ + +/* CFIFO/DxFIFO Port Select Register */ +#define RCNT 0x8000 /* b15: Read count mode */ +#define REW 0x4000 /* b14: Buffer rewind */ +#define DCLRM 0x2000 /* b13: DMA buffer clear mode */ +#define DREQE 0x1000 /* b12: DREQ output enable */ +#define MBW_8 0x0000 /* 8bit */ +#define MBW_16 0x0400 /* 16bit */ +#define MBW_32 0x0800 /* 32bit */ +#define BIGEND 0x0100 /* b8: Big endian mode */ +#define BYTE_LITTLE 0x0000 /* little dendian */ +#define BYTE_BIG 0x0100 /* big endifan */ +#define ISEL 0x0020 /* b5: DCP FIFO port direction select */ +#define CURPIPE 0x000F /* b2-0: PIPE select */ + +/* CFIFO/DxFIFO Port Control Register */ +#define BVAL 0x8000 /* b15: Buffer valid flag */ +#define BCLR 0x4000 /* b14: Buffer clear */ +#define FRDY 0x2000 /* b13: FIFO ready */ +#define DTLN 0x0FFF /* b11-0: FIFO received data length */ + +/* Interrupt Enable Register 0 */ 
+#define VBSE 0x8000 /* b15: VBUS interrupt */ +#define RSME 0x4000 /* b14: Resume interrupt */ +#define SOFE 0x2000 /* b13: Frame update interrupt */ +#define DVSE 0x1000 /* b12: Device state transition interrupt */ +#define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ +#define BEMPE 0x0400 /* b10: Buffer empty interrupt */ +#define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ +#define BRDYE 0x0100 /* b8: Buffer ready interrupt */ + +/* Interrupt Enable Register 1 */ +#define OVRCRE 0x8000 /* b15: Over-current interrupt */ +#define BCHGE 0x4000 /* b14: USB us chenge interrupt */ +#define DTCHE 0x1000 /* b12: Detach sense interrupt */ +#define ATTCHE 0x0800 /* b11: Attach sense interrupt */ +#define EOFERRE 0x0040 /* b6: EOF error interrupt */ +#define SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ +#define SACKE 0x0010 /* b4: SETUP ACK interrupt */ + +/* BRDY Interrupt Enable/Status Register */ +#define BRDY9 0x0200 /* b9: PIPE9 */ +#define BRDY8 0x0100 /* b8: PIPE8 */ +#define BRDY7 0x0080 /* b7: PIPE7 */ +#define BRDY6 0x0040 /* b6: PIPE6 */ +#define BRDY5 0x0020 /* b5: PIPE5 */ +#define BRDY4 0x0010 /* b4: PIPE4 */ +#define BRDY3 0x0008 /* b3: PIPE3 */ +#define BRDY2 0x0004 /* b2: PIPE2 */ +#define BRDY1 0x0002 /* b1: PIPE1 */ +#define BRDY0 0x0001 /* b1: PIPE0 */ + +/* NRDY Interrupt Enable/Status Register */ +#define NRDY9 0x0200 /* b9: PIPE9 */ +#define NRDY8 0x0100 /* b8: PIPE8 */ +#define NRDY7 0x0080 /* b7: PIPE7 */ +#define NRDY6 0x0040 /* b6: PIPE6 */ +#define NRDY5 0x0020 /* b5: PIPE5 */ +#define NRDY4 0x0010 /* b4: PIPE4 */ +#define NRDY3 0x0008 /* b3: PIPE3 */ +#define NRDY2 0x0004 /* b2: PIPE2 */ +#define NRDY1 0x0002 /* b1: PIPE1 */ +#define NRDY0 0x0001 /* b1: PIPE0 */ + +/* BEMP Interrupt Enable/Status Register */ +#define BEMP9 0x0200 /* b9: PIPE9 */ +#define BEMP8 0x0100 /* b8: PIPE8 */ +#define BEMP7 0x0080 /* b7: PIPE7 */ +#define BEMP6 0x0040 /* b6: PIPE6 */ +#define BEMP5 0x0020 /* b5: PIPE5 */ +#define BEMP4 0x0010 /* b4: PIPE4 */ +#define BEMP3 0x0008 /* b3: PIPE3 */ +#define BEMP2 0x0004 /* b2: PIPE2 */ +#define BEMP1 0x0002 /* b1: PIPE1 */ +#define BEMP0 0x0001 /* b0: PIPE0 */ + +/* SOF Pin Configuration Register */ +#define TRNENSEL 0x0100 /* b8: Select transaction enable period */ +#define BRDYM 0x0040 /* b6: BRDY clear timing */ +#define INTL 0x0020 /* b5: Interrupt sense select */ +#define EDGESTS 0x0010 /* b4: */ +#define SOFMODE 0x000C /* b3-2: SOF pin select */ +#define SOF_125US 0x0008 /* SOF OUT 125us Frame Signal */ +#define SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ +#define SOF_DISABLE 0x0000 /* SOF OUT Disable */ + +/* Interrupt Status Register 0 */ +#define VBINT 0x8000 /* b15: VBUS interrupt */ +#define RESM 0x4000 /* b14: Resume interrupt */ +#define SOFR 0x2000 /* b13: SOF frame update interrupt */ +#define DVST 0x1000 /* b12: Device state transition interrupt */ +#define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ +#define BEMP 0x0400 /* b10: Buffer empty interrupt */ +#define NRDY 0x0200 /* b9: Buffer not ready interrupt */ +#define BRDY 0x0100 /* b8: Buffer ready interrupt */ +#define VBSTS 0x0080 /* b7: VBUS input port */ +#define DVSQ 0x0070 /* b6-4: Device state */ +#define DS_SPD_CNFG 0x0070 /* Suspend Configured */ +#define DS_SPD_ADDR 0x0060 /* Suspend Address */ +#define DS_SPD_DFLT 0x0050 /* Suspend Default */ +#define DS_SPD_POWR 0x0040 /* Suspend Powered */ +#define DS_SUSP 0x0040 /* Suspend */ +#define DS_CNFG 0x0030 /* Configured */ +#define DS_ADDS 0x0020 /* Address */ +#define 
DS_DFLT 0x0010 /* Default */ +#define DS_POWR 0x0000 /* Powered */ +#define DVSQS 0x0030 /* b5-4: Device state */ +#define VALID 0x0008 /* b3: Setup packet detected flag */ +#define CTSQ 0x0007 /* b2-0: Control transfer stage */ +#define CS_SQER 0x0006 /* Sequence error */ +#define CS_WRND 0x0005 /* Control write nodata status stage */ +#define CS_WRSS 0x0004 /* Control write status stage */ +#define CS_WRDS 0x0003 /* Control write data stage */ +#define CS_RDSS 0x0002 /* Control read status stage */ +#define CS_RDDS 0x0001 /* Control read data stage */ +#define CS_IDST 0x0000 /* Idle or setup stage */ + +/* Interrupt Status Register 1 */ +#define OVRCR 0x8000 /* b15: Over-current interrupt */ +#define BCHG 0x4000 /* b14: USB bus chenge interrupt */ +#define DTCH 0x1000 /* b12: Detach sense interrupt */ +#define ATTCH 0x0800 /* b11: Attach sense interrupt */ +#define EOFERR 0x0040 /* b6: EOF-error interrupt */ +#define SIGN 0x0020 /* b5: Setup ignore interrupt */ +#define SACK 0x0010 /* b4: Setup acknowledge interrupt */ + +/* Frame Number Register */ +#define OVRN 0x8000 /* b15: Overrun error */ +#define CRCE 0x4000 /* b14: Received data error */ +#define FRNM 0x07FF /* b10-0: Frame number */ + +/* Micro Frame Number Register */ +#define UFRNM 0x0007 /* b2-0: Micro frame number */ + +/* Default Control Pipe Maxpacket Size Register */ +/* Pipe Maxpacket Size Register */ +#define DEVSEL 0xF000 /* b15-14: Device address select */ +#define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ + +/* Default Control Pipe Control Register */ +#define BSTS 0x8000 /* b15: Buffer status */ +#define SUREQ 0x4000 /* b14: Send USB request */ +#define CSCLR 0x2000 /* b13: complete-split status clear */ +#define CSSTS 0x1000 /* b12: complete-split status */ +#define SUREQCLR 0x0800 /* b11: stop setup request */ +#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ +#define SQSET 0x0080 /* b7: Sequence toggle bit set */ +#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ +#define PBUSY 0x0020 /* b5: pipe busy */ +#define PINGE 0x0010 /* b4: ping enable */ +#define CCPL 0x0004 /* b2: Enable control transfer complete */ +#define PID 0x0003 /* b1-0: Response PID */ +#define PID_STALL11 0x0003 /* STALL */ +#define PID_STALL 0x0002 /* STALL */ +#define PID_BUF 0x0001 /* BUF */ +#define PID_NAK 0x0000 /* NAK */ + +/* Pipe Window Select Register */ +#define PIPENM 0x0007 /* b2-0: Pipe select */ + +/* Pipe Configuration Register */ +#define R8A66597_TYP 0xC000 /* b15-14: Transfer type */ +#define R8A66597_ISO 0xC000 /* Isochronous */ +#define R8A66597_INT 0x8000 /* Interrupt */ +#define R8A66597_BULK 0x4000 /* Bulk */ +#define R8A66597_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ +#define R8A66597_DBLB 0x0200 /* b9: Double buffer mode select */ +#define R8A66597_CNTMD 0x0100 /* b8: Continuous transfer mode select */ +#define R8A66597_SHTNAK 0x0080 /* b7: Transfer end NAK */ +#define R8A66597_DIR 0x0010 /* b4: Transfer direction select */ +#define R8A66597_EPNUM 0x000F /* b3-0: Eendpoint number select */ + +/* Pipe Buffer Configuration Register */ +#define BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ +#define BUFNMB 0x007F /* b6-0: Pipe buffer number */ +#define PIPE0BUF 256 +#define PIPExBUF 64 + +/* Pipe Maxpacket Size Register */ +#define MXPS 0x07FF /* b10-0: Maxpacket size */ + +/* Pipe Cycle Configuration Register */ +#define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ +#define IITV 0x0007 /* b2-0: Isochronous interval */ + +/* Pipex Control Register */ 
+#define BSTS 0x8000 /* b15: Buffer status */ +#define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ +#define CSCLR 0x2000 /* b13: complete-split status clear */ +#define CSSTS 0x1000 /* b12: complete-split status */ +#define ATREPM 0x0400 /* b10: Auto repeat mode */ +#define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ +#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ +#define SQSET 0x0080 /* b7: Sequence toggle bit set */ +#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ +#define PBUSY 0x0020 /* b5: pipe busy */ +#define PID 0x0003 /* b1-0: Response PID */ + +/* PIPExTRE */ +#define TRENB 0x0200 /* b9: Transaction counter enable */ +#define TRCLR 0x0100 /* b8: Transaction counter clear */ + +/* PIPExTRN */ +#define TRNCNT 0xFFFF /* b15-0: Transaction counter */ + +/* DEVADDx */ +#define UPPHUB 0x7800 +#define HUBPORT 0x0700 +#define USBSPD 0x00C0 +#define RTPORT 0x0001 + +#endif /* __LINUX_USB_R8A66597_H */ -- cgit v1.2.3-70-g09d2 From 2c59b0b70b9d5d61c726f179724660c4c2423f31 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 14:41:35 +0000 Subject: usb: m66592-udc platform data on_chip support Convert the m66592-udc driver to use the on_chip flag from platform data to enable on chip behaviour instead of relying on CONFIG_SUPERH_BUILT_IN_M66592 ugliness. This makes the code cleaner and also allows us to support both external and internal m66592 with the same kernel. It also makes the Kconfig part more future proof since we with this patch can add support for new processors with on-chip m66592 without modifying the Kconfig. The patch adds a m66592 header file for platform data and ties in platform data to the existing m66592 devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-highlander/setup.c | 7 + arch/sh/boards/mach-x3proto/setup.c | 7 + arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 8 +- drivers/usb/gadget/Kconfig | 10 -- drivers/usb/gadget/m66592-udc.c | 252 +++++++++++++++++++-------------- drivers/usb/gadget/m66592-udc.h | 89 ++++++------ include/linux/usb/m66592.h | 44 ++++++ 7 files changed, 257 insertions(+), 160 deletions(-) create mode 100644 include/linux/usb/m66592.h (limited to 'include') diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c index 1639f891500..566e69d8d72 100644 --- a/arch/sh/boards/mach-highlander/setup.c +++ b/arch/sh/boards/mach-highlander/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,11 @@ static struct platform_device r8a66597_usb_host_device = { .resource = r8a66597_usb_host_resources, }; +static struct m66592_platdata usbf_platdata = { + .xtal = M66592_PLATDATA_XTAL_24MHZ, + .vif = 1, +}; + static struct resource m66592_usb_peripheral_resources[] = { [0] = { .name = "m66592_udc", @@ -81,6 +87,7 @@ static struct platform_device m66592_usb_peripheral_device = { .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(m66592_usb_peripheral_resources), .resource = m66592_usb_peripheral_resources, diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c index 8913ae39a80..efe4cb9f8a7 100644 --- a/arch/sh/boards/mach-x3proto/setup.c +++ b/arch/sh/boards/mach-x3proto/setup.c @@ -17,6 +17,7 @@ #include #include #include +#include #include static struct resource heartbeat_resources[] = { @@ -89,6 +90,11 @@ static struct platform_device r8a66597_usb_host_device = { 
.resource = r8a66597_usb_host_resources, }; +static struct m66592_platdata usbf_platdata = { + .xtal = M66592_PLATDATA_XTAL_24MHZ, + .vif = 1, +}; + static struct resource m66592_usb_peripheral_resources[] = { [0] = { .name = "m66592_udc", @@ -109,6 +115,7 @@ static struct platform_device m66592_usb_peripheral_device = { .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(m66592_usb_peripheral_resources), .resource = m66592_usb_peripheral_resources, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index ea524a2da3e..0bad14a4423 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -47,9 +48,13 @@ static struct platform_device rtc_device = { .resource = rtc_resources, }; +static struct m66592_platdata usbf_platdata = { + .on_chip = 1, +}; + static struct resource usbf_resources[] = { [0] = { - .name = "m66592_udc", + .name = "USBF", .start = 0x04480000, .end = 0x044800FF, .flags = IORESOURCE_MEM, @@ -67,6 +72,7 @@ static struct platform_device usbf_device = { .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(usbf_resources), .resource = usbf_resources, diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 7f8e83a954a..b7f10bc25c2 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -360,16 +360,6 @@ config USB_M66592 default USB_GADGET select USB_GADGET_SELECTED -config SUPERH_BUILT_IN_M66592 - boolean "Enable SuperH built-in USB like the M66592" - depends on USB_GADGET_M66592 && CPU_SUBTYPE_SH7722 - help - SH7722 has USB like the M66592. - - The transfer rate is very slow when use "Ethernet Gadget". - However, this problem is improved if change a value of - NET_IP_ALIGN to 4. 
- # # Controllers available only in discrete form (and all PCI controllers) # diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 0dddd2f8ff3..a61c70caff1 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c @@ -31,38 +31,12 @@ #include "m66592-udc.h" - MODULE_DESCRIPTION("M66592 USB gadget driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Yoshihiro Shimoda"); MODULE_ALIAS("platform:m66592_udc"); -#define DRIVER_VERSION "26 Jun 2009" - -/* module parameters */ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) -static unsigned short endian = M66592_LITTLE; -module_param(endian, ushort, 0644); -MODULE_PARM_DESC(endian, "data endian: big=0, little=0 (default=0)"); -#else -static unsigned short clock = M66592_XTAL24; -module_param(clock, ushort, 0644); -MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 " - "(default=16384)"); - -static unsigned short vif = M66592_LDRV; -module_param(vif, ushort, 0644); -MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0 (default=32768)"); - -static unsigned short endian; -module_param(endian, ushort, 0644); -MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)"); - -static unsigned short irq_sense = M66592_INTL; -module_param(irq_sense, ushort, 0644); -MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0 " - "(default=2)"); -#endif +#define DRIVER_VERSION "21 July 2009" static const char udc_name[] = "m66592_udc"; static const char *m66592_ep_name[] = { @@ -244,6 +218,7 @@ static inline int get_buffer_size(struct m66592 *m66592, u16 pipenum) static inline void pipe_change(struct m66592 *m66592, u16 pipenum) { struct m66592_ep *ep = m66592->pipenum2ep[pipenum]; + unsigned short mbw; if (ep->use_dma) return; @@ -252,7 +227,12 @@ static inline void pipe_change(struct m66592 *m66592, u16 pipenum) ndelay(450); - m66592_bset(m66592, M66592_MBW, ep->fifosel); + if (m66592->pdata->on_chip) + mbw = M66592_MBW_32; + else + mbw = M66592_MBW_16; + + m66592_bset(m66592, mbw, ep->fifosel); } static int pipe_buffer_setting(struct m66592 *m66592, @@ -332,6 +312,7 @@ static void pipe_buffer_release(struct m66592 *m66592, static void pipe_initialize(struct m66592_ep *ep) { struct m66592 *m66592 = ep->m66592; + unsigned short mbw; m66592_mdfy(m66592, 0, M66592_CURPIPE, ep->fifosel); @@ -343,7 +324,12 @@ static void pipe_initialize(struct m66592_ep *ep) ndelay(450); - m66592_bset(m66592, M66592_MBW, ep->fifosel); + if (m66592->pdata->on_chip) + mbw = M66592_MBW_32; + else + mbw = M66592_MBW_16; + + m66592_bset(m66592, mbw, ep->fifosel); } } @@ -359,15 +345,13 @@ static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, ep->fifosel = M66592_D0FIFOSEL; ep->fifoctr = M66592_D0FIFOCTR; ep->fifotrn = M66592_D0FIFOTRN; -#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) - } else if (m66592->num_dma == 1) { + } else if (!m66592->pdata->on_chip && m66592->num_dma == 1) { m66592->num_dma++; ep->use_dma = 1; ep->fifoaddr = M66592_D1FIFO; ep->fifosel = M66592_D1FIFOSEL; ep->fifoctr = M66592_D1FIFOCTR; ep->fifotrn = M66592_D1FIFOTRN; -#endif } else { ep->use_dma = 0; ep->fifoaddr = M66592_CFIFO; @@ -612,76 +596,120 @@ static void start_ep0(struct m66592_ep *ep, struct m66592_request *req) } } -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) static void init_controller(struct m66592 *m66592) { - m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ - m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); - m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); - m66592_bset(m66592, 
M66592_USBE, M66592_SYSCFG); + unsigned int endian; - /* This is a workaound for SH7722 2nd cut */ - m66592_bset(m66592, 0x8000, M66592_DVSTCTR); - m66592_bset(m66592, 0x1000, M66592_TESTMODE); - m66592_bclr(m66592, 0x8000, M66592_DVSTCTR); + if (m66592->pdata->on_chip) { + if (m66592->pdata->endian) + endian = 0; /* big endian */ + else + endian = M66592_LITTLE; /* little endian */ - m66592_bset(m66592, M66592_INTL, M66592_INTENB1); + m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ + m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); + m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); + m66592_bset(m66592, M66592_USBE, M66592_SYSCFG); - m66592_write(m66592, 0, M66592_CFBCFG); - m66592_write(m66592, 0, M66592_D0FBCFG); - m66592_bset(m66592, endian, M66592_CFBCFG); - m66592_bset(m66592, endian, M66592_D0FBCFG); -} -#else /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ -static void init_controller(struct m66592 *m66592) -{ - m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND), - M66592_PINCFG); - m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ - m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG); + /* This is a workaound for SH7722 2nd cut */ + m66592_bset(m66592, 0x8000, M66592_DVSTCTR); + m66592_bset(m66592, 0x1000, M66592_TESTMODE); + m66592_bclr(m66592, 0x8000, M66592_DVSTCTR); - m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); - m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); - m66592_bset(m66592, M66592_USBE, M66592_SYSCFG); + m66592_bset(m66592, M66592_INTL, M66592_INTENB1); + + m66592_write(m66592, 0, M66592_CFBCFG); + m66592_write(m66592, 0, M66592_D0FBCFG); + m66592_bset(m66592, endian, M66592_CFBCFG); + m66592_bset(m66592, endian, M66592_D0FBCFG); + } else { + unsigned int clock, vif, irq_sense; + + if (m66592->pdata->endian) + endian = M66592_BIGEND; /* big endian */ + else + endian = 0; /* little endian */ + + if (m66592->pdata->vif) + vif = M66592_LDRV; /* 3.3v */ + else + vif = 0; /* 1.5v */ + + switch (m66592->pdata->xtal) { + case M66592_PLATDATA_XTAL_12MHZ: + clock = M66592_XTAL12; + break; + case M66592_PLATDATA_XTAL_24MHZ: + clock = M66592_XTAL24; + break; + case M66592_PLATDATA_XTAL_48MHZ: + clock = M66592_XTAL48; + break; + default: + pr_warning("m66592-udc: xtal configuration error\n"); + clock = 0; + } - m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); + switch (m66592->irq_trigger) { + case IRQF_TRIGGER_LOW: + irq_sense = M66592_INTL; + break; + case IRQF_TRIGGER_FALLING: + irq_sense = 0; + break; + default: + pr_warning("m66592-udc: irq trigger config error\n"); + irq_sense = 0; + } - msleep(3); + m66592_bset(m66592, + (vif & M66592_LDRV) | (endian & M66592_BIGEND), + M66592_PINCFG); + m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ + m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, + M66592_SYSCFG); + m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); + m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); + m66592_bset(m66592, M66592_USBE, M66592_SYSCFG); - m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG); + m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); + + msleep(3); - msleep(1); + m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG); - m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG); + msleep(1); - m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); - m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, - M66592_DMA0CFG); + m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG); + + m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); + 
m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, + M66592_DMA0CFG); + } } -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ static void disable_controller(struct m66592 *m66592) { -#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) - m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG); - udelay(1); - m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG); - udelay(1); - m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG); - udelay(1); - m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG); -#endif + if (!m66592->pdata->on_chip) { + m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG); + udelay(1); + m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG); + udelay(1); + m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG); + udelay(1); + m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG); + } } static void m66592_start_xclock(struct m66592 *m66592) { -#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) u16 tmp; - tmp = m66592_read(m66592, M66592_SYSCFG); - if (!(tmp & M66592_XCKE)) - m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); -#endif + if (!m66592->pdata->on_chip) { + tmp = m66592_read(m66592, M66592_SYSCFG); + if (!(tmp & M66592_XCKE)) + m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); + } } /*-------------------------------------------------------------------------*/ @@ -1169,8 +1197,7 @@ static irqreturn_t m66592_irq(int irq, void *_m66592) intsts0 = m66592_read(m66592, M66592_INTSTS0); intenb0 = m66592_read(m66592, M66592_INTENB0); -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - if (!intsts0 && !intenb0) { + if (m66592->pdata->on_chip && !intsts0 && !intenb0) { /* * When USB clock stops, it cannot read register. Even if a * clock stops, the interrupt occurs. So this driver turn on @@ -1180,7 +1207,6 @@ static irqreturn_t m66592_irq(int irq, void *_m66592) intsts0 = m66592_read(m66592, M66592_INTSTS0); intenb0 = m66592_read(m66592, M66592_INTENB0); } -#endif savepipe = m66592_read(m66592, M66592_CFIFOSEL); @@ -1526,9 +1552,11 @@ static int __exit m66592_remove(struct platform_device *pdev) iounmap(m66592->reg); free_irq(platform_get_irq(pdev, 0), m66592); m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) - clk_disable(m66592->clk); - clk_put(m66592->clk); +#ifdef CONFIG_HAVE_CLK + if (m66592->pdata->on_chip) { + clk_disable(m66592->clk); + clk_put(m66592->clk); + } #endif kfree(m66592); return 0; @@ -1540,11 +1568,10 @@ static void nop_completion(struct usb_ep *ep, struct usb_request *r) static int __init m66592_probe(struct platform_device *pdev) { - struct resource *res; - int irq; + struct resource *res, *ires; void __iomem *reg = NULL; struct m66592 *m66592 = NULL; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK char clk_name[8]; #endif int ret = 0; @@ -1557,10 +1584,11 @@ static int __init m66592_probe(struct platform_device *pdev) goto clean_up; } - irq = platform_get_irq(pdev, 0); - if (irq < 0) { + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { ret = -ENODEV; - pr_err("platform_get_irq error.\n"); + dev_err(&pdev->dev, + "platform_get_resource IORESOURCE_IRQ error.\n"); goto clean_up; } @@ -1571,6 +1599,12 @@ static int __init m66592_probe(struct platform_device *pdev) goto clean_up; } + if (pdev->dev.platform_data == NULL) { + dev_err(&pdev->dev, "no platform data\n"); + ret = -ENODEV; + goto clean_up; + } + /* initialize ucd */ m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL); if (m66592 == NULL) { @@ -1578,6 +1612,9 @@ static int __init m66592_probe(struct 
platform_device *pdev) goto clean_up; } + m66592->pdata = pdev->dev.platform_data; + m66592->irq_trigger = ires->flags & IRQF_TRIGGER_MASK; + spin_lock_init(&m66592->lock); dev_set_drvdata(&pdev->dev, m66592); @@ -1595,22 +1632,25 @@ static int __init m66592_probe(struct platform_device *pdev) m66592->timer.data = (unsigned long)m66592; m66592->reg = reg; - ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED, + ret = request_irq(ires->start, m66592_irq, IRQF_DISABLED | IRQF_SHARED, udc_name, m66592); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); goto clean_up; } -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) - snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id); - m66592->clk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(m66592->clk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); - ret = PTR_ERR(m66592->clk); - goto clean_up2; +#ifdef CONFIG_HAVE_CLK + if (m66592->pdata->on_chip) { + snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id); + m66592->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(m66592->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", + clk_name); + ret = PTR_ERR(m66592->clk); + goto clean_up2; + } + clk_enable(m66592->clk); } - clk_enable(m66592->clk); #endif INIT_LIST_HEAD(&m66592->gadget.ep_list); m66592->gadget.ep0 = &m66592->ep[0].ep; @@ -1652,12 +1692,14 @@ static int __init m66592_probe(struct platform_device *pdev) return 0; clean_up3: -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) - clk_disable(m66592->clk); - clk_put(m66592->clk); +#ifdef CONFIG_HAVE_CLK + if (m66592->pdata->on_chip) { + clk_disable(m66592->clk); + clk_put(m66592->clk); + } clean_up2: #endif - free_irq(irq, m66592); + free_irq(ires->start, m66592); clean_up: if (m66592) { if (m66592->ep0_req) diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h index 9a9c2bf9fbd..8b960deed68 100644 --- a/drivers/usb/gadget/m66592-udc.h +++ b/drivers/usb/gadget/m66592-udc.h @@ -23,10 +23,12 @@ #ifndef __M66592_UDC_H__ #define __M66592_UDC_H__ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK #include #endif +#include + #define M66592_SYSCFG 0x00 #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ #define M66592_XTAL48 0x8000 /* 48MHz */ @@ -76,11 +78,11 @@ #define M66592_P_TST_J 0x0001 /* PERI TEST J */ #define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) +/* built-in registers */ #define M66592_CFBCFG 0x0A #define M66592_D0FBCFG 0x0C #define M66592_LITTLE 0x0100 /* b8: Little endian mode */ -#else +/* external chip case */ #define M66592_PINCFG 0x0A #define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */ #define M66592_BIGEND 0x0100 /* b8: Big endian mode */ @@ -100,8 +102,8 @@ #define M66592_PKTM 0x0020 /* b5: Packet mode */ #define M66592_DENDE 0x0010 /* b4: Dend enable */ #define M66592_OBUS 0x0004 /* b2: OUTbus mode */ -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ +/* common case */ #define M66592_CFIFO 0x10 #define M66592_D0FIFO 0x14 #define M66592_D1FIFO 0x18 @@ -113,13 +115,9 @@ #define M66592_REW 0x4000 /* b14: Buffer rewind */ #define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */ #define M66592_DREQE 0x1000 /* b12: DREQ output enable */ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) -#define M66592_MBW 0x0800 /* b11: Maximum bit width for FIFO */ -#else -#define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO */ -#define M66592_MBW_8 0x0000 /* 8bit 
*/ -#define M66592_MBW_16 0x0400 /* 16bit */ -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ +#define M66592_MBW_8 0x0000 /* 8bit */ +#define M66592_MBW_16 0x0400 /* 16bit */ +#define M66592_MBW_32 0x0800 /* 32bit */ #define M66592_TRENB 0x0200 /* b9: Transaction counter enable */ #define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */ #define M66592_DEZPM 0x0080 /* b7: Zero-length packet mode */ @@ -480,9 +478,11 @@ struct m66592_ep { struct m66592 { spinlock_t lock; void __iomem *reg; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK struct clk *clk; #endif + struct m66592_platdata *pdata; + unsigned long irq_trigger; struct usb_gadget gadget; struct usb_gadget_driver *driver; @@ -546,13 +546,13 @@ static inline void m66592_read_fifo(struct m66592 *m66592, { unsigned long fifoaddr = (unsigned long)m66592->reg + offset; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - len = (len + 3) / 4; - insl(fifoaddr, buf, len); -#else - len = (len + 1) / 2; - insw(fifoaddr, buf, len); -#endif + if (m66592->pdata->on_chip) { + len = (len + 3) / 4; + insl(fifoaddr, buf, len); + } else { + len = (len + 1) / 2; + insw(fifoaddr, buf, len); + } } static inline void m66592_write(struct m66592 *m66592, u16 val, @@ -566,33 +566,34 @@ static inline void m66592_write_fifo(struct m66592 *m66592, void *buf, unsigned long len) { unsigned long fifoaddr = (unsigned long)m66592->reg + offset; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - unsigned long count; - unsigned char *pb; - int i; - - count = len / 4; - outsl(fifoaddr, buf, count); - - if (len & 0x00000003) { - pb = buf + count * 4; - for (i = 0; i < (len & 0x00000003); i++) { - if (m66592_read(m66592, M66592_CFBCFG)) /* little */ - outb(pb[i], fifoaddr + (3 - i)); - else - outb(pb[i], fifoaddr + i); + + if (m66592->pdata->on_chip) { + unsigned long count; + unsigned char *pb; + int i; + + count = len / 4; + outsl(fifoaddr, buf, count); + + if (len & 0x00000003) { + pb = buf + count * 4; + for (i = 0; i < (len & 0x00000003); i++) { + if (m66592_read(m66592, M66592_CFBCFG)) /* le */ + outb(pb[i], fifoaddr + (3 - i)); + else + outb(pb[i], fifoaddr + i); + } + } + } else { + unsigned long odd = len & 0x0001; + + len = len / 2; + outsw(fifoaddr, buf, len); + if (odd) { + unsigned char *p = buf + len*2; + outb(*p, fifoaddr); } } -#else - unsigned long odd = len & 0x0001; - - len = len / 2; - outsw(fifoaddr, buf, len); - if (odd) { - unsigned char *p = buf + len*2; - outb(*p, fifoaddr); - } -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ } static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h new file mode 100644 index 00000000000..cda9625e7df --- /dev/null +++ b/include/linux/usb/m66592.h @@ -0,0 +1,44 @@ +/* + * M66592 driver platform data + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __LINUX_USB_M66592_H +#define __LINUX_USB_M66592_H + +#define M66592_PLATDATA_XTAL_12MHZ 0x01 +#define M66592_PLATDATA_XTAL_24MHZ 0x02 +#define M66592_PLATDATA_XTAL_48MHZ 0x03 + +struct m66592_platdata { + /* one = on chip controller, zero = external controller */ + unsigned on_chip:1; + + /* one = big endian, zero = little endian */ + unsigned endian:1; + + /* (external controller only) M66592_PLATDATA_XTAL_nnMHZ */ + unsigned xtal:2; + + /* (external controller only) one = 3.3V, zero = 1.5V */ + unsigned vif:1; + +}; + +#endif /* __LINUX_USB_M66592_H */ + -- cgit v1.2.3-70-g09d2 From 0c193054a4c1cf190d2f23e5e91bd14402e43912 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Mon, 27 Jul 2009 19:09:19 -0400 Subject: nfsd41: change from page to memory based drc limits NFSD_SLOT_CACHE_SIZE is the size of all encoded operation responses (excluding the sequence operation) that we want to cache. For now, keep NFSD_SLOT_CACHE_SIZE at PAGE_SIZE. It will be reduced when the DRC is changed from page based to memory based. Signed-off-by: Andy Adamson Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 28 +++++++++++++--------------- fs/nfsd/nfssvc.c | 13 ++++++------- include/linux/nfsd/nfsd.h | 4 ++-- include/linux/nfsd/state.h | 1 + 4 files changed, 22 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 70cba3fbfa6..e2b11b1b515 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -414,31 +414,31 @@ gen_sessionid(struct nfsd4_session *ses) /* * Give the client the number of slots it requests bound by - * NFSD_MAX_SLOTS_PER_SESSION and by sv_drc_max_pages. + * NFSD_MAX_SLOTS_PER_SESSION and by nfsd_drc_max_mem. * - * If we run out of pages (sv_drc_pages_used == sv_drc_max_pages) we - * should (up to a point) re-negotiate active sessions and reduce their - * slot usage to make rooom for new connections. For now we just fail the - * create session. + * If we run out of reserved DRC memory we should (up to a point) re-negotiate + * active sessions and reduce their slot usage to make rooom for new + * connections. For now we just fail the create session.
*/ static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan) { - int np; + int mem; if (fchan->maxreqs < 1) return nfserr_inval; else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; - np = fchan->maxreqs * NFSD_PAGES_PER_SLOT; + mem = fchan->maxreqs * NFSD_SLOT_CACHE_SIZE; spin_lock(&nfsd_drc_lock); - if (np + nfsd_drc_pages_used > nfsd_drc_max_pages) - np = nfsd_drc_max_pages - nfsd_drc_pages_used; - nfsd_drc_pages_used += np; + if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) + mem = ((nfsd_drc_max_mem - nfsd_drc_mem_used) / + NFSD_SLOT_CACHE_SIZE) * NFSD_SLOT_CACHE_SIZE; + nfsd_drc_mem_used += mem; spin_unlock(&nfsd_drc_lock); - fchan->maxreqs = np / NFSD_PAGES_PER_SLOT; + fchan->maxreqs = mem / NFSD_SLOT_CACHE_SIZE; if (fchan->maxreqs == 0) return nfserr_resource; return 0; @@ -465,9 +465,7 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp, fchan->maxresp_sz = maxcount; session_fchan->maxresp_sz = fchan->maxresp_sz; - /* Set the max response cached size our default which is - * a multiple of PAGE_SIZE and small */ - session_fchan->maxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE; + session_fchan->maxresp_cached = NFSD_SLOT_CACHE_SIZE; fchan->maxresp_cached = session_fchan->maxresp_cached; /* Use the client's maxops if possible */ @@ -585,7 +583,7 @@ free_session(struct kref *kref) nfsd4_release_respages(e->ce_respages, e->ce_resused); } spin_lock(&nfsd_drc_lock); - nfsd_drc_pages_used -= ses->se_fchannel.maxreqs * NFSD_PAGES_PER_SLOT; + nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE; spin_unlock(&nfsd_drc_lock); kfree(ses); } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 9be2a1932f8..5a280a9cb54 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -74,8 +74,8 @@ struct svc_serv *nfsd_serv; * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage. 
*/ spinlock_t nfsd_drc_lock; -unsigned int nfsd_drc_max_pages; -unsigned int nfsd_drc_pages_used; +unsigned int nfsd_drc_max_mem; +unsigned int nfsd_drc_mem_used; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) static struct svc_stat nfsd_acl_svcstats; @@ -247,12 +247,11 @@ void nfsd_reset_versions(void) static void set_max_drc(void) { #define NFSD_DRC_SIZE_SHIFT 10 - nfsd_drc_max_pages = nr_free_buffer_pages() - >> NFSD_DRC_SIZE_SHIFT; - nfsd_drc_pages_used = 0; + nfsd_drc_max_mem = (nr_free_buffer_pages() + >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; + nfsd_drc_mem_used = 0; spin_lock_init(&nfsd_drc_lock); - dprintk("%s nfsd_drc_max_pages %u\n", __func__, - nfsd_drc_max_pages); + dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem); } int nfsd_create_serv(void) diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 2571f856908..2812ed52669 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -57,8 +57,8 @@ extern u32 nfsd_supported_minorversion; extern struct mutex nfsd_mutex; extern struct svc_serv *nfsd_serv; extern spinlock_t nfsd_drc_lock; -extern unsigned int nfsd_drc_max_pages; -extern unsigned int nfsd_drc_pages_used; +extern unsigned int nfsd_drc_max_mem; +extern unsigned int nfsd_drc_mem_used; extern struct seq_operations nfs_exports_op; diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 57ab2ed0845..a6c87d62389 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -96,6 +96,7 @@ struct nfs4_cb_conn { #define NFSD_MAX_SLOTS_PER_SESSION 128 /* Maximum number of pages per slot cache entry */ #define NFSD_PAGES_PER_SLOT 1 +#define NFSD_SLOT_CACHE_SIZE PAGE_SIZE /* Maximum number of operations per session compound */ #define NFSD_MAX_OPS_PER_COMPOUND 16 -- cgit v1.2.3-70-g09d2 From 49557cc74c7bdf6a984be227ead9a84b3a26f053 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 23 Jul 2009 19:02:16 -0400 Subject: nfsd41: Use separate DRC for setclientid Instead of trying to share the generic 4.1 reply cache code for the CREATE_SESSION reply cache, it's simpler to handle CREATE_SESSION separately. The nfs41 single slot clientid DRC holds the results of create session processing. CREATE_SESSION can be preceded by a SEQUENCE operation (an embedded CREATE_SESSION) and the create session single slot cache must be maintained. nfsd4_replay_cache_entry() and nfsd4_store_cache_entry() do not implement the replay of an embedded CREATE_SESSION. The clientid DRC slot does not need the inuse, cachethis or other fields that the multiple slot session cache uses. Replace the clientid DRC cache struct nfs4_slot cache with a new nfsd4_clid_slot cache. Save the xdr struct nfsd4_create_session into the cache at the end of processing, and on a replay, replace the struct for the replay request with the cached version all while under the state lock. nfsd4_proc_compound will handle both the solo and embedded CREATE_SESSION case via the normal use of encode_operation. Errors that do not change the create session cache: A create session NFS4ERR_STALE_CLIENTID error means that a client record (and associated create session slot) could not be found and therefore can't be changed. NFSERR_SEQ_MISORDERED errors do not change the slot cache. All other errors get cached. Remove the clientid DRC specific check in nfs4svc_encode_compoundres to put the session only if cstate.session is set which will now always be true. Signed-off-by: Andy Adamson Signed-off-by: J.
Bruce Fields --- fs/nfsd/nfs4proc.c | 2 +- fs/nfsd/nfs4state.c | 64 +++++++++++++++++++++++++++------------------- fs/nfsd/nfs4xdr.c | 3 +-- include/linux/nfsd/state.h | 21 ++++++++++++++- include/linux/nfsd/xdr4.h | 12 --------- 5 files changed, 60 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index d781658e808..d606c6a427d 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1120,7 +1120,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, BUG_ON(op->status == nfs_ok); encode_op: - /* Only from SEQUENCE or CREATE_SESSION */ + /* Only from SEQUENCE */ if (resp->cstate.status == nfserr_replay_cache) { dprintk("%s NFS4.1 replay from cache\n", __func__); if (nfsd4_not_cached(resp)) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 99df8e7a687..7729d092c8a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -653,8 +653,6 @@ static inline void free_client(struct nfs4_client *clp) { shutdown_callback_client(clp); - nfsd4_release_respages(clp->cl_slot.sl_cache_entry.ce_respages, - clp->cl_slot.sl_cache_entry.ce_resused); if (clp->cl_cred.cr_group_info) put_group_info(clp->cl_cred.cr_group_info); kfree(clp->cl_principal); @@ -1293,12 +1291,11 @@ out_copy: exid->clientid.cl_boot = new->cl_clientid.cl_boot; exid->clientid.cl_id = new->cl_clientid.cl_id; - new->cl_slot.sl_seqid = 0; exid->seqid = 1; nfsd4_set_ex_flags(new, exid); dprintk("nfsd4_exchange_id seqid %d flags %x\n", - new->cl_slot.sl_seqid, new->cl_exchange_flags); + new->cl_cs_slot.sl_seqid, new->cl_exchange_flags); status = nfs_ok; out: @@ -1334,15 +1331,35 @@ check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) return nfserr_seq_misordered; } +/* + * Cache the create session result into the create session single DRC + * slot cache by saving the xdr structure. sl_seqid has been set. + * Do this for solo or embedded create session operations. + */ +static void +nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, + struct nfsd4_clid_slot *slot, int nfserr) +{ + slot->sl_status = nfserr; + memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); +} + +static __be32 +nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, + struct nfsd4_clid_slot *slot) +{ + memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); + return slot->sl_status; +} + __be32 nfsd4_create_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_create_session *cr_ses) { u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr; - struct nfsd4_compoundres *resp = rqstp->rq_resp; struct nfs4_client *conf, *unconf; - struct nfsd4_slot *slot = NULL; + struct nfsd4_clid_slot *cs_slot = NULL; int status = 0; nfs4_lock_state(); @@ -1350,25 +1367,22 @@ nfsd4_create_session(struct svc_rqst *rqstp, conf = find_confirmed_client(&cr_ses->clientid); if (conf) { - slot = &conf->cl_slot; - status = check_slot_seqid(cr_ses->seqid, slot->sl_seqid, - slot->sl_inuse); + cs_slot = &conf->cl_cs_slot; + status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status == nfserr_replay_cache) { dprintk("Got a create_session replay! 
seqid= %d\n", - slot->sl_seqid); - cstate->slot = slot; - cstate->status = status; + cs_slot->sl_seqid); /* Return the cached reply status */ - status = nfsd4_replay_cache_entry(resp, NULL); + status = nfsd4_replay_create_session(cr_ses, cs_slot); goto out; - } else if (cr_ses->seqid != conf->cl_slot.sl_seqid + 1) { + } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) { status = nfserr_seq_misordered; dprintk("Sequence misordered!\n"); dprintk("Expected seqid= %d but got seqid= %d\n", - slot->sl_seqid, cr_ses->seqid); + cs_slot->sl_seqid, cr_ses->seqid); goto out; } - conf->cl_slot.sl_seqid++; + cs_slot->sl_seqid++; } else if (unconf) { if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || (ip_addr != unconf->cl_addr)) { @@ -1376,16 +1390,15 @@ nfsd4_create_session(struct svc_rqst *rqstp, goto out; } - slot = &unconf->cl_slot; - status = check_slot_seqid(cr_ses->seqid, slot->sl_seqid, - slot->sl_inuse); + cs_slot = &unconf->cl_cs_slot; + status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status) { /* an unconfirmed replay returns misordered */ status = nfserr_seq_misordered; - goto out; + goto out_cache; } - slot->sl_seqid++; /* from 0 to 1 */ + cs_slot->sl_seqid++; /* from 0 to 1 */ move_to_confirmed(unconf); /* @@ -1406,12 +1419,11 @@ nfsd4_create_session(struct svc_rqst *rqstp, memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN); - cr_ses->seqid = slot->sl_seqid; + cr_ses->seqid = cs_slot->sl_seqid; - slot->sl_inuse = true; - cstate->slot = slot; - /* Ensure a page is used for the cache */ - slot->sl_cache_entry.ce_cachethis = 1; +out_cache: + /* cache solo and embedded create sessions under the state lock */ + nfsd4_cache_create_session(cr_ses, cs_slot, status); out: nfs4_unlock_state(); dprintk("%s returns %d\n", __func__, ntohl(status)); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 2dcc7feaa6f..fdf632bf1cf 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3313,8 +3313,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); resp->cstate.slot->sl_inuse = 0; } - if (resp->cstate.session) - nfsd4_put_session(resp->cstate.session); + nfsd4_put_session(resp->cstate.session); } return 1; } diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index a6c87d62389..58bb19784e1 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -127,6 +127,25 @@ struct nfsd4_channel_attrs { u32 rdma_attrs; }; +struct nfsd4_create_session { + clientid_t clientid; + struct nfs4_sessionid sessionid; + u32 seqid; + u32 flags; + struct nfsd4_channel_attrs fore_channel; + struct nfsd4_channel_attrs back_channel; + u32 callback_prog; + u32 uid; + u32 gid; +}; + +/* The single slot clientid cache structure */ +struct nfsd4_clid_slot { + u32 sl_seqid; + __be32 sl_status; + struct nfsd4_create_session sl_cr_ses; +}; + struct nfsd4_session { struct kref se_ref; struct list_head se_hash; /* hash by sessionid */ @@ -193,7 +212,7 @@ struct nfs4_client { /* for nfs41 */ struct list_head cl_sessions; - struct nfsd4_slot cl_slot; /* create_session slot */ + struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ u32 cl_exchange_flags; struct nfs4_sessionid cl_sessionid; }; diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 2bacf753506..5e4beb0deb8 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -366,18 +366,6 @@ struct nfsd4_exchange_id { int spa_how; }; -struct nfsd4_create_session { - 
clientid_t clientid; - struct nfs4_sessionid sessionid; - u32 seqid; - u32 flags; - struct nfsd4_channel_attrs fore_channel; - struct nfsd4_channel_attrs back_channel; - u32 callback_prog; - u32 uid; - u32 gid; -}; - struct nfsd4_sequence { struct nfs4_sessionid sessionid; /* request/response */ u32 seqid; /* request/response */ -- cgit v1.2.3-70-g09d2 From e5f5ccb646bc6009572b5c23201b5e81638ff150 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 23 Jul 2009 20:35:53 +0200 Subject: power_supply: get_by_name and set_charged functionality This adds a function that indicates that a battery is fully charged. It also includes functions to get a power_supply device from the class of registered devices by name reference. These can be used to find a specific battery to call power_supply_set_battery_charged() on. Some battery drivers might need this information to calibrate themselves. Signed-off-by: Daniel Mack Cc: Ian Molton Cc: Anton Vorontsov Cc: Matt Reimer Signed-off-by: Anton Vorontsov --- drivers/power/power_supply_core.c | 28 ++++++++++++++++++++++++++++ include/linux/power_supply.h | 3 +++ 2 files changed, 31 insertions(+) (limited to 'include') diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 12cd6e36ff1..cce75b40b43 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -116,6 +116,34 @@ int power_supply_is_system_supplied(void) } EXPORT_SYMBOL_GPL(power_supply_is_system_supplied); +int power_supply_set_battery_charged(struct power_supply *psy) +{ + if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) { + psy->set_charged(psy); + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(power_supply_set_battery_charged); + +static int power_supply_match_device_by_name(struct device *dev, void *data) +{ + const char *name = data; + struct power_supply *psy = dev_get_drvdata(dev); + + return strcmp(psy->name, name) == 0; +} + +struct power_supply *power_supply_get_by_name(char *name) +{ + struct device *dev = class_find_device(power_supply_class, NULL, name, + power_supply_match_device_by_name); + + return dev ? dev_get_drvdata(dev) : NULL; +} +EXPORT_SYMBOL_GPL(power_supply_get_by_name); + int power_supply_register(struct device *parent, struct power_supply *psy) { int rc = 0; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 4c7c6fc3548..b5d096d3a9b 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -144,6 +144,7 @@ struct power_supply { enum power_supply_property psp, union power_supply_propval *val); void (*external_power_changed)(struct power_supply *psy); + void (*set_charged)(struct power_supply *psy); /* For APM emulation, think legacy userspace. 
*/ int use_for_apm; @@ -183,8 +184,10 @@ struct power_supply_info { int use_for_apm; }; +extern struct power_supply *power_supply_get_by_name(char *name); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); +extern int power_supply_set_battery_charged(struct power_supply *psy); #if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE) extern int power_supply_is_system_supplied(void); -- cgit v1.2.3-70-g09d2 From 42c4ab41a176ee784c0f28c0b29025a8fc34f05a Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 29 Jul 2009 12:15:26 +0200 Subject: itimers: Merge ITIMER_VIRT and ITIMER_PROF Both cpu itimers have same data flow in the few places, this patch make unification of code related with VIRT and PROF itimers. Signed-off-by: Stanislaw Gruszka Acked-by: Peter Zijlstra Acked-by: Thomas Gleixner Cc: Oleg Nesterov Cc: Andrew Morton Cc: Paul Mackerras Cc: Benjamin Herrenschmidt LKML-Reference: <1248862529-6063-2-git-send-email-sgruszka@redhat.com> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 14 ++++- kernel/fork.c | 9 +-- kernel/itimer.c | 146 +++++++++++++++++++++------------------------- kernel/posix-cpu-timers.c | 98 +++++++++++++++---------------- 4 files changed, 130 insertions(+), 137 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 3ab08e4bb6b..3b3efaddd95 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -470,6 +470,11 @@ struct pacct_struct { unsigned long ac_minflt, ac_majflt; }; +struct cpu_itimer { + cputime_t expires; + cputime_t incr; +}; + /** * struct task_cputime - collected CPU time counts * @utime: time spent in user mode, in &cputime_t units @@ -564,9 +569,12 @@ struct signal_struct { struct pid *leader_pid; ktime_t it_real_incr; - /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ - cputime_t it_prof_expires, it_virt_expires; - cputime_t it_prof_incr, it_virt_incr; + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; /* * Thread group totals for process CPU timers. diff --git a/kernel/fork.c b/kernel/fork.c index 29b532e718f..893ab0bf5e3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include @@ -790,10 +791,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) thread_group_cputime_init(sig); /* Expiration times and increments. */ - sig->it_virt_expires = cputime_zero; - sig->it_virt_incr = cputime_zero; - sig->it_prof_expires = cputime_zero; - sig->it_prof_incr = cputime_zero; + sig->it[CPUCLOCK_PROF].expires = cputime_zero; + sig->it[CPUCLOCK_PROF].incr = cputime_zero; + sig->it[CPUCLOCK_VIRT].expires = cputime_zero; + sig->it[CPUCLOCK_VIRT].incr = cputime_zero; /* Cached expiration times. 
*/ sig->cputime_expires.prof_exp = cputime_zero; diff --git a/kernel/itimer.c b/kernel/itimer.c index 58762f7077e..852c88ddd1f 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -41,10 +41,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer) return ktime_to_timeval(rem); } +static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, + struct itimerval *value) +{ + cputime_t cval, cinterval; + struct cpu_itimer *it = &tsk->signal->it[clock_id]; + + spin_lock_irq(&tsk->sighand->siglock); + + cval = it->expires; + cinterval = it->incr; + if (!cputime_eq(cval, cputime_zero)) { + struct task_cputime cputime; + cputime_t t; + + thread_group_cputimer(tsk, &cputime); + if (clock_id == CPUCLOCK_PROF) + t = cputime_add(cputime.utime, cputime.stime); + else + /* CPUCLOCK_VIRT */ + t = cputime.utime; + + if (cputime_le(cval, t)) + /* about to fire */ + cval = jiffies_to_cputime(1); + else + cval = cputime_sub(cval, t); + } + + spin_unlock_irq(&tsk->sighand->siglock); + + cputime_to_timeval(cval, &value->it_value); + cputime_to_timeval(cinterval, &value->it_interval); +} + int do_getitimer(int which, struct itimerval *value) { struct task_struct *tsk = current; - cputime_t cinterval, cval; switch (which) { case ITIMER_REAL: @@ -55,44 +88,10 @@ int do_getitimer(int which, struct itimerval *value) spin_unlock_irq(&tsk->sighand->siglock); break; case ITIMER_VIRTUAL: - spin_lock_irq(&tsk->sighand->siglock); - cval = tsk->signal->it_virt_expires; - cinterval = tsk->signal->it_virt_incr; - if (!cputime_eq(cval, cputime_zero)) { - struct task_cputime cputime; - cputime_t utime; - - thread_group_cputimer(tsk, &cputime); - utime = cputime.utime; - if (cputime_le(cval, utime)) { /* about to fire */ - cval = jiffies_to_cputime(1); - } else { - cval = cputime_sub(cval, utime); - } - } - spin_unlock_irq(&tsk->sighand->siglock); - cputime_to_timeval(cval, &value->it_value); - cputime_to_timeval(cinterval, &value->it_interval); + get_cpu_itimer(tsk, CPUCLOCK_VIRT, value); break; case ITIMER_PROF: - spin_lock_irq(&tsk->sighand->siglock); - cval = tsk->signal->it_prof_expires; - cinterval = tsk->signal->it_prof_incr; - if (!cputime_eq(cval, cputime_zero)) { - struct task_cputime times; - cputime_t ptime; - - thread_group_cputimer(tsk, ×); - ptime = cputime_add(times.utime, times.stime); - if (cputime_le(cval, ptime)) { /* about to fire */ - cval = jiffies_to_cputime(1); - } else { - cval = cputime_sub(cval, ptime); - } - } - spin_unlock_irq(&tsk->sighand->siglock); - cputime_to_timeval(cval, &value->it_value); - cputime_to_timeval(cinterval, &value->it_interval); + get_cpu_itimer(tsk, CPUCLOCK_PROF, value); break; default: return(-EINVAL); @@ -128,6 +127,36 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } +static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, + struct itimerval *value, struct itimerval *ovalue) +{ + cputime_t cval, cinterval, nval, ninterval; + struct cpu_itimer *it = &tsk->signal->it[clock_id]; + + nval = timeval_to_cputime(&value->it_value); + ninterval = timeval_to_cputime(&value->it_interval); + + spin_lock_irq(&tsk->sighand->siglock); + + cval = it->expires; + cinterval = it->incr; + if (!cputime_eq(cval, cputime_zero) || + !cputime_eq(nval, cputime_zero)) { + if (cputime_gt(nval, cputime_zero)) + nval = cputime_add(nval, jiffies_to_cputime(1)); + set_process_cpu_timer(tsk, clock_id, &nval, &cval); + } + it->expires = nval; + it->incr = ninterval; + + spin_unlock_irq(&tsk->sighand->siglock); + + if 
(ovalue) { + cputime_to_timeval(cval, &ovalue->it_value); + cputime_to_timeval(cinterval, &ovalue->it_interval); + } +} + /* * Returns true if the timeval is in canonical form */ @@ -139,7 +168,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) struct task_struct *tsk = current; struct hrtimer *timer; ktime_t expires; - cputime_t cval, cinterval, nval, ninterval; /* * Validate the timevals in value. @@ -174,48 +202,10 @@ again: spin_unlock_irq(&tsk->sighand->siglock); break; case ITIMER_VIRTUAL: - nval = timeval_to_cputime(&value->it_value); - ninterval = timeval_to_cputime(&value->it_interval); - spin_lock_irq(&tsk->sighand->siglock); - cval = tsk->signal->it_virt_expires; - cinterval = tsk->signal->it_virt_incr; - if (!cputime_eq(cval, cputime_zero) || - !cputime_eq(nval, cputime_zero)) { - if (cputime_gt(nval, cputime_zero)) - nval = cputime_add(nval, - jiffies_to_cputime(1)); - set_process_cpu_timer(tsk, CPUCLOCK_VIRT, - &nval, &cval); - } - tsk->signal->it_virt_expires = nval; - tsk->signal->it_virt_incr = ninterval; - spin_unlock_irq(&tsk->sighand->siglock); - if (ovalue) { - cputime_to_timeval(cval, &ovalue->it_value); - cputime_to_timeval(cinterval, &ovalue->it_interval); - } + set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue); break; case ITIMER_PROF: - nval = timeval_to_cputime(&value->it_value); - ninterval = timeval_to_cputime(&value->it_interval); - spin_lock_irq(&tsk->sighand->siglock); - cval = tsk->signal->it_prof_expires; - cinterval = tsk->signal->it_prof_incr; - if (!cputime_eq(cval, cputime_zero) || - !cputime_eq(nval, cputime_zero)) { - if (cputime_gt(nval, cputime_zero)) - nval = cputime_add(nval, - jiffies_to_cputime(1)); - set_process_cpu_timer(tsk, CPUCLOCK_PROF, - &nval, &cval); - } - tsk->signal->it_prof_expires = nval; - tsk->signal->it_prof_incr = ninterval; - spin_unlock_irq(&tsk->sighand->siglock); - if (ovalue) { - cputime_to_timeval(cval, &ovalue->it_value); - cputime_to_timeval(cinterval, &ovalue->it_interval); - } + set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue); break; default: return -EINVAL; diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index bece7c0b67b..9b2d5e4dc8c 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -14,11 +14,11 @@ */ void update_rlimit_cpu(unsigned long rlim_new) { - cputime_t cputime; + cputime_t cputime = secs_to_cputime(rlim_new); + struct signal_struct *const sig = current->signal; - cputime = secs_to_cputime(rlim_new); - if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || - cputime_gt(current->signal->it_prof_expires, cputime)) { + if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) || + cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) { spin_lock_irq(¤t->sighand->siglock); set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(¤t->sighand->siglock); @@ -613,6 +613,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) break; } } else { + struct signal_struct *const sig = p->signal; + union cpu_time_count *exp = &timer->it.cpu.expires; + /* * For a process timer, set the cached expiration time. 
*/ @@ -620,30 +623,27 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) default: BUG(); case CPUCLOCK_VIRT: - if (!cputime_eq(p->signal->it_virt_expires, + if (!cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) && - cputime_lt(p->signal->it_virt_expires, - timer->it.cpu.expires.cpu)) + cputime_lt(sig->it[CPUCLOCK_VIRT].expires, + exp->cpu)) break; - p->signal->cputime_expires.virt_exp = - timer->it.cpu.expires.cpu; + sig->cputime_expires.virt_exp = exp->cpu; break; case CPUCLOCK_PROF: - if (!cputime_eq(p->signal->it_prof_expires, + if (!cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) && - cputime_lt(p->signal->it_prof_expires, - timer->it.cpu.expires.cpu)) + cputime_lt(sig->it[CPUCLOCK_PROF].expires, + exp->cpu)) break; - i = p->signal->rlim[RLIMIT_CPU].rlim_cur; + i = sig->rlim[RLIMIT_CPU].rlim_cur; if (i != RLIM_INFINITY && - i <= cputime_to_secs(timer->it.cpu.expires.cpu)) + i <= cputime_to_secs(exp->cpu)) break; - p->signal->cputime_expires.prof_exp = - timer->it.cpu.expires.cpu; + sig->cputime_expires.prof_exp = exp->cpu; break; case CPUCLOCK_SCHED: - p->signal->cputime_expires.sched_exp = - timer->it.cpu.expires.sched; + sig->cputime_expires.sched_exp = exp->sched; break; } } @@ -1070,6 +1070,27 @@ static void stop_process_timers(struct task_struct *tsk) spin_unlock_irqrestore(&cputimer->lock, flags); } +static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, + cputime_t *expires, cputime_t cur_time, int signo) +{ + if (cputime_eq(it->expires, cputime_zero)) + return; + + if (cputime_ge(cur_time, it->expires)) { + it->expires = it->incr; + if (!cputime_eq(it->expires, cputime_zero)) + it->expires = cputime_add(it->expires, cur_time); + + __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); + } + + if (!cputime_eq(it->expires, cputime_zero) && + (cputime_eq(*expires, cputime_zero) || + cputime_lt(it->expires, *expires))) { + *expires = it->expires; + } +} + /* * Check for any per-thread CPU timers that have fired and move them * off the tsk->*_timers list onto the firing list. Per-thread timers @@ -1089,10 +1110,10 @@ static void check_process_timers(struct task_struct *tsk, * Don't sample the current process CPU clocks if there are no timers. */ if (list_empty(&timers[CPUCLOCK_PROF]) && - cputime_eq(sig->it_prof_expires, cputime_zero) && + cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) && sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && list_empty(&timers[CPUCLOCK_VIRT]) && - cputime_eq(sig->it_virt_expires, cputime_zero) && + cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) && list_empty(&timers[CPUCLOCK_SCHED])) { stop_process_timers(tsk); return; @@ -1152,38 +1173,11 @@ static void check_process_timers(struct task_struct *tsk, /* * Check for the special case process timers. */ - if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { - if (cputime_ge(ptime, sig->it_prof_expires)) { - /* ITIMER_PROF fires and reloads. */ - sig->it_prof_expires = sig->it_prof_incr; - if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { - sig->it_prof_expires = cputime_add( - sig->it_prof_expires, ptime); - } - __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk); - } - if (!cputime_eq(sig->it_prof_expires, cputime_zero) && - (cputime_eq(prof_expires, cputime_zero) || - cputime_lt(sig->it_prof_expires, prof_expires))) { - prof_expires = sig->it_prof_expires; - } - } - if (!cputime_eq(sig->it_virt_expires, cputime_zero)) { - if (cputime_ge(utime, sig->it_virt_expires)) { - /* ITIMER_VIRTUAL fires and reloads. 
*/ - sig->it_virt_expires = sig->it_virt_incr; - if (!cputime_eq(sig->it_virt_expires, cputime_zero)) { - sig->it_virt_expires = cputime_add( - sig->it_virt_expires, utime); - } - __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk); - } - if (!cputime_eq(sig->it_virt_expires, cputime_zero) && - (cputime_eq(virt_expires, cputime_zero) || - cputime_lt(sig->it_virt_expires, virt_expires))) { - virt_expires = sig->it_virt_expires; - } - } + check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime, + SIGPROF); + check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime, + SIGVTALRM); + if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { unsigned long psecs = cputime_to_secs(ptime); cputime_t x; -- cgit v1.2.3-70-g09d2 From 8356b5f9c424e5831715abbce747197c30d1fd71 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 29 Jul 2009 12:15:27 +0200 Subject: itimers: Fix periodic tics precision Measure ITIMER_PROF and ITIMER_VIRT timers interval error between real ticks and requested by user. Take it into account when scheduling next tick. This patch introduce possibility where time between two consecutive tics is smaller then requested interval, it preserve however dependency that n tick is generated not earlier than n*interval time - counting from the beginning of periodic signal generation. Signed-off-by: Stanislaw Gruszka Acked-by: Peter Zijlstra Acked-by: Thomas Gleixner Cc: Oleg Nesterov Cc: Andrew Morton Cc: Paul Mackerras Cc: Benjamin Herrenschmidt LKML-Reference: <1248862529-6063-3-git-send-email-sgruszka@redhat.com> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 ++ kernel/itimer.c | 24 +++++++++++++++++++++--- kernel/posix-cpu-timers.c | 20 +++++++++++++++++--- 3 files changed, 40 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 3b3efaddd95..a069e65e8bb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -473,6 +473,8 @@ struct pacct_struct { struct cpu_itimer { cputime_t expires; cputime_t incr; + u32 error; + u32 incr_error; }; /** diff --git a/kernel/itimer.c b/kernel/itimer.c index 852c88ddd1f..21adff7b2a1 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -42,7 +42,7 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer) } static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, - struct itimerval *value) + struct itimerval *const value) { cputime_t cval, cinterval; struct cpu_itimer *it = &tsk->signal->it[clock_id]; @@ -127,14 +127,32 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } +static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns) +{ + struct timespec ts; + s64 cpu_ns; + + cputime_to_timespec(ct, &ts); + cpu_ns = timespec_to_ns(&ts); + + return (cpu_ns <= real_ns) ? 
0 : cpu_ns - real_ns; +} + static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, - struct itimerval *value, struct itimerval *ovalue) + const struct itimerval *const value, + struct itimerval *const ovalue) { - cputime_t cval, cinterval, nval, ninterval; + cputime_t cval, nval, cinterval, ninterval; + s64 ns_ninterval, ns_nval; struct cpu_itimer *it = &tsk->signal->it[clock_id]; nval = timeval_to_cputime(&value->it_value); + ns_nval = timeval_to_ns(&value->it_value); ninterval = timeval_to_cputime(&value->it_interval); + ns_ninterval = timeval_to_ns(&value->it_interval); + + it->incr_error = cputime_sub_ns(ninterval, ns_ninterval); + it->error = cputime_sub_ns(nval, ns_nval); spin_lock_irq(&tsk->sighand->siglock); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 9b2d5e4dc8c..b60d644ea4b 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1070,6 +1070,8 @@ static void stop_process_timers(struct task_struct *tsk) spin_unlock_irqrestore(&cputimer->lock, flags); } +static u32 onecputick; + static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, cputime_t *expires, cputime_t cur_time, int signo) { @@ -1077,9 +1079,16 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, return; if (cputime_ge(cur_time, it->expires)) { - it->expires = it->incr; - if (!cputime_eq(it->expires, cputime_zero)) - it->expires = cputime_add(it->expires, cur_time); + if (!cputime_eq(it->incr, cputime_zero)) { + it->expires = cputime_add(it->expires, it->incr); + it->error += it->incr_error; + if (it->error >= onecputick) { + it->expires = cputime_sub(it->expires, + jiffies_to_cputime(1)); + it->error -= onecputick; + } + } else + it->expires = cputime_zero; __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); } @@ -1696,10 +1705,15 @@ static __init int init_posix_cpu_timers(void) .nsleep = thread_cpu_nsleep, .nsleep_restart = thread_cpu_nsleep_restart, }; + struct timespec ts; register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); + cputime_to_timespec(jiffies_to_cputime(1), &ts); + onecputick = ts.tv_nsec; + WARN_ON(ts.tv_sec != 0); + return 0; } __initcall(init_posix_cpu_timers); -- cgit v1.2.3-70-g09d2 From a42548a18866e87092db93b771e6c5b060d78401 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 29 Jul 2009 12:15:29 +0200 Subject: cputime: Optimize jiffies_to_cputime(1) For powerpc with CONFIG_VIRT_CPU_ACCOUNTING jiffies_to_cputime(1) is not compile time constant and run time calculations are quite expensive. To optimize we use precomputed value. For all other architectures is is preprocessor definition. 
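As a consumer-side sketch of the change (the same pattern appears in the kernel/itimer.c hunk below), hot paths simply switch from the per-call conversion to the cached value:

	/* before: converts one jiffy to cputime on every call */
	cval = jiffies_to_cputime(1);

	/* after: reads the value precomputed at boot on powerpc, or a
	 * compile-time macro on all other architectures */
	cval = cputime_one_jiffy;
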
Signed-off-by: Stanislaw Gruszka Acked-by: Peter Zijlstra Acked-by: Thomas Gleixner Cc: Oleg Nesterov Cc: Andrew Morton Cc: Paul Mackerras Cc: Benjamin Herrenschmidt LKML-Reference: <1248862529-6063-5-git-send-email-sgruszka@redhat.com> Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/cputime.h | 1 + arch/powerpc/include/asm/cputime.h | 13 +++++++++++++ arch/powerpc/kernel/time.c | 4 ++++ arch/s390/include/asm/cputime.h | 1 + include/asm-generic/cputime.h | 1 + kernel/itimer.c | 4 ++-- kernel/posix-cpu-timers.c | 6 +++--- kernel/sched.c | 9 ++++----- 8 files changed, 29 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index d20b998cb91..7fa8a859466 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -30,6 +30,7 @@ typedef u64 cputime_t; typedef u64 cputime64_t; #define cputime_zero ((cputime_t)0) +#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_max ((~((cputime_t)0) >> 1) - 1) #define cputime_add(__a, __b) ((__a) + (__b)) #define cputime_sub(__a, __b) ((__a) - (__b)) diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index f42e623030e..fa19f3fe05f 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -18,6 +18,9 @@ #ifndef CONFIG_VIRT_CPU_ACCOUNTING #include +#ifdef __KERNEL__ +static inline void setup_cputime_one_jiffy(void) { } +#endif #else #include @@ -48,6 +51,11 @@ typedef u64 cputime64_t; #ifdef __KERNEL__ +/* + * One jiffy in timebase units computed during initialization + */ +extern cputime_t cputime_one_jiffy; + /* * Convert cputime <-> jiffies */ @@ -89,6 +97,11 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif) return ct; } +static inline void setup_cputime_one_jiffy(void) +{ + cputime_one_jiffy = jiffies_to_cputime(1); +} + static inline cputime64_t jiffies64_to_cputime64(const u64 jif) { cputime_t ct; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index eae4511ceea..211d7b0cd37 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor); DEFINE_PER_CPU(unsigned long, cputime_last_delta); DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); +cputime_t cputime_one_jiffy; + static void calc_cputime_factors(void) { struct div_result res; @@ -500,6 +502,7 @@ static int __init iSeries_tb_recal(void) tb_to_xs = divres.result_low; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; vdso_data->tb_to_xs = tb_to_xs; + setup_cputime_one_jiffy(); } else { printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" @@ -945,6 +948,7 @@ void __init time_init(void) tb_ticks_per_usec = ppc_tb_freq / 1000000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); calc_cputime_factors(); + setup_cputime_one_jiffy(); /* * Calculate the length of each tick in ns. 
It will not be diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 7a3817a656d..24b1244aadb 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -42,6 +42,7 @@ __div(unsigned long long n, unsigned int base) #endif /* __s390x__ */ #define cputime_zero (0ULL) +#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_max ((~0UL >> 1) - 1) #define cputime_add(__a, __b) ((__a) + (__b)) #define cputime_sub(__a, __b) ((__a) - (__b)) diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 1c1fa422d18..ca0f239f0e1 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -7,6 +7,7 @@ typedef unsigned long cputime_t; #define cputime_zero (0UL) +#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_max ((~0UL >> 1) - 1) #define cputime_add(__a, __b) ((__a) + (__b)) #define cputime_sub(__a, __b) ((__a) - (__b)) diff --git a/kernel/itimer.c b/kernel/itimer.c index 21adff7b2a1..8078a32d3b1 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -64,7 +64,7 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, if (cputime_le(cval, t)) /* about to fire */ - cval = jiffies_to_cputime(1); + cval = cputime_one_jiffy; else cval = cputime_sub(cval, t); } @@ -161,7 +161,7 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, if (!cputime_eq(cval, cputime_zero) || !cputime_eq(nval, cputime_zero)) { if (cputime_gt(nval, cputime_zero)) - nval = cputime_add(nval, jiffies_to_cputime(1)); + nval = cputime_add(nval, cputime_one_jiffy); set_process_cpu_timer(tsk, clock_id, &nval, &cval); } it->expires = nval; diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 69c92374355..18bdde6f676 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1086,7 +1086,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, it->error += it->incr_error; if (it->error >= onecputick) { it->expires = cputime_sub(it->expires, - jiffies_to_cputime(1)); + cputime_one_jiffy); it->error -= onecputick; } } else @@ -1461,7 +1461,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, if (!cputime_eq(*oldval, cputime_zero)) { if (cputime_le(*oldval, now.cpu)) { /* Just about to fire. 
*/ - *oldval = jiffies_to_cputime(1); + *oldval = cputime_one_jiffy; } else { *oldval = cputime_sub(*oldval, now.cpu); } @@ -1712,7 +1712,7 @@ static __init int init_posix_cpu_timers(void) register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); - cputime_to_timespec(jiffies_to_cputime(1), &ts); + cputime_to_timespec(cputime_one_jiffy, &ts); onecputick = ts.tv_nsec; WARN_ON(ts.tv_sec != 0); diff --git a/kernel/sched.c b/kernel/sched.c index 1b59e265273..8f977d5cc51 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5031,17 +5031,16 @@ void account_idle_time(cputime_t cputime) */ void account_process_tick(struct task_struct *p, int user_tick) { - cputime_t one_jiffy = jiffies_to_cputime(1); - cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); + cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); struct rq *rq = this_rq(); if (user_tick) - account_user_time(p, one_jiffy, one_jiffy_scaled); + account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) - account_system_time(p, HARDIRQ_OFFSET, one_jiffy, + account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, one_jiffy_scaled); else - account_idle_time(one_jiffy); + account_idle_time(cputime_one_jiffy); } /* -- cgit v1.2.3-70-g09d2 From 8a4c47f346cc7a12d0897c05eb3cc1add26b487f Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Mon, 20 Jul 2009 13:48:04 +0800 Subject: drm: Remove the unused prefix in DRM_DEBUG_KMS/DRIVER/MODE We will have to add a prefix when using the macro defintion of DRM_DEBUG_KMS /DRM_DEBUG_DRIVER/MODE. It is not convenient. We should use the DRM_NAME as default prefix. So remove the prefix in the macro definition of DRM_DEBUG_KMS/DRIVER/MODE. 
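A minimal before/after sketch of a call site, mirroring the i915 hunks below:

	/* before: every caller passed its own prefix string */
	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
	DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");

	/* after: DRM_NAME is used as the default prefix */
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
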
Signed-off-by: Zhao Yakui Acked-by: Ian Romanick Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 8 +++----- drivers/gpu/drm/i915/i915_dma.c | 35 +++++++++++++++-------------------- drivers/gpu/drm/i915/intel_lvds.c | 10 +++------- drivers/gpu/drm/i915/intel_sdvo.c | 35 ++++++++++++++++------------------- include/drm/drmP.h | 18 +++++++++--------- 5 files changed, 46 insertions(+), 60 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index fd489d76fbb..5eca2d5c5f2 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -40,7 +40,6 @@ #include "drm.h" #include "drm_crtc.h" -#define DRM_MODESET_DEBUG "drm_mode" /** * drm_mode_debug_printmodeline - debug print a mode * @dev: DRM device @@ -53,8 +52,8 @@ */ void drm_mode_debug_printmodeline(struct drm_display_mode *mode) { - DRM_DEBUG_MODE(DRM_MODESET_DEBUG, - "Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", + DRM_DEBUG_MODE("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " + "0x%x 0x%x\n", mode->base.id, mode->name, mode->vrefresh, mode->clock, mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, @@ -819,8 +818,7 @@ void drm_mode_prune_invalid(struct drm_device *dev, list_del(&mode->head); if (verbose) { drm_mode_debug_printmodeline(mode); - DRM_DEBUG_MODE(DRM_MODESET_DEBUG, - "Not using %s mode %d\n", + DRM_DEBUG_MODE("Not using %s mode %d\n", mode->name, mode->status); } drm_mode_destroy(dev, mode); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8c4783180bf..14625e146f1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -33,8 +33,6 @@ #include "i915_drm.h" #include "i915_drv.h" -#define I915_DRV "i915_drv" - /* Really want an OS-independent resettable timer. 
Would like to have * this loop run for (eg) 3 sec, but have the timer reset every time * the head pointer changes, so that EBUSY only happens if the ring @@ -101,7 +99,7 @@ static int i915_init_phys_hws(struct drm_device *dev) memset(dev_priv->hw_status_page, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->dma_status_page); - DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n"); + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; } @@ -187,8 +185,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) master_priv->sarea_priv = (drm_i915_sarea_t *) ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); } else { - DRM_DEBUG_DRIVER(I915_DRV, - "sarea not found assuming DRI2 userspace\n"); + DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); } if (init->ring_size != 0) { @@ -238,7 +235,7 @@ static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__); + DRM_DEBUG_DRIVER("%s\n", __func__); if (dev_priv->ring.map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" @@ -251,14 +248,14 @@ static int i915_dma_resume(struct drm_device * dev) DRM_ERROR("Can not find hardware status page\n"); return -EINVAL; } - DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n", + DRM_DEBUG_DRIVER("hw status page @ %p\n", dev_priv->hw_status_page); if (dev_priv->status_gfx_addr != 0) I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); - DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n"); + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; } @@ -552,7 +549,7 @@ static int i915_dispatch_flip(struct drm_device * dev) if (!master_priv->sarea_priv) return -EINVAL; - DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n", + DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", __func__, dev_priv->current_page, master_priv->sarea_priv->pf_current_page); @@ -633,8 +630,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, return -EINVAL; } - DRM_DEBUG_DRIVER(I915_DRV, - "i915 batchbuffer, start %x used %d cliprects %d\n", + DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", batch->start, batch->used, batch->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -681,8 +677,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, void *batch_data; int ret; - DRM_DEBUG_DRIVER(I915_DRV, - "i915 cmdbuffer, buf %p sz %d cliprects %d\n", + DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -735,7 +730,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, { int ret; - DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__); + DRM_DEBUG_DRIVER("%s\n", __func__); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -778,7 +773,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; break; default: - DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n", + DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); return -EINVAL; } @@ -819,7 +814,7 @@ static int i915_setparam(struct drm_device *dev, void *data, dev_priv->fence_reg_start = param->value; break; default: - DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n", + DRM_DEBUG_DRIVER("unknown parameter %d\n", param->param); return -EINVAL; } @@ -846,7 +841,7 @@ static int i915_set_status_page(struct 
drm_device *dev, void *data, return 0; } - DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); + DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); @@ -868,9 +863,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data, memset(dev_priv->hw_status_page, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); - DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n", + DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", dev_priv->status_gfx_addr); - DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n", + DRM_DEBUG_DRIVER("load hws at %p\n", dev_priv->hw_status_page); return 0; } @@ -1310,7 +1305,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv; - DRM_DEBUG_DRIVER(I915_DRV, "\n"); + DRM_DEBUG_DRIVER("\n"); i915_file_priv = (struct drm_i915_file_private *) kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 9ab38efffec..b59c65d19d8 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -38,8 +38,6 @@ #include "i915_drv.h" #include -#define I915_LVDS "i915_lvds" - /* * the following four scaling options are defined. * #define DRM_MODE_SCALE_NON_GPU 0 @@ -673,8 +671,7 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_crtc *crtc = connector->encoder->crtc; struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; if (value == DRM_MODE_SCALE_NON_GPU) { - DRM_DEBUG_KMS(I915_LVDS, - "non_GPU property is unsupported\n"); + DRM_DEBUG_KMS("non_GPU property is unsupported\n"); return 0; } if (lvds_priv->fitting_mode == value) { @@ -731,8 +728,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { - DRM_DEBUG_KMS(I915_LVDS, - "Skipping LVDS initialization for %s\n", id->ident); + DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident); return 1; } @@ -1013,7 +1009,7 @@ out: return; failed: - DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n"); + DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); drm_connector_cleanup(connector); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 4f0c30948bc..abef69c8a49 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -36,7 +36,6 @@ #include "intel_sdvo_regs.h" #undef SDVO_DEBUG -#define I915_SDVO "i915_sdvo" struct intel_sdvo_priv { u8 slave_addr; @@ -178,7 +177,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, return true; } - DRM_DEBUG("i2c transfer returned %d\n", ret); + DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } @@ -288,7 +287,7 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; int i; - DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ", + DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); for (i = 0; i < args_len; i++) DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); @@ -341,7 +340,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; int i; - DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv)); + DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); for (i = 0; i 
< response_len; i++) DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); for (; i < 8; i++) @@ -658,10 +657,10 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) status = intel_sdvo_read_response(intel_output, &response, 1); if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n"); + DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); return SDVO_CLOCK_RATE_MULT_1X; } else { - DRM_DEBUG("Current clock rate multiplier: %d\n", response); + DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response); } return response; @@ -942,14 +941,14 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) format = &sdvo_priv->tv_format; memset(&unset, 0, sizeof(unset)); if (memcmp(format, &unset, sizeof(*format))) { - DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n", + DRM_DEBUG_KMS("%s: Choosing default TV format of NTSC-M\n", SDVO_NAME(sdvo_priv)); format->ntsc_m = 1; intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, format, sizeof(*format)); status = intel_sdvo_read_response(output, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) - DRM_DEBUG("%s: Failed to set TV format\n", + DRM_DEBUG_KMS("%s: Failed to set TV format\n", SDVO_NAME(sdvo_priv)); } } @@ -1220,8 +1219,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) * a given it the status is a success, we succeeded. */ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { - DRM_DEBUG("First %s output reported failure to sync\n", - SDVO_NAME(sdvo_priv)); + DRM_DEBUG_KMS("First %s output reported failure to " + "sync\n", SDVO_NAME(sdvo_priv)); } if (0) @@ -1316,8 +1315,8 @@ static void intel_sdvo_restore(struct drm_connector *connector) intel_wait_for_vblank(dev); status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); if (status == SDVO_CMD_STATUS_SUCCESS && !input1) - DRM_DEBUG("First %s output reported failure to sync\n", - SDVO_NAME(sdvo_priv)); + DRM_DEBUG_KMS("First %s output reported failure to " + "sync\n", SDVO_NAME(sdvo_priv)); } intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); @@ -1395,7 +1394,7 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) u8 response[2]; u8 status; struct intel_output *intel_output; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); if (!connector) return 0; @@ -1460,7 +1459,7 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); status = intel_sdvo_read_response(intel_output, &response, 2); - DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); + DRM_DEBUG_KMS("SDVO response %d %d\n", response[0], response[1]); if (status != SDVO_CMD_STATUS_SUCCESS) return connector_status_unknown; @@ -1905,8 +1904,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { - DRM_DEBUG_KMS(I915_SDVO, - "No SDVO device found on SDVO%c\n", + DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", output_device == SDVOB ? 
'B' : 'C'); goto err_i2c; } @@ -1989,8 +1987,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) sdvo_priv->controlled_output = 0; memcpy (bytes, &sdvo_priv->caps.output_flags, 2); - DRM_DEBUG_KMS(I915_SDVO, - "%s: Unknown SDVO output type (0x%02x%02x)\n", + DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(sdvo_priv), bytes[0], bytes[1]); encoder_type = DRM_MODE_ENCODER_NONE; @@ -2022,7 +2019,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) &sdvo_priv->pixel_clock_max); - DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, " + DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 45b67d9c39c..edbdb02a7a3 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -174,19 +174,19 @@ extern void drm_ut_debug_printk(unsigned int request_level, __func__, fmt, ##args); \ } while (0) -#define DRM_DEBUG_DRIVER(prefix, fmt, args...) \ +#define DRM_DEBUG_DRIVER(fmt, args...) \ do { \ - drm_ut_debug_printk(DRM_UT_DRIVER, prefix, \ + drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \ __func__, fmt, ##args); \ } while (0) -#define DRM_DEBUG_KMS(prefix, fmt, args...) \ +#define DRM_DEBUG_KMS(fmt, args...) \ do { \ - drm_ut_debug_printk(DRM_UT_KMS, prefix, \ + drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \ __func__, fmt, ##args); \ } while (0) -#define DRM_DEBUG_MODE(prefix, fmt, args...) \ +#define DRM_DEBUG_MODE(fmt, args...) \ do { \ - drm_ut_debug_printk(DRM_UT_MODE, prefix, \ + drm_ut_debug_printk(DRM_UT_MODE, DRM_NAME, \ __func__, fmt, ##args); \ } while (0) #define DRM_LOG(fmt, args...) \ @@ -210,9 +210,9 @@ extern void drm_ut_debug_printk(unsigned int request_level, NULL, fmt, ##args); \ } while (0) #else -#define DRM_DEBUG_DRIVER(prefix, fmt, args...) do { } while (0) -#define DRM_DEBUG_KMS(prefix, fmt, args...) do { } while (0) -#define DRM_DEBUG_MODE(prefix, fmt, args...) do { } while (0) +#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0) +#define DRM_DEBUG_KMS(fmt, args...) do { } while (0) +#define DRM_DEBUG_MODE(fmt, args...) do { } while (0) #define DRM_DEBUG(fmt, arg...) do { } while (0) #define DRM_LOG(fmt, arg...) do { } while (0) #define DRM_LOG_KMS(fmt, args...) do { } while (0) -- cgit v1.2.3-70-g09d2 From f940f37f022f7392ab81a35516222cbd46110b42 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Mon, 20 Jul 2009 13:48:05 +0800 Subject: drm: Remove the macro defintion of DRM_DEBUG_MODE Two macro definitions of DRM_DEBUG_KMS/MODE can be used to add the debug info related with KMS. It is confusing. So remove the macro definition of DRM_DEBUG_MODE. Instead it can be replaced by the DRM_DEBUG_KMS. 
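The conversion at existing call sites is mechanical, as in the drm_modes.c hunk below:

	/* before */
	DRM_DEBUG_MODE("Not using %s mode %d\n", mode->name, mode->status);

	/* after: DRM_DEBUG_KMS now covers mode-setting debug output as well */
	DRM_DEBUG_KMS("Not using %s mode %d\n", mode->name, mode->status);
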
Signed-off-by: Zhao Yakui Acked-by: Ian Romanick Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 4 ++-- include/drm/drmP.h | 7 ------- 2 files changed, 2 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 5eca2d5c5f2..6b4d2dc3cdd 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -52,7 +52,7 @@ */ void drm_mode_debug_printmodeline(struct drm_display_mode *mode) { - DRM_DEBUG_MODE("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " + DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " "0x%x 0x%x\n", mode->base.id, mode->name, mode->vrefresh, mode->clock, mode->hdisplay, mode->hsync_start, @@ -818,7 +818,7 @@ void drm_mode_prune_invalid(struct drm_device *dev, list_del(&mode->head); if (verbose) { drm_mode_debug_printmodeline(mode); - DRM_DEBUG_MODE("Not using %s mode %d\n", + DRM_DEBUG_KMS("Not using %s mode %d\n", mode->name, mode->status); } drm_mode_destroy(dev, mode); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index edbdb02a7a3..6513d16cd02 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -88,7 +88,6 @@ struct drm_device; #define DRM_UT_CORE 0x01 #define DRM_UT_DRIVER 0x02 #define DRM_UT_KMS 0x04 -#define DRM_UT_MODE 0x08 extern void drm_ut_debug_printk(unsigned int request_level, const char *prefix, @@ -184,11 +183,6 @@ extern void drm_ut_debug_printk(unsigned int request_level, drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \ __func__, fmt, ##args); \ } while (0) -#define DRM_DEBUG_MODE(fmt, args...) \ - do { \ - drm_ut_debug_printk(DRM_UT_MODE, DRM_NAME, \ - __func__, fmt, ##args); \ - } while (0) #define DRM_LOG(fmt, args...) \ do { \ drm_ut_debug_printk(DRM_UT_CORE, NULL, \ @@ -212,7 +206,6 @@ extern void drm_ut_debug_printk(unsigned int request_level, #else #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0) #define DRM_DEBUG_KMS(fmt, args...) do { } while (0) -#define DRM_DEBUG_MODE(fmt, args...) do { } while (0) #define DRM_DEBUG(fmt, arg...) do { } while (0) #define DRM_LOG(fmt, arg...) do { } while (0) #define DRM_LOG_KMS(fmt, args...) do { } while (0) -- cgit v1.2.3-70-g09d2 From 87fdff81cd2d770f0adc742e21eb5e062ad20def Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Mon, 20 Jul 2009 13:48:06 +0800 Subject: DRM: Add the explanation about DRM debug level Add the explanation about DRM debug level in the drmP header file. This is to explain how/where to use the different DRM debug level. Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- include/drm/drmP.h | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'include') diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 6513d16cd02..e0f1c1fee58 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -88,6 +88,37 @@ struct drm_device; #define DRM_UT_CORE 0x01 #define DRM_UT_DRIVER 0x02 #define DRM_UT_KMS 0x04 +/* + * Three debug levels are defined. + * drm_core, drm_driver, drm_kms + * drm_core level can be used in the generic drm code. For example: + * drm_ioctl, drm_mm, drm_memory + * The macro definiton of DRM_DEBUG is used. + * DRM_DEBUG(fmt, args...) + * The debug info by using the DRM_DEBUG can be obtained by adding + * the boot option of "drm.debug=1". + * + * drm_driver level can be used in the specific drm driver. It is used + * to add the debug info related with the drm driver. For example: + * i915_drv, i915_dma, i915_gem, radeon_drv, + * The macro definition of DRM_DEBUG_DRIVER can be used. 
+ * DRM_DEBUG_DRIVER(fmt, args...) + * The debug info by using the DRM_DEBUG_DRIVER can be obtained by + * adding the boot option of "drm.debug=0x02" + * + * drm_kms level can be used in the KMS code related with specific drm driver. + * It is used to add the debug info related with KMS mode. For example: + * the connector/crtc , + * The macro definition of DRM_DEBUG_KMS can be used. + * DRM_DEBUG_KMS(fmt, args...) + * The debug info by using the DRM_DEBUG_KMS can be obtained by + * adding the boot option of "drm.debug=0x04" + * + * If we add the boot option of "drm.debug=0x06", we can get the debug info by + * using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER. + * If we add the boot option of "drm.debug=0x05", we can get the debug info by + * using the DRM_DEBUG_KMS and DRM_DEBUG. + */ extern void drm_ut_debug_printk(unsigned int request_level, const char *prefix, -- cgit v1.2.3-70-g09d2 From 2066facca4c7dfe9f5068ece0200a4dbf10f49e1 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 2 Aug 2009 04:19:17 +0200 Subject: drm/kms: slave encoder interface. Define some helper functions to make easier to detach a KMS encoder implementation from the drm module of the GPU it's used in. This is mainly useful for some external I2C encoders known to be present on cards with GPUs from several different manufacturers. Signed-off-by: Francisco Jerez Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_encoder_slave.c | 116 ++++++++++++++++++++++++++ include/drm/drm_encoder_slave.h | 162 ++++++++++++++++++++++++++++++++++++ 3 files changed, 279 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/drm_encoder_slave.c create mode 100644 include/drm/drm_encoder_slave.h (limited to 'include') diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index fe23f29f7cb..5f0aec4f082 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -11,7 +11,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \ - drm_info.o drm_debugfs.o + drm_info.o drm_debugfs.o drm_encoder_slave.o drm-$(CONFIG_COMPAT) += drm_ioc32.o diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c new file mode 100644 index 00000000000..6ffd600ccfa --- /dev/null +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include + +/** + * drm_i2c_encoder_init - Initialize an I2C slave encoder + * @dev: DRM device. + * @encoder: Encoder to be attached to the I2C device. You aren't + * required to have called drm_encoder_init() before. + * @adap: I2C adapter that will be used to communicate with + * the device. + * @info: Information that will be used to create the I2C device. + * Required fields are @addr and @type. + * + * Create an I2C device on the specified bus (the module containing its + * driver is transparently loaded) and attach it to the specified + * &drm_encoder_slave. The @slave_funcs field will be initialized with + * the hooks provided by the slave driver. + * + * Returns 0 on success or a negative errno on failure, in particular, + * -ENODEV is returned when no matching driver is found. + */ +int drm_i2c_encoder_init(struct drm_device *dev, + struct drm_encoder_slave *encoder, + struct i2c_adapter *adap, + const struct i2c_board_info *info) +{ + char modalias[sizeof(I2C_MODULE_PREFIX) + + I2C_NAME_SIZE]; + struct module *module = NULL; + struct i2c_client *client; + struct drm_i2c_encoder_driver *encoder_drv; + int err = 0; + + snprintf(modalias, sizeof(modalias), + "%s%s", I2C_MODULE_PREFIX, info->type); + request_module(modalias); + + client = i2c_new_device(adap, info); + if (!client) { + err = -ENOMEM; + goto fail; + } + + if (!client->driver) { + err = -ENODEV; + goto fail_unregister; + } + + module = client->driver->driver.owner; + if (!try_module_get(module)) { + err = -ENODEV; + goto fail_unregister; + } + + encoder->bus_priv = client; + + encoder_drv = to_drm_i2c_encoder_driver(client->driver); + + err = encoder_drv->encoder_init(client, dev, encoder); + if (err) + goto fail_unregister; + + return 0; + +fail_unregister: + i2c_unregister_device(client); + module_put(module); +fail: + return err; +} +EXPORT_SYMBOL(drm_i2c_encoder_init); + +/** + * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder + * @drm_encoder: Encoder to be unregistered. + * + * This should be called from the @destroy method of an I2C slave + * encoder driver once I2C access is no longer needed. + */ +void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder) +{ + struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder); + struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder); + struct module *module = client->driver->driver.owner; + + i2c_unregister_device(client); + encoder->bus_priv = NULL; + + module_put(module); +} +EXPORT_SYMBOL(drm_i2c_encoder_destroy); diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h new file mode 100644 index 00000000000..821ec40c17d --- /dev/null +++ b/include/drm/drm_encoder_slave.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_ENCODER_SLAVE_H__ +#define __DRM_ENCODER_SLAVE_H__ + +#include +#include + +/** + * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver + * @set_config: Initialize any encoder-specific modesetting parameters. + * The meaning of the @params parameter is implementation + * dependent. It will usually be a structure with DVO port + * data format settings or timings. It's not required for + * the new parameters to take effect until the next mode + * is set. + * + * Most of its members are analogous to the function pointers in + * &drm_encoder_helper_funcs and they can optionally be used to + * initialize the latter. Connector-like methods (e.g. @get_modes and + * @set_property) will typically be wrapped around and only be called + * if the encoder is the currently selected one for the connector. + */ +struct drm_encoder_slave_funcs { + void (*set_config)(struct drm_encoder *encoder, + void *params); + + void (*destroy)(struct drm_encoder *encoder); + void (*dpms)(struct drm_encoder *encoder, int mode); + void (*save)(struct drm_encoder *encoder); + void (*restore)(struct drm_encoder *encoder); + bool (*mode_fixup)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + int (*mode_valid)(struct drm_encoder *encoder, + struct drm_display_mode *mode); + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*get_modes)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*create_resources)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*set_property)(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val); + +}; + +/** + * struct drm_encoder_slave - Slave encoder struct + * @base: DRM encoder object. + * @slave_funcs: Slave encoder callbacks. + * @slave_priv: Slave encoder private data. + * @bus_priv: Bus specific data. + * + * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the + * ones in @base. 
The former are never actually called by the common + * CRTC code, it's just a convenience for splitting the encoder + * functions in an upper, GPU-specific layer and a (hopefully) + * GPU-agnostic lower layer: It's the GPU driver responsibility to + * call the slave methods when appropriate. + * + * drm_i2c_encoder_init() provides a way to get an implementation of + * this. + */ +struct drm_encoder_slave { + struct drm_encoder base; + + struct drm_encoder_slave_funcs *slave_funcs; + void *slave_priv; + void *bus_priv; +}; +#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base) + +int drm_i2c_encoder_init(struct drm_device *dev, + struct drm_encoder_slave *encoder, + struct i2c_adapter *adap, + const struct i2c_board_info *info); + + +/** + * struct drm_i2c_encoder_driver + * + * Describes a device driver for an encoder connected to the GPU + * through an I2C bus. In addition to the entry points in @i2c_driver + * an @encoder_init function should be provided. It will be called to + * give the driver an opportunity to allocate any per-encoder data + * structures and to initialize the @slave_funcs and (optionally) + * @slave_priv members of @encoder. + */ +struct drm_i2c_encoder_driver { + struct i2c_driver i2c_driver; + + int (*encoder_init)(struct i2c_client *client, + struct drm_device *dev, + struct drm_encoder_slave *encoder); + +}; +#define to_drm_i2c_encoder_driver(x) container_of((x), \ + struct drm_i2c_encoder_driver, \ + i2c_driver) + +/** + * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder + */ +static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder) +{ + return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv; +} + +/** + * drm_i2c_encoder_register - Register an I2C encoder driver + * @owner: Module containing the driver. + * @driver: Driver to be registered. + */ +static inline int drm_i2c_encoder_register(struct module *owner, + struct drm_i2c_encoder_driver *driver) +{ + return i2c_register_driver(owner, &driver->i2c_driver); +} + +/** + * drm_i2c_encoder_unregister - Unregister an I2C encoder driver + * @driver: Driver to be unregistered. + */ +static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver) +{ + return i2c_del_driver(&driver->i2c_driver); +} + +void drm_i2c_encoder_destroy(struct drm_encoder *encoder); + +#endif -- cgit v1.2.3-70-g09d2 From 74bd3c26b90f39b9dcc05c471333da8998572b5d Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 2 Aug 2009 04:19:18 +0200 Subject: drm: Define DRM_MODE_CONNECTOR_TV The existing TV connector types are often unsuitable either because there is no way to probe them until they're actually plugged in or because they can change during run time (e.g. 7-pin DIN connectors that behave as S-Video, Component, Composite or SCART depending on the adaptor plugged in). 
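A minimal sketch of how a driver could register such a connector; the foo_tv names are hypothetical and the calls assume the usual KMS connector setup flow:

	/* hypothetical TV-out connector registration (sketch only) */
	drm_connector_init(dev, &foo_tv->base, &foo_tv_connector_funcs,
			   DRM_MODE_CONNECTOR_TV);
	drm_connector_helper_add(&foo_tv->base, &foo_tv_connector_helper_funcs);

	/* report what is actually plugged in via the subconnector property
	 * (0 == unknown/automatic until probed) */
	drm_connector_attach_property(&foo_tv->base,
			dev->mode_config.tv_subconnector_property, 0);
	drm_sysfs_connector_add(&foo_tv->base);
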
Signed-off-by: Francisco Jerez Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 1 + drivers/gpu/drm/drm_sysfs.c | 3 +++ include/drm/drm_mode.h | 1 + 3 files changed, 5 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 9c758305472..c7ab80b45e3 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -146,6 +146,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 }, { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, + { DRM_MODE_CONNECTOR_TV, "TV", 0 }, }; static struct drm_prop_enum_list drm_encoder_enum_list[] = diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 85ec31b3ff0..adc179459c2 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -247,6 +247,7 @@ static ssize_t subconnector_show(struct device *device, case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_TV: prop = dev->mode_config.tv_subconnector_property; is_tv = 1; break; @@ -287,6 +288,7 @@ static ssize_t select_subconnector_show(struct device *device, case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_TV: prop = dev->mode_config.tv_select_subconnector_property; is_tv = 1; break; @@ -385,6 +387,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector) case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_TV: for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) { ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]); if (ret) diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index ae304cc73c9..c51e9f528c8 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -155,6 +155,7 @@ struct drm_mode_get_encoder { #define DRM_MODE_CONNECTOR_DisplayPort 10 #define DRM_MODE_CONNECTOR_HDMIA 11 #define DRM_MODE_CONNECTOR_HDMIB 12 +#define DRM_MODE_CONNECTOR_TV 13 struct drm_mode_get_connector { -- cgit v1.2.3-70-g09d2 From aeaa1ad3ff32be833680e484d99ec29d892da1ff Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 2 Aug 2009 04:19:19 +0200 Subject: drm: Define DRM_MODE_SUBCONNECTOR_SCART Signed-off-by: Francisco Jerez Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 2 ++ include/drm/drm_mode.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index c7ab80b45e3..ed53c5c37ac 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -108,6 +108,7 @@ static struct drm_prop_enum_list drm_tv_select_enum_list[] = { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) @@ -118,6 +119,7 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, diff --git 
a/include/drm/drm_mode.h b/include/drm/drm_mode.h index c51e9f528c8..616aeb42b77 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -141,6 +141,7 @@ struct drm_mode_get_encoder { #define DRM_MODE_SUBCONNECTOR_Composite 5 #define DRM_MODE_SUBCONNECTOR_SVIDEO 6 #define DRM_MODE_SUBCONNECTOR_Component 8 +#define DRM_MODE_SUBCONNECTOR_SCART 9 #define DRM_MODE_CONNECTOR_Unknown 0 #define DRM_MODE_CONNECTOR_VGA 1 -- cgit v1.2.3-70-g09d2 From b6b7902e54c7e8abbc213d8bdc290350c00ccfe5 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 2 Aug 2009 04:19:20 +0200 Subject: drm: Define some new standard TV properties. Namely "brightness", "contrast" and "flicker reduction". Signed-off-by: Francisco Jerez Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 18 ++++++++++++++++++ include/drm/drm_crtc.h | 3 +++ 2 files changed, 21 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index ed53c5c37ac..a8c831134fc 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -718,6 +718,24 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, drm_property_add_enum(dev->mode_config.tv_mode_property, i, i, modes[i]); + dev->mode_config.tv_brightness_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "brightness", 2); + dev->mode_config.tv_brightness_property->values[0] = 0; + dev->mode_config.tv_brightness_property->values[1] = 100; + + dev->mode_config.tv_contrast_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "contrast", 2); + dev->mode_config.tv_contrast_property->values[0] = 0; + dev->mode_config.tv_contrast_property->values[1] = 100; + + dev->mode_config.tv_flicker_reduction_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "flicker reduction", 2); + dev->mode_config.tv_flicker_reduction_property->values[0] = 0; + dev->mode_config.tv_flicker_reduction_property->values[1] = 100; + return 0; } EXPORT_SYMBOL(drm_mode_create_tv_properties); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 125994d8ac0..5f2cc0ca4c7 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -572,6 +572,9 @@ struct drm_mode_config { struct drm_property *tv_right_margin_property; struct drm_property *tv_top_margin_property; struct drm_property *tv_bottom_margin_property; + struct drm_property *tv_brightness_property; + struct drm_property *tv_contrast_property; + struct drm_property *tv_flicker_reduction_property; /* Optional properties */ struct drm_property *scaling_mode_property; -- cgit v1.2.3-70-g09d2 From a75f0236292a5fca65f26efedca48bd07db1834d Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 12 Aug 2009 02:30:10 +0200 Subject: drm: Add more standard TV properties. Overscan, saturation, hue. Used in the nouveau driver for GPUs with integrated TV encoders. 
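A hedged sketch of how an encoder driver might consume these new range properties; foo_tv_set_property is a hypothetical drm_connector_funcs.set_property implementation, and the 0-100 ranges are the ones set up by drm_mode_create_tv_properties():

/* Hypothetical connector callback: react to the new TV range properties. */
static int foo_tv_set_property(struct drm_connector *connector,
			       struct drm_property *property, uint64_t val)
{
	struct drm_device *dev = connector->dev;

	if (property == dev->mode_config.tv_saturation_property) {
		/* program the encoder's saturation from val (0-100) */
	} else if (property == dev->mode_config.tv_hue_property) {
		/* program the encoder's hue from val (0-100) */
	} else if (property == dev->mode_config.tv_overscan_property) {
		/* adjust the scaler for the requested overscan (0-100) */
	} else {
		return -EINVAL;
	}
	return 0;
}
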
Signed-off-by: Francisco Jerez Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 18 ++++++++++++++++++ include/drm/drm_crtc.h | 3 +++ 2 files changed, 21 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index a8c831134fc..362a538cded 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -736,6 +736,24 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, dev->mode_config.tv_flicker_reduction_property->values[0] = 0; dev->mode_config.tv_flicker_reduction_property->values[1] = 100; + dev->mode_config.tv_overscan_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "overscan", 2); + dev->mode_config.tv_overscan_property->values[0] = 0; + dev->mode_config.tv_overscan_property->values[1] = 100; + + dev->mode_config.tv_saturation_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "saturation", 2); + dev->mode_config.tv_saturation_property->values[0] = 0; + dev->mode_config.tv_saturation_property->values[1] = 100; + + dev->mode_config.tv_hue_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "hue", 2); + dev->mode_config.tv_hue_property->values[0] = 0; + dev->mode_config.tv_hue_property->values[1] = 100; + return 0; } EXPORT_SYMBOL(drm_mode_create_tv_properties); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 5f2cc0ca4c7..db92a83f8ca 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -575,6 +575,9 @@ struct drm_mode_config { struct drm_property *tv_brightness_property; struct drm_property *tv_contrast_property; struct drm_property *tv_flicker_reduction_property; + struct drm_property *tv_overscan_property; + struct drm_property *tv_saturation_property; + struct drm_property *tv_hue_property; /* Optional properties */ struct drm_property *scaling_mode_property; -- cgit v1.2.3-70-g09d2 From 31089c13bcb18d2cd2a3ddfbe3a28666346f237e Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 14 Aug 2009 15:47:18 +0200 Subject: timekeeping: Introduce timekeeping_leap_insert Move the adjustment of xtime, wall_to_monotonic and the update of the vsyscall variables to the timekeeping code. 
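The helper introduced below is small, but the invariant matters: xtime and wall_to_monotonic always move by equal and opposite amounts, so CLOCK_MONOTONIC stays smooth while UTC is stepped, and the vsyscall data is republished in one place. A minimal sketch matching the hunk that follows (clock is timekeeping.c's pointer to the current clocksource):

/* must hold xtime_lock; leapsecond is -1 for TIME_INS, +1 for TIME_DEL */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;		/* step the wall clock */
	wall_to_monotonic.tv_sec -= leapsecond;	/* keep monotonic time smooth */
	update_vsyscall(&xtime, clock);		/* republish to userspace */
}
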
Signed-off-by: John Stultz Signed-off-by: Martin Schwidefsky LKML-Reference: <20090814134807.609730216@de.ibm.com> Signed-off-by: Thomas Gleixner --- include/linux/time.h | 1 + kernel/time/ntp.c | 7 ++----- kernel/time/timekeeping.c | 7 +++++++ 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/time.h b/include/linux/time.h index ea16c1a01d5..e7c84455888 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -147,6 +147,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran); extern int timekeeping_valid_for_hres(void); extern void update_wall_time(void); extern void update_xtime_cache(u64 nsec); +extern void timekeeping_leap_insert(int leapsecond); struct tms; extern void do_sys_times(struct tms *); diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 7fc64375ff4..4800f933910 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -194,8 +194,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) case TIME_OK: break; case TIME_INS: - xtime.tv_sec--; - wall_to_monotonic.tv_sec++; + timekeeping_leap_insert(-1); time_state = TIME_OOP; printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n"); @@ -203,9 +202,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) res = HRTIMER_RESTART; break; case TIME_DEL: - xtime.tv_sec++; + timekeeping_leap_insert(1); time_tai--; - wall_to_monotonic.tv_sec--; time_state = TIME_WAIT; printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n"); @@ -219,7 +217,6 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) time_state = TIME_OK; break; } - update_vsyscall(&xtime, clock); write_sequnlock(&xtime_lock); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 02c0b2c9c67..b8b70fb545f 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -58,6 +58,13 @@ void update_xtime_cache(u64 nsec) struct clocksource *clock; +/* must hold xtime_lock */ +void timekeeping_leap_insert(int leapsecond) +{ + xtime.tv_sec += leapsecond; + wall_to_monotonic.tv_sec -= leapsecond; + update_vsyscall(&xtime, clock); +} #ifdef CONFIG_GENERIC_TIME /** -- cgit v1.2.3-70-g09d2 From a0f7d48bfb95a4c5172a2756dbc4b82afc8e9ae4 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:19 +0200 Subject: timekeeping: Remove clocksource inline functions The three inline functions clocksource_read, clocksource_enable and clocksource_disable are simple wrappers of an indirect call plus the copy from and to the mult_orig value. The functions are exclusively used by the timekeeping code which has intimate knowledge of the clocksource anyway. Therefore remove the inline functions. No functional change. 
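For reference, this is what the removed wrappers boil down to once the timekeeping code calls the clocksource hooks directly; the foo_* helper names are illustrative only, and the mult/mult_orig handling mirrors the deleted functions:

/* Illustrative only: the open-coded equivalents of the removed wrappers. */
static cycle_t foo_read_clock(struct clocksource *cs)
{
	return cs->read(cs);			/* was clocksource_read() */
}

static int foo_enable_clock(struct clocksource *cs)
{
	int ret = 0;

	if (cs->enable)
		ret = cs->enable(cs);
	cs->mult_orig = cs->mult;		/* pick up a changed frequency */
	return ret;				/* was clocksource_enable() */
}

static void foo_disable_clock(struct clocksource *cs)
{
	cs->mult = cs->mult_orig;		/* drop any NTP adjustment */
	if (cs->disable)
		cs->disable(cs);		/* was clocksource_disable() */
}
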
Signed-off-by: Martin Schwidefsky Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134807.903108946@de.ibm.com> Signed-off-by: Thomas Gleixner --- include/linux/clocksource.h | 58 --------------------------------------------- kernel/time/timekeeping.c | 41 ++++++++++++++++++++++---------- 2 files changed, 28 insertions(+), 71 deletions(-) (limited to 'include') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 1219be4fb42..a1ef46f61c8 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -267,64 +267,6 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) return (u32)tmp; } -/** - * clocksource_read: - Access the clocksource's current cycle value - * @cs: pointer to clocksource being read - * - * Uses the clocksource to return the current cycle_t value - */ -static inline cycle_t clocksource_read(struct clocksource *cs) -{ - return cs->read(cs); -} - -/** - * clocksource_enable: - enable clocksource - * @cs: pointer to clocksource - * - * Enables the specified clocksource. The clocksource callback - * function should start up the hardware and setup mult and field - * members of struct clocksource to reflect hardware capabilities. - */ -static inline int clocksource_enable(struct clocksource *cs) -{ - int ret = 0; - - if (cs->enable) - ret = cs->enable(cs); - - /* - * The frequency may have changed while the clocksource - * was disabled. If so the code in ->enable() must update - * the mult value to reflect the new frequency. Make sure - * mult_orig follows this change. - */ - cs->mult_orig = cs->mult; - - return ret; -} - -/** - * clocksource_disable: - disable clocksource - * @cs: pointer to clocksource - * - * Disables the specified clocksource. The clocksource callback - * function should power down the now unused hardware block to - * save power. - */ -static inline void clocksource_disable(struct clocksource *cs) -{ - /* - * Save mult_orig in mult so clocksource_enable() can - * restore the value regardless if ->enable() updates - * the value of mult or not. 
- */ - cs->mult = cs->mult_orig; - - if (cs->disable) - cs->disable(cs); -} - /** * cyc2ns - converts clocksource cycles to nanoseconds * @cs: Pointer to clocksource diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index b8b70fb545f..016a2591d71 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -79,7 +79,7 @@ static void clocksource_forward_now(void) cycle_t cycle_now, cycle_delta; s64 nsec; - cycle_now = clocksource_read(clock); + cycle_now = clock->read(clock); cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; clock->cycle_last = cycle_now; @@ -114,7 +114,7 @@ void getnstimeofday(struct timespec *ts) *ts = xtime; /* read clocksource: */ - cycle_now = clocksource_read(clock); + cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; @@ -146,7 +146,7 @@ ktime_t ktime_get(void) nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; /* read clocksource: */ - cycle_now = clocksource_read(clock); + cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; @@ -186,7 +186,7 @@ void ktime_get_ts(struct timespec *ts) tomono = wall_to_monotonic; /* read clocksource: */ - cycle_now = clocksource_read(clock); + cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; @@ -274,16 +274,29 @@ static void change_clocksource(void) clocksource_forward_now(); - if (clocksource_enable(new)) + if (new->enable && !new->enable(new)) return; + /* + * The frequency may have changed while the clocksource + * was disabled. If so the code in ->enable() must update + * the mult value to reflect the new frequency. Make sure + * mult_orig follows this change. + */ + new->mult_orig = new->mult; new->raw_time = clock->raw_time; old = clock; clock = new; - clocksource_disable(old); + /* + * Save mult_orig in mult so that the value can be restored + * regardless if ->enable() updates the value of mult or not. 
+ */ + old->mult = old->mult_orig; + if (old->disable) + old->disable(old); clock->cycle_last = 0; - clock->cycle_last = clocksource_read(clock); + clock->cycle_last = clock->read(clock); clock->error = 0; clock->xtime_nsec = 0; clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); @@ -373,7 +386,7 @@ void getrawmonotonic(struct timespec *ts) seq = read_seqbegin(&xtime_lock); /* read clocksource: */ - cycle_now = clocksource_read(clock); + cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; @@ -435,9 +448,12 @@ void __init timekeeping_init(void) ntp_init(); clock = clocksource_get_next(); - clocksource_enable(clock); + if (clock->enable) + clock->enable(clock); + /* set mult_orig on enable */ + clock->mult_orig = clock->mult; clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); - clock->cycle_last = clocksource_read(clock); + clock->cycle_last = clock->read(clock); xtime.tv_sec = sec; xtime.tv_nsec = 0; @@ -477,8 +493,7 @@ static int timekeeping_resume(struct sys_device *dev) } update_xtime_cache(0); /* re-base the last cycle value */ - clock->cycle_last = 0; - clock->cycle_last = clocksource_read(clock); + clock->cycle_last = clock->read(clock); clock->error = 0; timekeeping_suspended = 0; write_sequnlock_irqrestore(&xtime_lock, flags); @@ -630,7 +645,7 @@ void update_wall_time(void) return; #ifdef CONFIG_GENERIC_TIME - offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; + offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #else offset = clock->cycle_interval; #endif -- cgit v1.2.3-70-g09d2 From f1b82746c1e93daf24e1ab9bfbd39bcdb2e7018b Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:21 +0200 Subject: clocksource: Cleanup clocksource selection If a non high-resolution clocksource is first set as override clock and then registered it becomes active even if the system is in one-shot mode. Move the override check from sysfs_override_clocksource to the clocksource selection. That fixes the bug and simplifies the code. The check in clocksource_register for double registration of the same clocksource is removed without replacement. To find the initial clocksource a new weak function in jiffies.c is defined that returns the jiffies clocksource. The architecture code can then override the weak function with a more suitable clocksource, e.g. the TOD clock on s390. 
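A sketch of the intended architecture-side override of the new weak hook; everything named myarch_* is hypothetical (the s390 hunk below does the same thing with its TOD clock), and the mult/shift values are placeholders that a real driver would derive with clocksource_hz2mult():

/* Hypothetical architecture code: give timekeeping_init() a better
 * boot-time clock than the jiffies fallback in kernel/time/jiffies.c. */
static cycle_t myarch_timer_read(struct clocksource *cs)
{
	return 0;	/* read the free-running hardware counter here */
}

static struct clocksource clocksource_myarch_timer = {
	.name	= "myarch-timer",
	.rating	= 400,
	.read	= myarch_timer_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.mult	= 1000,			/* placeholder */
	.shift	= 10,			/* placeholder */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_myarch_timer;
}
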
[ tglx: Folded in a fix from John Stultz ] Signed-off-by: Martin Schwidefsky Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134808.388024160@de.ibm.com> Signed-off-by: Thomas Gleixner --- arch/s390/kernel/time.c | 4 ++ include/linux/clocksource.h | 2 + kernel/time/clocksource.c | 134 +++++++++++++++++--------------------------- kernel/time/jiffies.c | 6 +- kernel/time/timekeeping.c | 4 +- 5 files changed, 64 insertions(+), 86 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c8..afefe514df0 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -205,6 +205,10 @@ static struct clocksource clocksource_tod = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +struct clocksource * __init clocksource_default_clock(void) +{ + return &clocksource_tod; +} void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) { diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index a1ef46f61c8..f263b3abf46 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -322,6 +323,7 @@ extern void clocksource_touch_watchdog(void); extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_resume(void); +extern struct clocksource * __init __weak clocksource_default_clock(void); #ifdef CONFIG_GENERIC_TIME_VSYSCALL extern void update_vsyscall(struct timespec *ts, struct clocksource *c); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 7466cb81125..e91662e87cd 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -21,7 +21,6 @@ * * TODO WishList: * o Allow clocksource drivers to be unregistered - * o get rid of clocksource_jiffies extern */ #include @@ -107,12 +106,9 @@ u64 timecounter_cyc2time(struct timecounter *tc, } EXPORT_SYMBOL(timecounter_cyc2time); -/* XXX - Would like a better way for initializing curr_clocksource */ -extern struct clocksource clocksource_jiffies; - /*[Clocksource internal variables]--------- * curr_clocksource: - * currently selected clocksource. Initialized to clocksource_jiffies. + * currently selected clocksource. * next_clocksource: * pending next selected clocksource. * clocksource_list: @@ -123,9 +119,8 @@ extern struct clocksource clocksource_jiffies; * override_name: * Name of the user-specified clocksource. */ -static struct clocksource *curr_clocksource = &clocksource_jiffies; +static struct clocksource *curr_clocksource; static struct clocksource *next_clocksource; -static struct clocksource *clocksource_override; static LIST_HEAD(clocksource_list); static DEFINE_SPINLOCK(clocksource_lock); static char override_name[32]; @@ -320,6 +315,7 @@ void clocksource_touch_watchdog(void) clocksource_resume_watchdog(); } +#ifdef CONFIG_GENERIC_TIME /** * clocksource_get_next - Returns the selected clocksource * @@ -339,56 +335,65 @@ struct clocksource *clocksource_get_next(void) } /** - * select_clocksource - Selects the best registered clocksource. + * clocksource_select - Select the best clocksource available * * Private function. Must hold clocksource_lock when called. * * Select the clocksource with the best rating, or the clocksource, * which is selected by userspace override. 
*/ -static struct clocksource *select_clocksource(void) +static void clocksource_select(void) { - struct clocksource *next; + struct clocksource *best, *cs; if (list_empty(&clocksource_list)) - return NULL; + return; + /* First clocksource on the list has the best rating. */ + best = list_first_entry(&clocksource_list, struct clocksource, list); + /* Check for the override clocksource. */ + list_for_each_entry(cs, &clocksource_list, list) { + if (strcmp(cs->name, override_name) != 0) + continue; + /* + * Check to make sure we don't switch to a non-highres + * capable clocksource if the tick code is in oneshot + * mode (highres or nohz) + */ + if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && + tick_oneshot_mode_active()) { + /* Override clocksource cannot be used. */ + printk(KERN_WARNING "Override clocksource %s is not " + "HRT compatible. Cannot switch while in " + "HRT/NOHZ mode\n", cs->name); + override_name[0] = 0; + } else + /* Override clocksource can be used. */ + best = cs; + break; + } + if (curr_clocksource != best) + next_clocksource = best; +} - if (clocksource_override) - next = clocksource_override; - else - next = list_entry(clocksource_list.next, struct clocksource, - list); +#else /* CONFIG_GENERIC_TIME */ - if (next == curr_clocksource) - return NULL; +static void clocksource_select(void) { } - return next; -} +#endif /* * Enqueue the clocksource sorted by rating */ -static int clocksource_enqueue(struct clocksource *c) +static void clocksource_enqueue(struct clocksource *cs) { - struct list_head *tmp, *entry = &clocksource_list; - - list_for_each(tmp, &clocksource_list) { - struct clocksource *cs; + struct list_head *entry = &clocksource_list; + struct clocksource *tmp; - cs = list_entry(tmp, struct clocksource, list); - if (cs == c) - return -EBUSY; + list_for_each_entry(tmp, &clocksource_list, list) /* Keep track of the place, where to insert */ - if (cs->rating >= c->rating) - entry = tmp; - } - list_add(&c->list, entry); - - if (strlen(c->name) == strlen(override_name) && - !strcmp(c->name, override_name)) - clocksource_override = c; - - return 0; + if (tmp->rating >= cs->rating) + entry = &tmp->list; + list_add(&cs->list, entry); } /** @@ -397,19 +402,16 @@ static int clocksource_enqueue(struct clocksource *c) * * Returns -EBUSY if registration fails, zero otherwise. 
*/ -int clocksource_register(struct clocksource *c) +int clocksource_register(struct clocksource *cs) { unsigned long flags; - int ret; spin_lock_irqsave(&clocksource_lock, flags); - ret = clocksource_enqueue(c); - if (!ret) - next_clocksource = select_clocksource(); + clocksource_enqueue(cs); + clocksource_select(); spin_unlock_irqrestore(&clocksource_lock, flags); - if (!ret) - clocksource_check_watchdog(c); - return ret; + clocksource_check_watchdog(cs); + return 0; } EXPORT_SYMBOL(clocksource_register); @@ -425,7 +427,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating) list_del(&cs->list); cs->rating = rating; clocksource_enqueue(cs); - next_clocksource = select_clocksource(); + clocksource_select(); spin_unlock_irqrestore(&clocksource_lock, flags); } @@ -438,9 +440,7 @@ void clocksource_unregister(struct clocksource *cs) spin_lock_irqsave(&clocksource_lock, flags); list_del(&cs->list); - if (clocksource_override == cs) - clocksource_override = NULL; - next_clocksource = select_clocksource(); + clocksource_select(); spin_unlock_irqrestore(&clocksource_lock, flags); } @@ -478,9 +478,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { - struct clocksource *ovr = NULL; size_t ret = count; - int len; /* strings from sysfs write are not 0 terminated! */ if (count >= sizeof(override_name)) @@ -495,37 +493,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, if (count > 0) memcpy(override_name, buf, count); override_name[count] = 0; - - len = strlen(override_name); - if (len) { - struct clocksource *cs; - - ovr = clocksource_override; - /* try to select it: */ - list_for_each_entry(cs, &clocksource_list, list) { - if (strlen(cs->name) == len && - !strcmp(cs->name, override_name)) - ovr = cs; - } - } - - /* - * Check to make sure we don't switch to a non-highres capable - * clocksource if the tick code is in oneshot mode (highres or nohz) - */ - if (tick_oneshot_mode_active() && ovr && - !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) { - printk(KERN_WARNING "%s clocksource is not HRT compatible. 
" - "Cannot switch while in HRT/NOHZ mode\n", ovr->name); - ovr = NULL; - override_name[0] = 0; - } - - /* Reselect, when the override name has changed */ - if (ovr != clocksource_override) { - clocksource_override = ovr; - next_clocksource = select_clocksource(); - } + clocksource_select(); spin_unlock_irq(&clocksource_lock); diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index c3f6c30816e..5404a845690 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -61,7 +61,6 @@ struct clocksource clocksource_jiffies = { .read = jiffies_read, .mask = 0xffffffff, /*32bits*/ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ - .mult_orig = NSEC_PER_JIFFY << JIFFIES_SHIFT, .shift = JIFFIES_SHIFT, }; @@ -71,3 +70,8 @@ static int __init init_jiffies_clocksource(void) } core_initcall(init_jiffies_clocksource); + +struct clocksource * __init __weak clocksource_default_clock(void) +{ + return &clocksource_jiffies; +} diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index b5673016089..325a9b63265 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -269,7 +269,7 @@ static void change_clocksource(void) new = clocksource_get_next(); - if (clock == new) + if (!new || clock == new) return; clocksource_forward_now(); @@ -446,7 +446,7 @@ void __init timekeeping_init(void) ntp_init(); - clock = clocksource_get_next(); + clock = clocksource_default_clock(); if (clock->enable) clock->enable(clock); /* set mult_orig on enable */ -- cgit v1.2.3-70-g09d2 From c55c87c892c1875deace0c8fc28787335277fdf2 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:25 +0200 Subject: clocksource: Move watchdog downgrade to a work queue thread Move the downgrade of an unstable clocksource from the timer interrupt context into the process context of a work queue thread. This is needed to be able to do the clocksource switch with stop_machine. Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134809.354926067@de.ibm.com> Signed-off-by: Thomas Gleixner --- include/linux/clocksource.h | 1 + kernel/time/clocksource.c | 56 +++++++++++++++++++++++++++++++-------------- 2 files changed, 40 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index f263b3abf46..19ad43af62d 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -213,6 +213,7 @@ extern struct clocksource *clock; /* current clocksource */ #define CLOCK_SOURCE_WATCHDOG 0x10 #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 +#define CLOCK_SOURCE_UNSTABLE 0x40 /* simplify initialization of mask field */ #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? 
((1ULL<<(bits))-1) : -1) diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 56aaa749645..f1508019bfb 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -143,10 +143,13 @@ fs_initcall(clocksource_done_booting); static LIST_HEAD(watchdog_list); static struct clocksource *watchdog; static struct timer_list watchdog_timer; +static struct work_struct watchdog_work; static DEFINE_SPINLOCK(watchdog_lock); static cycle_t watchdog_last; static int watchdog_running; +static void clocksource_watchdog_work(struct work_struct *work); + /* * Interval: 0.5sec Threshold: 0.0625s */ @@ -158,15 +161,16 @@ static void clocksource_unstable(struct clocksource *cs, int64_t delta) printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", cs->name, delta); cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); - clocksource_change_rating(cs, 0); - list_del(&cs->wd_list); + cs->flags |= CLOCK_SOURCE_UNSTABLE; + schedule_work(&watchdog_work); } static void clocksource_watchdog(unsigned long data) { - struct clocksource *cs, *tmp; + struct clocksource *cs; cycle_t csnow, wdnow; int64_t wd_nsec, cs_nsec; + int next_cpu; spin_lock(&watchdog_lock); if (!watchdog_running) @@ -176,7 +180,12 @@ static void clocksource_watchdog(unsigned long data) wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); watchdog_last = wdnow; - list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { + list_for_each_entry(cs, &watchdog_list, wd_list) { + + /* Clocksource already marked unstable? */ + if (cs->flags & CLOCK_SOURCE_UNSTABLE) + continue; + csnow = cs->read(cs); /* Clocksource initialized ? */ @@ -207,19 +216,15 @@ static void clocksource_watchdog(unsigned long data) } } - if (!list_empty(&watchdog_list)) { - /* - * Cycle through CPUs to check if the CPUs stay - * synchronized to each other. - */ - int next_cpu = cpumask_next(raw_smp_processor_id(), - cpu_online_mask); - - if (next_cpu >= nr_cpu_ids) - next_cpu = cpumask_first(cpu_online_mask); - watchdog_timer.expires += WATCHDOG_INTERVAL; - add_timer_on(&watchdog_timer, next_cpu); - } + /* + * Cycle through CPUs to check if the CPUs stay synchronized + * to each other. + */ + next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(cpu_online_mask); + watchdog_timer.expires += WATCHDOG_INTERVAL; + add_timer_on(&watchdog_timer, next_cpu); out: spin_unlock(&watchdog_lock); } @@ -228,6 +233,7 @@ static inline void clocksource_start_watchdog(void) { if (watchdog_running || !watchdog || list_empty(&watchdog_list)) return; + INIT_WORK(&watchdog_work, clocksource_watchdog_work); init_timer(&watchdog_timer); watchdog_timer.function = clocksource_watchdog; watchdog_last = watchdog->read(watchdog); @@ -313,6 +319,22 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs) spin_unlock_irqrestore(&watchdog_lock, flags); } +static void clocksource_watchdog_work(struct work_struct *work) +{ + struct clocksource *cs, *tmp; + unsigned long flags; + + spin_lock_irqsave(&watchdog_lock, flags); + list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) + if (cs->flags & CLOCK_SOURCE_UNSTABLE) { + list_del_init(&cs->wd_list); + clocksource_change_rating(cs, 0); + } + /* Check if the watchdog timer needs to be stopped. 
*/ + clocksource_stop_watchdog(); + spin_unlock(&watchdog_lock); +} + #else /* CONFIG_CLOCKSOURCE_WATCHDOG */ static void clocksource_enqueue_watchdog(struct clocksource *cs) -- cgit v1.2.3-70-g09d2 From 155ec60226ae0ae2aadaa57c951a58a359331030 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:26 +0200 Subject: timekeeping: Introduce struct timekeeper Add struct timekeeper to keep the internal values timekeeping.c needs in regard to the currently selected clock source. This moves the timekeeping intervals, xtime_nsec and the ntp error value from struct clocksource to struct timekeeper. The raw_time is removed from the clocksource as well. It gets treated like xtime as a global variable. Eventually xtime raw_time should be moved to struct timekeeper. [ tglx: minor cleanup ] Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134809.613209842@de.ibm.com> Signed-off-by: Thomas Gleixner --- arch/s390/kernel/time.c | 1 - include/linux/clocksource.h | 54 +--------- kernel/time/clocksource.c | 6 +- kernel/time/timekeeping.c | 235 +++++++++++++++++++++++++++++--------------- 4 files changed, 164 insertions(+), 132 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index afefe514df0..e76c2e7a8b9 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -280,7 +280,6 @@ void __init time_init(void) now = get_clock(); tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime); clocksource_tod.cycle_last = now; - clocksource_tod.raw_time = xtime; tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts); set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec); write_sequnlock_irqrestore(&xtime_lock, flags); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 19ad43af62d..e12e3095e2f 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -155,8 +155,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * @flags: flags describing special properties * @vread: vsyscall based read * @resume: resume function for the clocksource, if necessary - * @cycle_interval: Used internally by timekeeping core, please ignore. - * @xtime_interval: Used internally by timekeeping core, please ignore. */ struct clocksource { /* @@ -182,19 +180,12 @@ struct clocksource { #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) #endif - /* timekeeping specific data, ignore */ - cycle_t cycle_interval; - u64 xtime_interval; - u32 raw_interval; /* * Second part is written at each timer interrupt * Keep it in a different cache line to dirty no * more than one cache line. */ cycle_t cycle_last ____cacheline_aligned_in_smp; - u64 xtime_nsec; - s64 error; - struct timespec raw_time; #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ @@ -203,8 +194,6 @@ struct clocksource { #endif }; -extern struct clocksource *clock; /* current clocksource */ - /* * Clock source flags bits:: */ @@ -270,50 +259,15 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) } /** - * cyc2ns - converts clocksource cycles to nanoseconds - * @cs: Pointer to clocksource - * @cycles: Cycles + * clocksource_cyc2ns - converts clocksource cycles to nanoseconds * - * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds. + * Converts cycles to nanoseconds, using the given mult and shift. 
* * XXX - This could use some mult_lxl_ll() asm optimization */ -static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) +static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) { - u64 ret = (u64)cycles; - ret = (ret * cs->mult) >> cs->shift; - return ret; -} - -/** - * clocksource_calculate_interval - Calculates a clocksource interval struct - * - * @c: Pointer to clocksource. - * @length_nsec: Desired interval length in nanoseconds. - * - * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment - * pair and interval request. - * - * Unless you're the timekeeping code, you should not be using this! - */ -static inline void clocksource_calculate_interval(struct clocksource *c, - unsigned long length_nsec) -{ - u64 tmp; - - /* Do the ns -> cycle conversion first, using original mult */ - tmp = length_nsec; - tmp <<= c->shift; - tmp += c->mult_orig/2; - do_div(tmp, c->mult_orig); - - c->cycle_interval = (cycle_t)tmp; - if (c->cycle_interval == 0) - c->cycle_interval = 1; - - /* Go back from cycles -> shifted ns, this time use ntp adjused mult */ - c->xtime_interval = (u64)c->cycle_interval * c->mult; - c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift; + return ((u64) cycles * mult) >> shift; } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f1508019bfb..f18c9a6bdcf 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -177,7 +177,8 @@ static void clocksource_watchdog(unsigned long data) goto out; wdnow = watchdog->read(watchdog); - wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); + wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask, + watchdog->mult, watchdog->shift); watchdog_last = wdnow; list_for_each_entry(cs, &watchdog_list, wd_list) { @@ -196,7 +197,8 @@ static void clocksource_watchdog(unsigned long data) } /* Check the deviation from the watchdog clocksource. */ - cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask); + cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) & + cs->mask, cs->mult, cs->shift); cs->wd_last = csnow; if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { clocksource_unstable(cs, cs_nsec - wd_nsec); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 325a9b63265..7af45cbf6b1 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -19,6 +19,65 @@ #include #include +/* Structure holding internal timekeeping values. */ +struct timekeeper { + /* Current clocksource used for timekeeping. */ + struct clocksource *clock; + + /* Number of clock cycles in one NTP interval. */ + cycle_t cycle_interval; + /* Number of clock shifted nano seconds in one NTP interval. */ + u64 xtime_interval; + /* Raw nano seconds accumulated per NTP interval. */ + u32 raw_interval; + + /* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */ + u64 xtime_nsec; + /* Difference between accumulated time and NTP time in ntp + * shifted nano seconds. */ + s64 ntp_error; +}; + +struct timekeeper timekeeper; + +/** + * timekeeper_setup_internals - Set up internals to use clocksource clock. + * + * @clock: Pointer to clocksource. + * + * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment + * pair and interval request. + * + * Unless you're the timekeeping code, you should not be using this! 
+ */ +static void timekeeper_setup_internals(struct clocksource *clock) +{ + cycle_t interval; + u64 tmp; + + timekeeper.clock = clock; + clock->cycle_last = clock->read(clock); + + /* Do the ns -> cycle conversion first, using original mult */ + tmp = NTP_INTERVAL_LENGTH; + tmp <<= clock->shift; + tmp += clock->mult_orig/2; + do_div(tmp, clock->mult_orig); + if (tmp == 0) + tmp = 1; + + interval = (cycle_t) tmp; + timekeeper.cycle_interval = interval; + + /* Go back from cycles -> shifted ns */ + timekeeper.xtime_interval = (u64) interval * clock->mult; + timekeeper.raw_interval = + ((u64) interval * clock->mult_orig) >> clock->shift; + + timekeeper.xtime_nsec = 0; + + timekeeper.ntp_error = 0; +} /* * This read-write spinlock protects us from races in SMP while @@ -46,6 +105,11 @@ struct timespec xtime __attribute__ ((aligned (16))); struct timespec wall_to_monotonic __attribute__ ((aligned (16))); static unsigned long total_sleep_time; /* seconds */ +/* + * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. + */ +struct timespec raw_time; + /* flag for if timekeeping is suspended */ int __read_mostly timekeeping_suspended; @@ -56,42 +120,42 @@ void update_xtime_cache(u64 nsec) timespec_add_ns(&xtime_cache, nsec); } -struct clocksource *clock; - /* must hold xtime_lock */ void timekeeping_leap_insert(int leapsecond) { xtime.tv_sec += leapsecond; wall_to_monotonic.tv_sec -= leapsecond; - update_vsyscall(&xtime, clock); + update_vsyscall(&xtime, timekeeper.clock); } #ifdef CONFIG_GENERIC_TIME /** - * clocksource_forward_now - update clock to the current time + * timekeeping_forward_now - update clock to the current time * * Forward the current clock to update its state since the last call to * update_wall_time(). This is useful before significant clock changes, * as it avoids having to deal with this time offset explicitly. 
*/ -static void clocksource_forward_now(void) +static void timekeeping_forward_now(void) { cycle_t cycle_now, cycle_delta; + struct clocksource *clock; s64 nsec; + clock = timekeeper.clock; cycle_now = clock->read(clock); cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; clock->cycle_last = cycle_now; - nsec = cyc2ns(clock, cycle_delta); + nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); /* If arch requires, add in gettimeoffset() */ nsec += arch_gettimeoffset(); timespec_add_ns(&xtime, nsec); - nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift; - clock->raw_time.tv_nsec += nsec; + nsec = clocksource_cyc2ns(cycle_delta, clock->mult_orig, clock->shift); + timespec_add_ns(&raw_time, nsec); } /** @@ -103,6 +167,7 @@ static void clocksource_forward_now(void) void getnstimeofday(struct timespec *ts) { cycle_t cycle_now, cycle_delta; + struct clocksource *clock; unsigned long seq; s64 nsecs; @@ -114,13 +179,15 @@ void getnstimeofday(struct timespec *ts) *ts = xtime; /* read clocksource: */ + clock = timekeeper.clock; cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs = cyc2ns(clock, cycle_delta); + nsecs = clocksource_cyc2ns(cycle_delta, clock->mult, + clock->shift); /* If arch requires, add in gettimeoffset() */ nsecs += arch_gettimeoffset(); @@ -135,6 +202,7 @@ EXPORT_SYMBOL(getnstimeofday); ktime_t ktime_get(void) { cycle_t cycle_now, cycle_delta; + struct clocksource *clock; unsigned int seq; s64 secs, nsecs; @@ -146,13 +214,15 @@ ktime_t ktime_get(void) nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; /* read clocksource: */ + clock = timekeeper.clock; cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs += cyc2ns(clock, cycle_delta); + nsecs += clocksource_cyc2ns(cycle_delta, clock->mult, + clock->shift); } while (read_seqretry(&xtime_lock, seq)); /* @@ -174,6 +244,7 @@ EXPORT_SYMBOL_GPL(ktime_get); void ktime_get_ts(struct timespec *ts) { cycle_t cycle_now, cycle_delta; + struct clocksource *clock; struct timespec tomono; unsigned int seq; s64 nsecs; @@ -186,13 +257,15 @@ void ktime_get_ts(struct timespec *ts) tomono = wall_to_monotonic; /* read clocksource: */ + clock = timekeeper.clock; cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs = cyc2ns(clock, cycle_delta); + nsecs = clocksource_cyc2ns(cycle_delta, clock->mult, + clock->shift); } while (read_seqretry(&xtime_lock, seq)); @@ -233,7 +306,7 @@ int do_settimeofday(struct timespec *tv) write_seqlock_irqsave(&xtime_lock, flags); - clocksource_forward_now(); + timekeeping_forward_now(); ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec; ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec; @@ -243,10 +316,10 @@ int do_settimeofday(struct timespec *tv) update_xtime_cache(0); - clock->error = 0; + timekeeper.ntp_error = 0; ntp_clear(); - update_vsyscall(&xtime, clock); + update_vsyscall(&xtime, timekeeper.clock); write_sequnlock_irqrestore(&xtime_lock, flags); @@ -269,10 +342,10 @@ static void change_clocksource(void) new = clocksource_get_next(); - if (!new || clock == new) + if (!new || timekeeper.clock == new) return; - clocksource_forward_now(); + timekeeping_forward_now(); if (new->enable && 
!new->enable(new)) return; @@ -284,9 +357,9 @@ static void change_clocksource(void) */ new->mult_orig = new->mult; - new->raw_time = clock->raw_time; - old = clock; - clock = new; + old = timekeeper.clock; + timekeeper_setup_internals(new); + /* * Save mult_orig in mult so that the value can be restored * regardless if ->enable() updates the value of mult or not. @@ -295,22 +368,10 @@ static void change_clocksource(void) if (old->disable) old->disable(old); - clock->cycle_last = clock->read(clock); - clock->error = 0; - clock->xtime_nsec = 0; - clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); - tick_clock_notify(); - - /* - * We're holding xtime lock and waking up klogd would deadlock - * us on enqueue. So no printing! - printk(KERN_INFO "Time: %s clocksource has been installed.\n", - clock->name); - */ } #else /* GENERIC_TIME */ -static inline void clocksource_forward_now(void) { } +static inline void timekeeping_forward_now(void) { } static inline void change_clocksource(void) { } /** @@ -380,20 +441,23 @@ void getrawmonotonic(struct timespec *ts) unsigned long seq; s64 nsecs; cycle_t cycle_now, cycle_delta; + struct clocksource *clock; do { seq = read_seqbegin(&xtime_lock); /* read clocksource: */ + clock = timekeeper.clock; cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift; + nsecs = clocksource_cyc2ns(cycle_delta, clock->mult_orig, + clock->shift); - *ts = clock->raw_time; + *ts = raw_time; } while (read_seqretry(&xtime_lock, seq)); @@ -413,7 +477,7 @@ int timekeeping_valid_for_hres(void) do { seq = read_seqbegin(&xtime_lock); - ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; + ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; } while (read_seqretry(&xtime_lock, seq)); @@ -439,6 +503,7 @@ unsigned long __attribute__((weak)) read_persistent_clock(void) */ void __init timekeeping_init(void) { + struct clocksource *clock; unsigned long flags; unsigned long sec = read_persistent_clock(); @@ -451,11 +516,13 @@ void __init timekeeping_init(void) clock->enable(clock); /* set mult_orig on enable */ clock->mult_orig = clock->mult; - clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); - clock->cycle_last = clock->read(clock); + + timekeeper_setup_internals(clock); xtime.tv_sec = sec; xtime.tv_nsec = 0; + raw_time.tv_sec = 0; + raw_time.tv_nsec = 0; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); update_xtime_cache(0); @@ -492,8 +559,8 @@ static int timekeeping_resume(struct sys_device *dev) } update_xtime_cache(0); /* re-base the last cycle value */ - clock->cycle_last = clock->read(clock); - clock->error = 0; + timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); + timekeeper.ntp_error = 0; timekeeping_suspended = 0; write_sequnlock_irqrestore(&xtime_lock, flags); @@ -514,7 +581,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) timekeeping_suspend_time = read_persistent_clock(); write_seqlock_irqsave(&xtime_lock, flags); - clocksource_forward_now(); + timekeeping_forward_now(); timekeeping_suspended = 1; write_sequnlock_irqrestore(&xtime_lock, flags); @@ -549,7 +616,7 @@ device_initcall(timekeeping_init_device); * If the error is already larger, we look ahead even further * to compensate for late or lost adjustments. 
*/ -static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, +static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval, s64 *offset) { s64 tick_error, i; @@ -565,7 +632,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, * here. This is tuned so that an error of about 1 msec is adjusted * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). */ - error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); + error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); error2 = abs(error2); for (look_ahead = 0; error2 > 0; look_ahead++) error2 >>= 2; @@ -574,8 +641,9 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, * Now calculate the error in (1 << look_ahead) ticks, but first * remove the single look ahead already included in the error. */ - tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1); - tick_error -= clock->xtime_interval >> 1; + tick_error = tick_length >> + (NTP_SCALE_SHIFT - timekeeper.clock->shift + 1); + tick_error -= timekeeper.xtime_interval >> 1; error = ((error - tick_error) >> look_ahead) + tick_error; /* Finally calculate the adjustment shift value. */ @@ -600,18 +668,19 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, * this is optimized for the most common adjustments of -1,0,1, * for other values we can do a bit more work. */ -static void clocksource_adjust(s64 offset) +static void timekeeping_adjust(s64 offset) { - s64 error, interval = clock->cycle_interval; + s64 error, interval = timekeeper.cycle_interval; int adj; - error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1); + error = timekeeper.ntp_error >> + (NTP_SCALE_SHIFT - timekeeper.clock->shift - 1); if (error > interval) { error >>= 2; if (likely(error <= interval)) adj = 1; else - adj = clocksource_bigadjust(error, &interval, &offset); + adj = timekeeping_bigadjust(error, &interval, &offset); } else if (error < -interval) { error >>= 2; if (likely(error >= -interval)) { @@ -619,15 +688,15 @@ static void clocksource_adjust(s64 offset) interval = -interval; offset = -offset; } else - adj = clocksource_bigadjust(error, &interval, &offset); + adj = timekeeping_bigadjust(error, &interval, &offset); } else return; - clock->mult += adj; - clock->xtime_interval += interval; - clock->xtime_nsec -= offset; - clock->error -= (interval - offset) << - (NTP_SCALE_SHIFT - clock->shift); + timekeeper.clock->mult += adj; + timekeeper.xtime_interval += interval; + timekeeper.xtime_nsec -= offset; + timekeeper.ntp_error -= (interval - offset) << + (NTP_SCALE_SHIFT - timekeeper.clock->shift); } /** @@ -637,53 +706,59 @@ static void clocksource_adjust(s64 offset) */ void update_wall_time(void) { + struct clocksource *clock; cycle_t offset; + s64 nsecs; /* Make sure we're fully resumed: */ if (unlikely(timekeeping_suspended)) return; + clock = timekeeper.clock; #ifdef CONFIG_GENERIC_TIME offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #else - offset = clock->cycle_interval; + offset = timekeeper.cycle_interval; #endif - clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift; + timekeeper.xtime_nsec = (s64)xtime.tv_nsec << clock->shift; /* normally this loop will run just once, however in the * case of lost or late ticks, it will accumulate correctly. 
*/ - while (offset >= clock->cycle_interval) { + while (offset >= timekeeper.cycle_interval) { + u64 nsecps = (u64)NSEC_PER_SEC << clock->shift; + /* accumulate one interval */ - offset -= clock->cycle_interval; - clock->cycle_last += clock->cycle_interval; + offset -= timekeeper.cycle_interval; + clock->cycle_last += timekeeper.cycle_interval; - clock->xtime_nsec += clock->xtime_interval; - if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) { - clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift; + timekeeper.xtime_nsec += timekeeper.xtime_interval; + if (timekeeper.xtime_nsec >= nsecps) { + timekeeper.xtime_nsec -= nsecps; xtime.tv_sec++; second_overflow(); } - clock->raw_time.tv_nsec += clock->raw_interval; - if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) { - clock->raw_time.tv_nsec -= NSEC_PER_SEC; - clock->raw_time.tv_sec++; + raw_time.tv_nsec += timekeeper.raw_interval; + if (raw_time.tv_nsec >= NSEC_PER_SEC) { + raw_time.tv_nsec -= NSEC_PER_SEC; + raw_time.tv_sec++; } /* accumulate error between NTP and clock interval */ - clock->error += tick_length; - clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift); + timekeeper.ntp_error += tick_length; + timekeeper.ntp_error -= timekeeper.xtime_interval << + (NTP_SCALE_SHIFT - clock->shift); } /* correct the clock when NTP error is too big */ - clocksource_adjust(offset); + timekeeping_adjust(offset); /* * Since in the loop above, we accumulate any amount of time * in xtime_nsec over a second into xtime.tv_sec, its possible for * xtime_nsec to be fairly small after the loop. Further, if we're - * slightly speeding the clocksource up in clocksource_adjust(), + * slightly speeding the clocksource up in timekeeping_adjust(), * its possible the required corrective factor to xtime_nsec could * cause it to underflow. * @@ -695,24 +770,26 @@ void update_wall_time(void) * We'll correct this error next time through this function, when * xtime_nsec is not as small. */ - if (unlikely((s64)clock->xtime_nsec < 0)) { - s64 neg = -(s64)clock->xtime_nsec; - clock->xtime_nsec = 0; - clock->error += neg << (NTP_SCALE_SHIFT - clock->shift); + if (unlikely((s64)timekeeper.xtime_nsec < 0)) { + s64 neg = -(s64)timekeeper.xtime_nsec; + timekeeper.xtime_nsec = 0; + timekeeper.ntp_error += neg << (NTP_SCALE_SHIFT - clock->shift); } /* store full nanoseconds into xtime after rounding it up and * add the remainder to the error difference. */ - xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1; - clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; - clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift); + xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >> clock->shift) + 1; + timekeeper.xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; + timekeeper.ntp_error += timekeeper.xtime_nsec << + (NTP_SCALE_SHIFT - clock->shift); - update_xtime_cache(cyc2ns(clock, offset)); + nsecs = clocksource_cyc2ns(offset, clock->mult, clock->shift); + update_xtime_cache(nsecs); /* check to see if there is a new clocksource to use */ change_clocksource(); - update_vsyscall(&xtime, clock); + update_vsyscall(&xtime, timekeeper.clock); } /** -- cgit v1.2.3-70-g09d2 From 0a54419836254a27baecd9037103171bcbabaf67 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:28 +0200 Subject: timekeeping: Move NTP adjusted clock multiplier to struct timekeeper The clocksource structure has two multipliers, the unmodified multiplier clock->mult_orig and the NTP corrected multiplier clock->mult. 
The NTP multiplier is misplaced in the struct clocksource, this is private information of the timekeeping code. Add the mult field to the struct timekeeper to contain the NTP corrected value, keep the unmodifed multiplier in clock->mult and remove clock->mult_orig. Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134810.149047645@de.ibm.com> Signed-off-by: Thomas Gleixner --- arch/arm/plat-omap/common.c | 7 ++---- include/linux/clocksource.h | 4 +--- kernel/time/timekeeping.c | 53 ++++++++++++++++++++------------------------- 3 files changed, 27 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index ebcf006406f..95587b6c025 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c @@ -253,11 +253,8 @@ static struct clocksource clocksource_32k = { */ unsigned long long sched_clock(void) { - unsigned long long ret; - - ret = (unsigned long long)clocksource_32k.read(&clocksource_32k); - ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift; - return ret; + return clocksource_cyc2ns(clocksource_32k.read(&clocksource_32k), + clocksource_32k.mult, clocksource_32k.shift); } static int __init omap_init_clocksource_32k(void) diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index e12e3095e2f..e34015effeb 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -149,8 +149,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * @disable: optional function to disable the clocksource * @mask: bitmask for two's complement * subtraction of non 64 bit counters - * @mult: cycle to nanosecond multiplier (adjusted by NTP) - * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP) + * @mult: cycle to nanosecond multiplier * @shift: cycle to nanosecond divisor (power of two) * @flags: flags describing special properties * @vread: vsyscall based read @@ -168,7 +167,6 @@ struct clocksource { void (*disable)(struct clocksource *cs); cycle_t mask; u32 mult; - u32 mult_orig; u32 shift; unsigned long flags; cycle_t (*vread)(void); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index dfdab1cefe1..f4056f6c263 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -41,6 +41,8 @@ struct timekeeper { /* Shift conversion between clock shifted nano seconds and * ntp shifted nano seconds. */ int ntp_error_shift; + /* NTP adjusted clock multiplier */ + u32 mult; }; struct timekeeper timekeeper; @@ -66,8 +68,8 @@ static void timekeeper_setup_internals(struct clocksource *clock) /* Do the ns -> cycle conversion first, using original mult */ tmp = NTP_INTERVAL_LENGTH; tmp <<= clock->shift; - tmp += clock->mult_orig/2; - do_div(tmp, clock->mult_orig); + tmp += clock->mult/2; + do_div(tmp, clock->mult); if (tmp == 0) tmp = 1; @@ -77,13 +79,20 @@ static void timekeeper_setup_internals(struct clocksource *clock) /* Go back from cycles -> shifted ns */ timekeeper.xtime_interval = (u64) interval * clock->mult; timekeeper.raw_interval = - ((u64) interval * clock->mult_orig) >> clock->shift; + ((u64) interval * clock->mult) >> clock->shift; timekeeper.xtime_nsec = 0; timekeeper.shift = clock->shift; timekeeper.ntp_error = 0; timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; + + /* + * The timekeeper keeps its own mult values for the currently + * active clocksource. These value will be adjusted via NTP + * to counteract clock drifting. 
+ */ + timekeeper.mult = clock->mult; } /* @@ -154,14 +163,15 @@ static void timekeeping_forward_now(void) cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; clock->cycle_last = cycle_now; - nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); + nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult, + timekeeper.shift); /* If arch requires, add in gettimeoffset() */ nsec += arch_gettimeoffset(); timespec_add_ns(&xtime, nsec); - nsec = clocksource_cyc2ns(cycle_delta, clock->mult_orig, clock->shift); + nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); timespec_add_ns(&raw_time, nsec); } @@ -193,8 +203,8 @@ void getnstimeofday(struct timespec *ts) cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs = clocksource_cyc2ns(cycle_delta, clock->mult, - clock->shift); + nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult, + timekeeper.shift); /* If arch requires, add in gettimeoffset() */ nsecs += arch_gettimeoffset(); @@ -228,8 +238,8 @@ ktime_t ktime_get(void) cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs += clocksource_cyc2ns(cycle_delta, clock->mult, - clock->shift); + nsecs += clocksource_cyc2ns(cycle_delta, timekeeper.mult, + timekeeper.shift); } while (read_seqretry(&xtime_lock, seq)); /* @@ -271,8 +281,8 @@ void ktime_get_ts(struct timespec *ts) cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs = clocksource_cyc2ns(cycle_delta, clock->mult, - clock->shift); + nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult, + timekeeper.shift); } while (read_seqretry(&xtime_lock, seq)); @@ -356,22 +366,10 @@ static void change_clocksource(void) if (new->enable && !new->enable(new)) return; - /* - * The frequency may have changed while the clocksource - * was disabled. If so the code in ->enable() must update - * the mult value to reflect the new frequency. Make sure - * mult_orig follows this change. - */ - new->mult_orig = new->mult; old = timekeeper.clock; timekeeper_setup_internals(new); - /* - * Save mult_orig in mult so that the value can be restored - * regardless if ->enable() updates the value of mult or not. 
- */ - old->mult = old->mult_orig; if (old->disable) old->disable(old); @@ -461,7 +459,7 @@ void getrawmonotonic(struct timespec *ts) cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; /* convert to nanoseconds: */ - nsecs = clocksource_cyc2ns(cycle_delta, clock->mult_orig, + nsecs = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); *ts = raw_time; @@ -521,9 +519,6 @@ void __init timekeeping_init(void) clock = clocksource_default_clock(); if (clock->enable) clock->enable(clock); - /* set mult_orig on enable */ - clock->mult_orig = clock->mult; - timekeeper_setup_internals(clock); xtime.tv_sec = sec; @@ -697,7 +692,7 @@ static void timekeeping_adjust(s64 offset) } else return; - timekeeper.clock->mult += adj; + timekeeper.mult += adj; timekeeper.xtime_interval += interval; timekeeper.xtime_nsec -= offset; timekeeper.ntp_error -= (interval - offset) << @@ -789,7 +784,7 @@ void update_wall_time(void) timekeeper.ntp_error += timekeeper.xtime_nsec << timekeeper.ntp_error_shift; - nsecs = clocksource_cyc2ns(offset, clock->mult, clock->shift); + nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift); update_xtime_cache(nsecs); /* check to see if there is a new clocksource to use */ -- cgit v1.2.3-70-g09d2 From 75c5158f70c065b9704b924503d96e8297838f79 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:30 +0200 Subject: timekeeping: Update clocksource with stop_machine update_wall_time calls change_clocksource HZ times per second to check if a new clock source is available. In close to 100% of all calls there is no new clock. Replace the tick based check by an update done with stop_machine. Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134810.711836357@de.ibm.com> Signed-off-by: Thomas Gleixner --- include/linux/clocksource.h | 2 + kernel/time/clocksource.c | 112 +++++++++++++++++--------------------------- kernel/time/timekeeping.c | 41 ++++++++++------ 3 files changed, 72 insertions(+), 83 deletions(-) (limited to 'include') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index e34015effeb..9ea40ff26f0 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -291,4 +291,6 @@ static inline void update_vsyscall_tz(void) } #endif +extern void timekeeping_notify(struct clocksource *clock); + #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f18c9a6bdcf..a1657b5fdeb 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -109,35 +109,17 @@ EXPORT_SYMBOL(timecounter_cyc2time); /*[Clocksource internal variables]--------- * curr_clocksource: * currently selected clocksource. - * next_clocksource: - * pending next selected clocksource. * clocksource_list: * linked list with the registered clocksources - * clocksource_lock: - * protects manipulations to curr_clocksource and next_clocksource - * and the clocksource_list + * clocksource_mutex: + * protects manipulations to curr_clocksource and the clocksource_list * override_name: * Name of the user-specified clocksource. 
*/ static struct clocksource *curr_clocksource; -static struct clocksource *next_clocksource; static LIST_HEAD(clocksource_list); -static DEFINE_SPINLOCK(clocksource_lock); +static DEFINE_MUTEX(clocksource_mutex); static char override_name[32]; -static int finished_booting; - -/* clocksource_done_booting - Called near the end of core bootup - * - * Hack to avoid lots of clocksource churn at boot time. - * We use fs_initcall because we want this to start before - * device_initcall but after subsys_initcall. - */ -static int __init clocksource_done_booting(void) -{ - finished_booting = 1; - return 0; -} -fs_initcall(clocksource_done_booting); #ifdef CONFIG_CLOCKSOURCE_WATCHDOG static LIST_HEAD(watchdog_list); @@ -356,18 +338,16 @@ static inline void clocksource_resume_watchdog(void) { } void clocksource_resume(void) { struct clocksource *cs; - unsigned long flags; - spin_lock_irqsave(&clocksource_lock, flags); + mutex_lock(&clocksource_mutex); - list_for_each_entry(cs, &clocksource_list, list) { + list_for_each_entry(cs, &clocksource_list, list) if (cs->resume) cs->resume(); - } clocksource_resume_watchdog(); - spin_unlock_irqrestore(&clocksource_lock, flags); + mutex_unlock(&clocksource_mutex); } /** @@ -383,28 +363,13 @@ void clocksource_touch_watchdog(void) } #ifdef CONFIG_GENERIC_TIME -/** - * clocksource_get_next - Returns the selected clocksource - * - */ -struct clocksource *clocksource_get_next(void) -{ - unsigned long flags; - spin_lock_irqsave(&clocksource_lock, flags); - if (next_clocksource && finished_booting) { - curr_clocksource = next_clocksource; - next_clocksource = NULL; - } - spin_unlock_irqrestore(&clocksource_lock, flags); - - return curr_clocksource; -} +static int finished_booting; /** * clocksource_select - Select the best clocksource available * - * Private function. Must hold clocksource_lock when called. + * Private function. Must hold clocksource_mutex when called. * * Select the clocksource with the best rating, or the clocksource, * which is selected by userspace override. @@ -413,7 +378,7 @@ static void clocksource_select(void) { struct clocksource *best, *cs; - if (list_empty(&clocksource_list)) + if (!finished_booting || list_empty(&clocksource_list)) return; /* First clocksource on the list has the best rating. */ best = list_first_entry(&clocksource_list, struct clocksource, list); @@ -438,13 +403,31 @@ static void clocksource_select(void) best = cs; break; } - if (curr_clocksource != best) - next_clocksource = best; + if (curr_clocksource != best) { + printk(KERN_INFO "Switching to clocksource %s\n", best->name); + curr_clocksource = best; + timekeeping_notify(curr_clocksource); + } } +/* + * clocksource_done_booting - Called near the end of core bootup + * + * Hack to avoid lots of clocksource churn at boot time. + * We use fs_initcall because we want this to start before + * device_initcall but after subsys_initcall. 
+ */ +static int __init clocksource_done_booting(void) +{ + finished_booting = 1; + clocksource_select(); + return 0; +} +fs_initcall(clocksource_done_booting); + #else /* CONFIG_GENERIC_TIME */ -static void clocksource_select(void) { } +static inline void clocksource_select(void) { } #endif @@ -471,13 +454,11 @@ static void clocksource_enqueue(struct clocksource *cs) */ int clocksource_register(struct clocksource *cs) { - unsigned long flags; - - spin_lock_irqsave(&clocksource_lock, flags); + mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); clocksource_select(); - spin_unlock_irqrestore(&clocksource_lock, flags); clocksource_enqueue_watchdog(cs); + mutex_unlock(&clocksource_mutex); return 0; } EXPORT_SYMBOL(clocksource_register); @@ -487,14 +468,12 @@ EXPORT_SYMBOL(clocksource_register); */ void clocksource_change_rating(struct clocksource *cs, int rating) { - unsigned long flags; - - spin_lock_irqsave(&clocksource_lock, flags); + mutex_lock(&clocksource_mutex); list_del(&cs->list); cs->rating = rating; clocksource_enqueue(cs); clocksource_select(); - spin_unlock_irqrestore(&clocksource_lock, flags); + mutex_unlock(&clocksource_mutex); } EXPORT_SYMBOL(clocksource_change_rating); @@ -503,13 +482,11 @@ EXPORT_SYMBOL(clocksource_change_rating); */ void clocksource_unregister(struct clocksource *cs) { - unsigned long flags; - + mutex_lock(&clocksource_mutex); clocksource_dequeue_watchdog(cs); - spin_lock_irqsave(&clocksource_lock, flags); list_del(&cs->list); clocksource_select(); - spin_unlock_irqrestore(&clocksource_lock, flags); + mutex_unlock(&clocksource_mutex); } EXPORT_SYMBOL(clocksource_unregister); @@ -527,9 +504,9 @@ sysfs_show_current_clocksources(struct sys_device *dev, { ssize_t count = 0; - spin_lock_irq(&clocksource_lock); + mutex_lock(&clocksource_mutex); count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); - spin_unlock_irq(&clocksource_lock); + mutex_unlock(&clocksource_mutex); return count; } @@ -557,14 +534,14 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, if (buf[count-1] == '\n') count--; - spin_lock_irq(&clocksource_lock); + mutex_lock(&clocksource_mutex); if (count > 0) memcpy(override_name, buf, count); override_name[count] = 0; clocksource_select(); - spin_unlock_irq(&clocksource_lock); + mutex_unlock(&clocksource_mutex); return ret; } @@ -584,7 +561,7 @@ sysfs_show_available_clocksources(struct sys_device *dev, struct clocksource *src; ssize_t count = 0; - spin_lock_irq(&clocksource_lock); + mutex_lock(&clocksource_mutex); list_for_each_entry(src, &clocksource_list, list) { /* * Don't show non-HRES clocksource if the tick code is @@ -596,7 +573,7 @@ sysfs_show_available_clocksources(struct sys_device *dev, max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "%s ", src->name); } - spin_unlock_irq(&clocksource_lock); + mutex_unlock(&clocksource_mutex); count += snprintf(buf + count, max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); @@ -651,11 +628,10 @@ device_initcall(init_clocksource_sysfs); */ static int __init boot_override_clocksource(char* str) { - unsigned long flags; - spin_lock_irqsave(&clocksource_lock, flags); + mutex_lock(&clocksource_mutex); if (str) strlcpy(override_name, str, sizeof(override_name)); - spin_unlock_irqrestore(&clocksource_lock, flags); + mutex_unlock(&clocksource_mutex); return 1; } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 27ae01b596b..41579e7fcf9 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -18,6 +18,7 @@ #include #include #include 
+#include /* Structure holding internal timekeeping values. */ struct timekeeper { @@ -179,6 +180,7 @@ void timekeeping_leap_insert(int leapsecond) } #ifdef CONFIG_GENERIC_TIME + /** * timekeeping_forward_now - update clock to the current time * @@ -351,31 +353,40 @@ EXPORT_SYMBOL(do_settimeofday); * * Accumulates current time interval and initializes new clocksource */ -static void change_clocksource(void) +static int change_clocksource(void *data) { struct clocksource *new, *old; - new = clocksource_get_next(); - - if (!new || timekeeper.clock == new) - return; + new = (struct clocksource *) data; timekeeping_forward_now(); + if (!new->enable || new->enable(new) == 0) { + old = timekeeper.clock; + timekeeper_setup_internals(new); + if (old->disable) + old->disable(old); + } + return 0; +} - if (new->enable && !new->enable(new)) +/** + * timekeeping_notify - Install a new clock source + * @clock: pointer to the clock source + * + * This function is called from clocksource.c after a new, better clock + * source has been registered. The caller holds the clocksource_mutex. + */ +void timekeeping_notify(struct clocksource *clock) +{ + if (timekeeper.clock == clock) return; - - old = timekeeper.clock; - timekeeper_setup_internals(new); - - if (old->disable) - old->disable(old); - + stop_machine(change_clocksource, clock, NULL); tick_clock_notify(); } + #else /* GENERIC_TIME */ + static inline void timekeeping_forward_now(void) { } -static inline void change_clocksource(void) { } /** * ktime_get - get the monotonic time in ktime_t format @@ -416,6 +427,7 @@ void ktime_get_ts(struct timespec *ts) ts->tv_nsec + tomono.tv_nsec); } EXPORT_SYMBOL_GPL(ktime_get_ts); + #endif /* !GENERIC_TIME */ /** @@ -773,7 +785,6 @@ void update_wall_time(void) update_xtime_cache(nsecs); /* check to see if there is a new clocksource to use */ - change_clocksource(); update_vsyscall(&xtime, timekeeper.clock); } -- cgit v1.2.3-70-g09d2 From d4f587c67fc39e0030ddd718675e252e208da4d7 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:31 +0200 Subject: timekeeping: Increase granularity of read_persistent_clock() The persistent clock of some architectures (e.g. s390) have a better granularity than seconds. To reduce the delta between the host clock and the guest clock in a virtualized system change the read_persistent_clock function to return a struct timespec. 
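For illustration only (not part of the patch): an architecture whose persistent clock can report sub-second resolution would now implement the hook roughly as below; my_rtc_read_seconds() and my_rtc_read_nanoseconds() are hypothetical helpers used only for this sketch.

	void read_persistent_clock(struct timespec *ts)
	{
		/* whole seconds, as with the old unsigned long interface */
		ts->tv_sec = my_rtc_read_seconds();
		/* sub-second part the old interface had to discard */
		ts->tv_nsec = my_rtc_read_nanoseconds();
	}

Architectures whose clock only has second resolution simply set tv_nsec to 0, as the converted callers in the diff below do.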
Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134811.013873340@de.ibm.com> Signed-off-by: Thomas Gleixner --- arch/m68knommu/kernel/time.c | 5 ++-- arch/mips/dec/time.c | 5 ++-- arch/mips/lasat/ds1603.c | 5 ++-- arch/mips/lasat/sysctl.c | 8 ++++-- arch/mips/lemote/lm2e/setup.c | 5 ++-- arch/mips/mti-malta/malta-time.c | 5 ++-- arch/mips/pmc-sierra/yosemite/setup.c | 5 ++-- arch/mips/sibyte/swarm/setup.c | 15 +++++++--- arch/mips/sni/time.c | 5 ++-- arch/powerpc/kernel/time.c | 7 +++-- arch/s390/kernel/time.c | 22 +++------------ arch/sh/kernel/time.c | 6 ++-- arch/x86/kernel/rtc.c | 5 ++-- arch/xtensa/kernel/time.c | 5 ++-- include/linux/time.h | 2 +- kernel/time/timekeeping.c | 52 +++++++++++++++++++---------------- 16 files changed, 83 insertions(+), 74 deletions(-) (limited to 'include') diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c index d182b2f7221..68432248515 100644 --- a/arch/m68knommu/kernel/time.c +++ b/arch/m68knommu/kernel/time.c @@ -72,9 +72,10 @@ static unsigned long read_rtc_mmss(void) return mktime(year, mon, day, hour, min, sec);; } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return read_rtc_mmss(); + ts->tv_sec = read_rtc_mmss(); + ts->tv_nsec = 0; } int update_persistent_clock(struct timespec now) diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c index 463136e6685..02f505f23c3 100644 --- a/arch/mips/dec/time.c +++ b/arch/mips/dec/time.c @@ -18,7 +18,7 @@ #include #include -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned int year, mon, day, hour, min, sec, real_year; unsigned long flags; @@ -53,7 +53,8 @@ unsigned long read_persistent_clock(void) year += real_year - 72 + 2000; - return mktime(year, mon, day, hour, min, sec); + ts->tv_sec = mktime(year, mon, day, hour, min, sec); + ts->tv_nsec = 0; } /* diff --git a/arch/mips/lasat/ds1603.c b/arch/mips/lasat/ds1603.c index 52cb1436a12..c6fd96ff118 100644 --- a/arch/mips/lasat/ds1603.c +++ b/arch/mips/lasat/ds1603.c @@ -135,7 +135,7 @@ static void rtc_end_op(void) lasat_ndelay(1000); } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned long word; unsigned long flags; @@ -147,7 +147,8 @@ unsigned long read_persistent_clock(void) rtc_end_op(); spin_unlock_irqrestore(&rtc_lock, flags); - return word; + ts->tv_sec = word; + ts->tv_nsec = 0; } int rtc_mips_set_mmss(unsigned long time) diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index 8f88886feb1..3f04d4c406b 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c @@ -92,10 +92,12 @@ static int rtctmp; int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { + struct timespec ts; int r; if (!write) { - rtctmp = read_persistent_clock(); + read_persistent_clock(&ts); + rtctmp = ts.tv_sec; /* check for time < 0 and set to 0 */ if (rtctmp < 0) rtctmp = 0; @@ -134,9 +136,11 @@ int sysctl_lasat_rtc(ctl_table *table, void *oldval, size_t *oldlenp, void *newval, size_t newlen) { + struct timespec ts; int r; - rtctmp = read_persistent_clock(); + read_persistent_clock(&ts); + rtctmp = ts.tv_sec; if (rtctmp < 0) rtctmp = 0; r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); diff --git a/arch/mips/lemote/lm2e/setup.c b/arch/mips/lemote/lm2e/setup.c index ebd6ceaef2f..24b355df612 100644 --- a/arch/mips/lemote/lm2e/setup.c +++ 
b/arch/mips/lemote/lm2e/setup.c @@ -54,9 +54,10 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_clock_freq / 2; } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return mc146818_get_cmos_time(); + ts->tv_sec = mc146818_get_cmos_time(); + ts->tv_nsec = 0; } void (*__wbflush)(void); diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 0b97d47691f..3c6f190aa61 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -100,9 +100,10 @@ static unsigned int __init estimate_cpu_frequency(void) return count; } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return mc146818_get_cmos_time(); + ts->tv_sec = mc146818_get_cmos_time(); + ts->tv_nsec = 0; } static void __init plat_perf_setup(void) diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c index 2d3c0dca275..3498ac9c35a 100644 --- a/arch/mips/pmc-sierra/yosemite/setup.c +++ b/arch/mips/pmc-sierra/yosemite/setup.c @@ -70,7 +70,7 @@ void __init bus_error_init(void) } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned int year, month, day, hour, min, sec; unsigned long flags; @@ -92,7 +92,8 @@ unsigned long read_persistent_clock(void) m48t37_base->control = 0x00; spin_unlock_irqrestore(&rtc_lock, flags); - return mktime(year, month, day, hour, min, sec); + ts->tv_sec = mktime(year, month, day, hour, min, sec); + ts->tv_nsec = 0; } int rtc_mips_set_time(unsigned long tim) diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 672e45d495a..623ffc933c4 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -87,19 +87,26 @@ enum swarm_rtc_type { enum swarm_rtc_type swarm_rtc_type; -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { + unsigned long sec; + switch (swarm_rtc_type) { case RTC_XICOR: - return xicor_get_time(); + sec = xicor_get_time(); + break; case RTC_M4LT81: - return m41t81_get_time(); + sec = m41t81_get_time(); + break; case RTC_NONE: default: - return mktime(2000, 1, 1, 0, 0, 0); + sec = mktime(2000, 1, 1, 0, 0, 0); + break; } + ts->tv_sec = sec; + tv->tv_nsec = 0; } int rtc_mips_set_time(unsigned long sec) diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index 0d9ec1a5c24..62df6a598e0 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -182,7 +182,8 @@ void __init plat_time_init(void) setup_pit_timer(); } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return -1; + ts->tv_sec = -1; + ts->tv_nsec = 0; } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index eae4511ceea..ad63f30fe3d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -769,7 +769,7 @@ int update_persistent_clock(struct timespec now) return ppc_md.set_rtc_time(&tm); } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { struct rtc_time tm; static int first = 1; @@ -787,8 +787,9 @@ unsigned long read_persistent_clock(void) if (!ppc_md.get_rtc_time) return 0; ppc_md.get_rtc_time(&tm); - return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec); + ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); + ts->tv_nsec = 0; } /* clocksource code */ diff --git a/arch/s390/kernel/time.c 
b/arch/s390/kernel/time.c index e76c2e7a8b9..a94ec48587b 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -182,12 +182,9 @@ static void timing_alert_interrupt(__u16 code) static void etr_reset(void); static void stp_reset(void); -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - struct timespec ts; - - tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts); - return ts.tv_sec; + tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); } static cycle_t read_tod_clock(struct clocksource *cs) @@ -248,7 +245,6 @@ void __init time_init(void) { struct timespec ts; unsigned long flags; - cycle_t now; /* Reset time synchronization interfaces. */ etr_reset(); @@ -266,20 +262,10 @@ void __init time_init(void) panic("Could not register TOD clock source"); /* - * The TOD clock is an accurate clock. The xtime should be - * initialized in a way that the difference between TOD and - * xtime is reasonably small. Too bad that timekeeping_init - * sets xtime.tv_nsec to zero. In addition the clock source - * change from the jiffies clock source to the TOD clock - * source add another error of up to 1/HZ second. The same - * function sets wall_to_monotonic to a value that is too - * small for /proc/uptime to be accurate. - * Reset xtime and wall_to_monotonic to sane values. + * Reset wall_to_monotonic to the initial timestamp created + * in head.S to get a precise value in /proc/uptime. */ write_seqlock_irqsave(&xtime_lock, flags); - now = get_clock(); - tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime); - clocksource_tod.cycle_last = now; tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts); set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec); write_sequnlock_irqrestore(&xtime_lock, flags); diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 9b352a1e3fb..3f4706aa975 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -39,11 +39,9 @@ void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; #ifdef CONFIG_GENERIC_CMOS_UPDATE -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - struct timespec tv; - rtc_sh_get_time(&tv); - return tv.tv_sec; + rtc_sh_get_time(&ts); } int update_persistent_clock(struct timespec now) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 5d465b207e7..bf67dcb4a44 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -178,7 +178,7 @@ static int set_rtc_mmss(unsigned long nowtime) } /* not static: needed by APM */ -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned long retval, flags; @@ -186,7 +186,8 @@ unsigned long read_persistent_clock(void) retval = get_wallclock(); spin_unlock_irqrestore(&rtc_lock, flags); - return retval; + ts->tv_sec = retval; + ts->tv_nsec = 0; } int update_persistent_clock(struct timespec now) diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 8848120d291..19085ff0484 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -59,9 +59,8 @@ static struct irqaction timer_irqaction = { void __init time_init(void) { - xtime.tv_nsec = 0; - xtime.tv_sec = read_persistent_clock(); - + /* FIXME: xtime&wall_to_monotonic are set in timekeeping_init. 
*/ + read_persistent_clock(&xtime); set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); diff --git a/include/linux/time.h b/include/linux/time.h index e7c84455888..53a3216f0d1 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -101,7 +101,7 @@ extern struct timespec xtime; extern struct timespec wall_to_monotonic; extern seqlock_t xtime_lock; -extern unsigned long read_persistent_clock(void); +extern void read_persistent_clock(struct timespec *ts); extern int update_persistent_clock(struct timespec now); extern int no_sync_cmos_clock __read_mostly; void timekeeping_init(void); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 41579e7fcf9..f1a21ce491e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -154,7 +154,7 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); */ struct timespec xtime __attribute__ ((aligned (16))); struct timespec wall_to_monotonic __attribute__ ((aligned (16))); -static unsigned long total_sleep_time; /* seconds */ +static struct timespec total_sleep_time; /* * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. @@ -487,17 +487,18 @@ int timekeeping_valid_for_hres(void) } /** - * read_persistent_clock - Return time in seconds from the persistent clock. + * read_persistent_clock - Return time from the persistent clock. * * Weak dummy function for arches that do not yet support it. - * Returns seconds from epoch using the battery backed persistent clock. - * Returns zero if unsupported. + * Reads the time from the battery backed persistent clock. + * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported. * * XXX - Do be sure to remove it once all arches implement it. */ -unsigned long __attribute__((weak)) read_persistent_clock(void) +void __attribute__((weak)) read_persistent_clock(struct timespec *ts) { - return 0; + ts->tv_sec = 0; + ts->tv_nsec = 0; } /* @@ -507,7 +508,9 @@ void __init timekeeping_init(void) { struct clocksource *clock; unsigned long flags; - unsigned long sec = read_persistent_clock(); + struct timespec now; + + read_persistent_clock(&now); write_seqlock_irqsave(&xtime_lock, flags); @@ -518,19 +521,20 @@ void __init timekeeping_init(void) clock->enable(clock); timekeeper_setup_internals(clock); - xtime.tv_sec = sec; - xtime.tv_nsec = 0; + xtime.tv_sec = now.tv_sec; + xtime.tv_nsec = now.tv_nsec; raw_time.tv_sec = 0; raw_time.tv_nsec = 0; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); update_xtime_cache(0); - total_sleep_time = 0; + total_sleep_time.tv_sec = 0; + total_sleep_time.tv_nsec = 0; write_sequnlock_irqrestore(&xtime_lock, flags); } /* time in seconds when suspend began */ -static unsigned long timekeeping_suspend_time; +static struct timespec timekeeping_suspend_time; /** * timekeeping_resume - Resumes the generic timekeeping subsystem. 
@@ -543,18 +547,19 @@ static unsigned long timekeeping_suspend_time; static int timekeeping_resume(struct sys_device *dev) { unsigned long flags; - unsigned long now = read_persistent_clock(); + struct timespec ts; + + read_persistent_clock(&ts); clocksource_resume(); write_seqlock_irqsave(&xtime_lock, flags); - if (now && (now > timekeeping_suspend_time)) { - unsigned long sleep_length = now - timekeeping_suspend_time; - - xtime.tv_sec += sleep_length; - wall_to_monotonic.tv_sec -= sleep_length; - total_sleep_time += sleep_length; + if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { + ts = timespec_sub(ts, timekeeping_suspend_time); + xtime = timespec_add_safe(xtime, ts); + wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); + total_sleep_time = timespec_add_safe(total_sleep_time, ts); } update_xtime_cache(0); /* re-base the last cycle value */ @@ -577,7 +582,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) { unsigned long flags; - timekeeping_suspend_time = read_persistent_clock(); + read_persistent_clock(&timekeeping_suspend_time); write_seqlock_irqsave(&xtime_lock, flags); timekeeping_forward_now(); @@ -801,9 +806,10 @@ void update_wall_time(void) */ void getboottime(struct timespec *ts) { - set_normalized_timespec(ts, - - (wall_to_monotonic.tv_sec + total_sleep_time), - - wall_to_monotonic.tv_nsec); + struct timespec boottime; + + boottime = timespec_add_safe(wall_to_monotonic, total_sleep_time); + set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); } /** @@ -812,7 +818,7 @@ void getboottime(struct timespec *ts) */ void monotonic_to_bootbased(struct timespec *ts) { - ts->tv_sec += total_sleep_time; + *ts = timespec_add_safe(*ts, total_sleep_time); } unsigned long get_seconds(void) -- cgit v1.2.3-70-g09d2 From 23970e389e9cee43c4b41023935e1417271708b2 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:32 +0200 Subject: timekeeping: Introduce read_boot_clock Add the new function read_boot_clock to get the exact time the system has been started. For architectures without support for exact boot time a new weak function is added that returns 0. Use the exact boot time to initialize wall_to_monotonic, or xtime if the read_boot_clock returned 0. Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134811.296703241@de.ibm.com> Signed-off-by: Thomas Gleixner --- arch/s390/kernel/time.c | 17 +++++------------ include/linux/time.h | 1 + kernel/time/timekeeping.c | 24 ++++++++++++++++++++++-- 3 files changed, 28 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a94ec48587b..6bff1a1d906 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -187,6 +187,11 @@ void read_persistent_clock(struct timespec *ts) tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); } +void read_boot_clock(struct timespec *ts) +{ + tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts); +} + static cycle_t read_tod_clock(struct clocksource *cs) { return get_clock(); @@ -243,9 +248,6 @@ void update_vsyscall_tz(void) */ void __init time_init(void) { - struct timespec ts; - unsigned long flags; - /* Reset time synchronization interfaces. 
*/ etr_reset(); stp_reset(); @@ -261,15 +263,6 @@ void __init time_init(void) if (clocksource_register(&clocksource_tod) != 0) panic("Could not register TOD clock source"); - /* - * Reset wall_to_monotonic to the initial timestamp created - * in head.S to get a precise value in /proc/uptime. - */ - write_seqlock_irqsave(&xtime_lock, flags); - tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts); - set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec); - write_sequnlock_irqrestore(&xtime_lock, flags); - /* Enable TOD clock interrupts on the boot cpu. */ init_cpu_timer(); diff --git a/include/linux/time.h b/include/linux/time.h index 53a3216f0d1..f505988398e 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -102,6 +102,7 @@ extern struct timespec wall_to_monotonic; extern seqlock_t xtime_lock; extern void read_persistent_clock(struct timespec *ts); +extern void read_boot_clock(struct timespec *ts); extern int update_persistent_clock(struct timespec now); extern int no_sync_cmos_clock __read_mostly; void timekeeping_init(void); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f1a21ce491e..15e06defca5 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -501,6 +501,21 @@ void __attribute__((weak)) read_persistent_clock(struct timespec *ts) ts->tv_nsec = 0; } +/** + * read_boot_clock - Return time of the system start. + * + * Weak dummy function for arches that do not yet support it. + * Function to read the exact time the system has been started. + * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported. + * + * XXX - Do be sure to remove it once all arches implement it. + */ +void __attribute__((weak)) read_boot_clock(struct timespec *ts) +{ + ts->tv_sec = 0; + ts->tv_nsec = 0; +} + /* * timekeeping_init - Initializes the clocksource and common timekeeping values */ @@ -508,9 +523,10 @@ void __init timekeeping_init(void) { struct clocksource *clock; unsigned long flags; - struct timespec now; + struct timespec now, boot; read_persistent_clock(&now); + read_boot_clock(&boot); write_seqlock_irqsave(&xtime_lock, flags); @@ -525,8 +541,12 @@ void __init timekeeping_init(void) xtime.tv_nsec = now.tv_nsec; raw_time.tv_sec = 0; raw_time.tv_nsec = 0; + if (boot.tv_sec == 0 && boot.tv_nsec == 0) { + boot.tv_sec = xtime.tv_sec; + boot.tv_nsec = xtime.tv_nsec; + } set_normalized_timespec(&wall_to_monotonic, - -xtime.tv_sec, -xtime.tv_nsec); + -boot.tv_sec, -boot.tv_nsec); update_xtime_cache(0); total_sleep_time.tv_sec = 0; total_sleep_time.tv_nsec = 0; -- cgit v1.2.3-70-g09d2 From 0ccff1a49def92d6b838a6da166c89004b3a4d0c Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Mon, 17 Aug 2009 22:38:04 -0400 Subject: jbd2: bitfields should be unsigned This fixes sparse noise: error: dubious one-bit signed bitfield Signed-off-by: H Hartley Sweeten Signed-off-by: Andrew Morton Signed-off-by: "Theodore Ts'o" Cc: Jan Kara --- include/linux/jbd2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index d97eb652d6c..52695d3dfd0 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -652,7 +652,7 @@ struct transaction_s * This transaction is being forced and some process is * waiting for it to finish. 
*/ - int t_synchronous_commit:1; + unsigned int t_synchronous_commit:1; /* * For use by the filesystem to store fs-specific data -- cgit v1.2.3-70-g09d2 From 776f3360de6ed246e973577828f725681120fd7a Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 19 Aug 2009 15:56:37 +1000 Subject: drm: fixup includes in encoder slave header files. Signed-off-by: Dave Airlie --- include/drm/drm_encoder_slave.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 821ec40c17d..e5e5c94ca92 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -27,8 +27,8 @@ #ifndef __DRM_ENCODER_SLAVE_H__ #define __DRM_ENCODER_SLAVE_H__ -#include -#include +#include "drmP.h" +#include "drm_crtc.h" /** * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver -- cgit v1.2.3-70-g09d2 From 53bd83899f5ba6b0da8f5ef976129273854a72d4 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 1 Jul 2009 10:04:40 -0700 Subject: drm: clarify scaling property names Now that we're using the scaling property in the Intel driver I noticed that the names were a bit confusing. I've corrected them according to our discussion on IRC and the mailing list, though I've left out potential new additions for a new scaling property with an integer (or two) for the scaling factor. None of the drivers implement that today, but if someone wants to do it, I think it could be done with the addition of a single new type and a new property to describe the scaling factor in the X and Y directions. Signed-off-by: Jesse Barnes Acked-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 8 ++++---- drivers/gpu/drm/i915/intel_lvds.c | 14 +++----------- include/drm/drm_mode.h | 9 +++++---- 3 files changed, 12 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 362a538cded..39a6bc69d22 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -68,10 +68,10 @@ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) */ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { - { DRM_MODE_SCALE_NON_GPU, "Non-GPU" }, - { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" }, - { DRM_MODE_SCALE_NO_SCALE, "No scale" }, - { DRM_MODE_SCALE_ASPECT, "Aspect" }, + { DRM_MODE_SCALE_NONE, "None" }, + { DRM_MODE_SCALE_FULLSCREEN, "Full" }, + { DRM_MODE_SCALE_CENTER, "Center" }, + { DRM_MODE_SCALE_ASPECT, "Full aspect" }, }; static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b59c65d19d8..5df486fbe05 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -38,14 +38,6 @@ #include "i915_drv.h" #include -/* - * the following four scaling options are defined. - * #define DRM_MODE_SCALE_NON_GPU 0 - * #define DRM_MODE_SCALE_FULLSCREEN 1 - * #define DRM_MODE_SCALE_NO_SCALE 2 - * #define DRM_MODE_SCALE_ASPECT 3 - */ - /* Private structure for the integrated LVDS support */ struct intel_lvds_priv { int fitting_mode; @@ -334,7 +326,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, I915_WRITE(BCLRPAT_B, 0); switch (lvds_priv->fitting_mode) { - case DRM_MODE_SCALE_NO_SCALE: + case DRM_MODE_SCALE_CENTER: /* * For centered modes, we have to calculate border widths & * heights and modify the values programmed into the CRTC. 
@@ -670,8 +662,8 @@ static int intel_lvds_set_property(struct drm_connector *connector, connector->encoder) { struct drm_crtc *crtc = connector->encoder->crtc; struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; - if (value == DRM_MODE_SCALE_NON_GPU) { - DRM_DEBUG_KMS("non_GPU property is unsupported\n"); + if (value == DRM_MODE_SCALE_NONE) { + DRM_DEBUG_KMS("no scaling not supported\n"); return 0; } if (lvds_priv->fitting_mode == value) { diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 616aeb42b77..1f908416aed 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -68,10 +68,11 @@ #define DRM_MODE_DPMS_OFF 3 /* Scaling mode options */ -#define DRM_MODE_SCALE_NON_GPU 0 -#define DRM_MODE_SCALE_FULLSCREEN 1 -#define DRM_MODE_SCALE_NO_SCALE 2 -#define DRM_MODE_SCALE_ASPECT 3 +#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or + software can still scale) */ +#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */ +#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ +#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ /* Dithering mode options */ #define DRM_MODE_DITHERING_OFF 0 -- cgit v1.2.3-70-g09d2 From 949ef70e2d1a5c12178875f513df34fc85d91a38 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Mon, 17 Aug 2009 19:49:19 +0300 Subject: drm/kms: no need to return void value (encoder) Cc: Francisco Jerez Signed-off-by: Pekka Paalanen Signed-off-by: Dave Airlie --- include/drm/drm_encoder_slave.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index e5e5c94ca92..2f65633d28a 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -154,7 +154,7 @@ static inline int drm_i2c_encoder_register(struct module *owner, */ static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver) { - return i2c_del_driver(&driver->i2c_driver); + i2c_del_driver(&driver->i2c_driver); } void drm_i2c_encoder_destroy(struct drm_encoder *encoder); -- cgit v1.2.3-70-g09d2 From a0724fcf829e5afb66159ef68cb16a805ea11b42 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Mon, 17 Aug 2009 01:18:38 +0300 Subject: drm/ttm: optimize bo_kmap_type values A micro-optimization on the function ttm_kmap_obj_virtual(). By defining the values of enum ttm_bo_kmap_obj::bo_kmap_type to have a bit indicating iomem, size of the function ttm_kmap_obj_virtual() will be reduced by 16 bytes on x86_64 (gcc 4.1.2). ttm_kmap_obj_virtual() may be heavily used, when buffer objects are accessed via wrappers, that work for both kinds of memory addresses: iomem cookies and kernel virtual. Signed-off-by: Pekka Paalanen Signed-off-by: Dave Airlie --- include/drm/ttm/ttm_bo_api.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index cd22ab4b495..99dc521aa1a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -245,14 +245,15 @@ struct ttm_buffer_object { * premapped region. 
*/ +#define TTM_BO_MAP_IOMEM_MASK 0x80 struct ttm_bo_kmap_obj { void *virtual; struct page *page; enum { - ttm_bo_map_iomap, - ttm_bo_map_vmap, - ttm_bo_map_kmap, - ttm_bo_map_premapped, + ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK, + ttm_bo_map_vmap = 2, + ttm_bo_map_kmap = 3, + ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, } bo_kmap_type; }; @@ -522,8 +523,7 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, bool *is_iomem) { - *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap || - map->bo_kmap_type == ttm_bo_map_premapped); + *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK); return map->virtual; } -- cgit v1.2.3-70-g09d2 From 327c225bd548bf7871f116a0baa5ebdac884e452 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 17 Aug 2009 16:28:37 +0200 Subject: drm: Enable drm drivers to add drm sysfs devices. Export utility functions for drivers to add specialized devices in the sysfs drm class subdirectory. Initially this will be needed form TTM to add a virtual device that handles power management. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_sysfs.c | 25 +++++++++++++++++++++++++ include/drm/drm_sysfs.h | 12 ++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 include/drm/drm_sysfs.h (limited to 'include') diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index adc179459c2..de154556c40 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -16,6 +16,7 @@ #include #include +#include "drm_sysfs.h" #include "drm_core.h" #include "drmP.h" @@ -515,3 +516,27 @@ void drm_sysfs_device_remove(struct drm_minor *minor) { device_unregister(&minor->kdev); } + + +/** + * drm_class_device_register - Register a struct device in the drm class. + * + * @dev: pointer to struct device to register. + * + * @dev should have all relevant members pre-filled with the exception + * of the class member. In particular, the device_type member must + * be set. + */ + +int drm_class_device_register(struct device *dev) +{ + dev->class = drm_class; + return device_register(dev); +} +EXPORT_SYMBOL_GPL(drm_class_device_register); + +void drm_class_device_unregister(struct device *dev) +{ + return device_unregister(dev); +} +EXPORT_SYMBOL_GPL(drm_class_device_unregister); diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h new file mode 100644 index 00000000000..1d8e033fde6 --- /dev/null +++ b/include/drm/drm_sysfs.h @@ -0,0 +1,12 @@ +#ifndef _DRM_SYSFS_H_ +#define _DRM_SYSFS_H_ + +/** + * This minimalistic include file is intended for users (read TTM) that + * don't want to include the full drmP.h file. + */ + +extern int drm_class_device_register(struct device *dev); +extern void drm_class_device_unregister(struct device *dev); + +#endif -- cgit v1.2.3-70-g09d2 From 5fd9cbad3a4ae82c83c55b9c621d156c326724ef Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 17 Aug 2009 16:28:39 +0200 Subject: drm/ttm: Memory accounting rework. Use inclusive zones to simplify accounting and its sysfs representation. Use DMA32 accounting where applicable. Add a sysfs interface to make the heuristically determined limits readable and configurable. 
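As a rough sketch of how callers are expected to use the reworked accounting (simplified from the hunks below, error handling omitted): page-backed allocations go through ttm_mem_global_alloc_page(), which charges the zone(s) appropriate for where the page sits (highmem, dma32 or kernel), while plain kernel allocations go through ttm_mem_global_alloc() and are accounted in all zones.

	/* charge a freshly allocated page against the zone(s) it belongs to,
	 * honouring the per-zone limits */
	ret = ttm_mem_global_alloc_page(glob, page, false, false);

	/* account bookkeeping memory (e.g. a bo's acc_size) in all zones */
	ret = ttm_mem_global_alloc(glob, acc_size, false, false);

	/* ... later, release the accounting again */
	ttm_mem_global_free_page(glob, page);
	ttm_mem_global_free(glob, acc_size);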
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 6 +- drivers/gpu/drm/ttm/ttm_global.c | 4 +- drivers/gpu/drm/ttm/ttm_memory.c | 488 +++++++++++++++++++++++++++++++++------ drivers/gpu/drm/ttm/ttm_tt.c | 29 +-- include/drm/ttm/ttm_memory.h | 43 ++-- include/drm/ttm/ttm_module.h | 2 + 6 files changed, 453 insertions(+), 119 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c1c407f7cca..f16909ceec9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -70,7 +70,7 @@ static void ttm_bo_release_list(struct kref *list_kref) if (bo->destroy) bo->destroy(bo); else { - ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false); + ttm_mem_global_free(bdev->mem_glob, bo->acc_size); kfree(bo); } } @@ -1065,14 +1065,14 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, size_t acc_size = ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); - ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false); + ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); if (unlikely(ret != 0)) return ret; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (unlikely(bo == NULL)) { - ttm_mem_global_free(mem_glob, acc_size, false); + ttm_mem_global_free(mem_glob, acc_size); return -ENOMEM; } diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c index 0b14eb1972b..541744d00d3 100644 --- a/drivers/gpu/drm/ttm/ttm_global.c +++ b/drivers/gpu/drm/ttm/ttm_global.c @@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) mutex_lock(&item->mutex); if (item->refcount == 0) { - item->object = kmalloc(ref->size, GFP_KERNEL); + item->object = kzalloc(ref->size, GFP_KERNEL); if (unlikely(item->object == NULL)) { ret = -ENOMEM; goto out_err; @@ -89,7 +89,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) mutex_unlock(&item->mutex); return 0; out_err: - kfree(item->object); mutex_unlock(&item->mutex); item->object = NULL; return ret; @@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref) BUG_ON(ref->object != item->object); if (--item->refcount == 0) { ref->release(ref); - kfree(item->object); item->object = NULL; } mutex_unlock(&item->mutex); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 87323d4ff68..62fb5cf0899 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -26,15 +26,180 @@ **************************************************************************/ #include "ttm/ttm_memory.h" +#include "ttm/ttm_module.h" #include #include #include #include #include -#define TTM_PFX "[TTM] " #define TTM_MEMORY_ALLOC_RETRIES 4 +struct ttm_mem_zone { + struct kobject kobj; + struct ttm_mem_global *glob; + const char *name; + uint64_t zone_mem; + uint64_t emer_mem; + uint64_t max_mem; + uint64_t swap_limit; + uint64_t used_mem; +}; + +static struct attribute ttm_mem_sys = { + .name = "zone_memory", + .mode = S_IRUGO +}; +static struct attribute ttm_mem_emer = { + .name = "emergency_memory", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_mem_max = { + .name = "available_memory", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_mem_swap = { + .name = "swap_limit", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_mem_used = { + .name = "used_memory", + .mode = S_IRUGO +}; + +static void ttm_mem_zone_kobj_release(struct kobject *kobj) +{ + struct ttm_mem_zone *zone = + container_of(kobj, struct 
ttm_mem_zone, kobj); + + printk(KERN_INFO TTM_PFX + "Zone %7s: Used memory at exit: %llu kiB.\n", + zone->name, (unsigned long long) zone->used_mem >> 10); + kfree(zone); +} + +static ssize_t ttm_mem_zone_show(struct kobject *kobj, + struct attribute *attr, + char *buffer) +{ + struct ttm_mem_zone *zone = + container_of(kobj, struct ttm_mem_zone, kobj); + uint64_t val = 0; + + spin_lock(&zone->glob->lock); + if (attr == &ttm_mem_sys) + val = zone->zone_mem; + else if (attr == &ttm_mem_emer) + val = zone->emer_mem; + else if (attr == &ttm_mem_max) + val = zone->max_mem; + else if (attr == &ttm_mem_swap) + val = zone->swap_limit; + else if (attr == &ttm_mem_used) + val = zone->used_mem; + spin_unlock(&zone->glob->lock); + + return snprintf(buffer, PAGE_SIZE, "%llu\n", + (unsigned long long) val >> 10); +} + +static void ttm_check_swapping(struct ttm_mem_global *glob); + +static ssize_t ttm_mem_zone_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, + size_t size) +{ + struct ttm_mem_zone *zone = + container_of(kobj, struct ttm_mem_zone, kobj); + int chars; + unsigned long val; + uint64_t val64; + + chars = sscanf(buffer, "%lu", &val); + if (chars == 0) + return size; + + val64 = val; + val64 <<= 10; + + spin_lock(&zone->glob->lock); + if (val64 > zone->zone_mem) + val64 = zone->zone_mem; + if (attr == &ttm_mem_emer) { + zone->emer_mem = val64; + if (zone->max_mem > val64) + zone->max_mem = val64; + } else if (attr == &ttm_mem_max) { + zone->max_mem = val64; + if (zone->emer_mem < val64) + zone->emer_mem = val64; + } else if (attr == &ttm_mem_swap) + zone->swap_limit = val64; + spin_unlock(&zone->glob->lock); + + ttm_check_swapping(zone->glob); + + return size; +} + +static struct attribute *ttm_mem_zone_attrs[] = { + &ttm_mem_sys, + &ttm_mem_emer, + &ttm_mem_max, + &ttm_mem_swap, + &ttm_mem_used, + NULL +}; + +static struct sysfs_ops ttm_mem_zone_ops = { + .show = &ttm_mem_zone_show, + .store = &ttm_mem_zone_store +}; + +static struct kobj_type ttm_mem_zone_kobj_type = { + .release = &ttm_mem_zone_kobj_release, + .sysfs_ops = &ttm_mem_zone_ops, + .default_attrs = ttm_mem_zone_attrs, +}; + +static void ttm_mem_global_kobj_release(struct kobject *kobj) +{ + struct ttm_mem_global *glob = + container_of(kobj, struct ttm_mem_global, kobj); + + kfree(glob); +} + +static struct kobj_type ttm_mem_glob_kobj_type = { + .release = &ttm_mem_global_kobj_release, +}; + +static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, + bool from_wq, uint64_t extra) +{ + unsigned int i; + struct ttm_mem_zone *zone; + uint64_t target; + + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + + if (from_wq) + target = zone->swap_limit; + else if (capable(CAP_SYS_ADMIN)) + target = zone->emer_mem; + else + target = zone->max_mem; + + target = (extra > target) ? 0ULL : target; + + if (zone->used_mem > target) + return true; + } + return false; +} + /** * At this point we only support a single shrink callback. * Extend this if needed, perhaps using a linked list of callbacks. @@ -42,34 +207,17 @@ * many threads may try to swap out at any given time. 
*/ -static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue, +static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, uint64_t extra) { int ret; struct ttm_mem_shrink *shrink; - uint64_t target; - uint64_t total_target; spin_lock(&glob->lock); if (glob->shrink == NULL) goto out; - if (from_workqueue) { - target = glob->swap_limit; - total_target = glob->total_memory_swap_limit; - } else if (capable(CAP_SYS_ADMIN)) { - total_target = glob->emer_total_memory; - target = glob->emer_memory; - } else { - total_target = glob->max_total_memory; - target = glob->max_memory; - } - - total_target = (extra >= total_target) ? 0 : total_target - extra; - target = (extra >= target) ? 0 : target - extra; - - while (glob->used_memory > target || - glob->used_total_memory > total_target) { + while (ttm_zones_above_swap_target(glob, from_wq, extra)) { shrink = glob->shrink; spin_unlock(&glob->lock); ret = shrink->do_shrink(shrink); @@ -81,6 +229,8 @@ out: spin_unlock(&glob->lock); } + + static void ttm_shrink_work(struct work_struct *work) { struct ttm_mem_global *glob = @@ -89,63 +239,178 @@ static void ttm_shrink_work(struct work_struct *work) ttm_shrink(glob, true, 0ULL); } +static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, + const struct sysinfo *si) +{ + struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); + uint64_t mem; + + if (unlikely(!zone)) + return -ENOMEM; + + mem = si->totalram - si->totalhigh; + mem *= si->mem_unit; + + zone->name = "kernel"; + zone->zone_mem = mem; + zone->max_mem = mem >> 1; + zone->emer_mem = (mem >> 1) + (mem >> 2); + zone->swap_limit = zone->max_mem - (mem >> 3); + zone->used_mem = 0; + zone->glob = glob; + glob->zone_kernel = zone; + glob->zones[glob->num_zones++] = zone; + kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); + return kobject_add(&zone->kobj, &glob->kobj, zone->name); +} + +#ifdef CONFIG_HIGHMEM +static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, + const struct sysinfo *si) +{ + struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); + uint64_t mem; + + if (unlikely(!zone)) + return -ENOMEM; + + if (si->totalhigh == 0) + return 0; + + mem = si->totalram; + mem *= si->mem_unit; + + zone->name = "highmem"; + zone->zone_mem = mem; + zone->max_mem = mem >> 1; + zone->emer_mem = (mem >> 1) + (mem >> 2); + zone->swap_limit = zone->max_mem - (mem >> 3); + zone->used_mem = 0; + zone->glob = glob; + glob->zone_highmem = zone; + glob->zones[glob->num_zones++] = zone; + kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); + return kobject_add(&zone->kobj, &glob->kobj, zone->name); +} +#else +static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, + const struct sysinfo *si) +{ + struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); + uint64_t mem; + + if (unlikely(!zone)) + return -ENOMEM; + + mem = si->totalram; + mem *= si->mem_unit; + + /** + * No special dma32 zone needed. + */ + + if (mem <= ((uint64_t) 1ULL << 32)) + return 0; + + /* + * Limit max dma32 memory to 4GB for now + * until we can figure out how big this + * zone really is. 
+ */ + + mem = ((uint64_t) 1ULL << 32); + zone->name = "dma32"; + zone->zone_mem = mem; + zone->max_mem = mem >> 1; + zone->emer_mem = (mem >> 1) + (mem >> 2); + zone->swap_limit = zone->max_mem - (mem >> 3); + zone->used_mem = 0; + zone->glob = glob; + glob->zone_dma32 = zone; + glob->zones[glob->num_zones++] = zone; + kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); + return kobject_add(&zone->kobj, &glob->kobj, zone->name); +} +#endif + int ttm_mem_global_init(struct ttm_mem_global *glob) { struct sysinfo si; - uint64_t mem; + int ret; + int i; + struct ttm_mem_zone *zone; spin_lock_init(&glob->lock); glob->swap_queue = create_singlethread_workqueue("ttm_swap"); INIT_WORK(&glob->work, ttm_shrink_work); init_waitqueue_head(&glob->queue); + kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type); + ret = kobject_add(&glob->kobj, + ttm_get_kobj(), + "memory_accounting"); + if (unlikely(ret != 0)) + goto out_no_zone; si_meminfo(&si); - mem = si.totalram - si.totalhigh; - mem *= si.mem_unit; - - glob->max_memory = mem >> 1; - glob->emer_memory = (mem >> 1) + (mem >> 2); - glob->swap_limit = glob->max_memory - (mem >> 3); - glob->used_memory = 0; - glob->used_total_memory = 0; - glob->shrink = NULL; - - mem = si.totalram; - mem *= si.mem_unit; - - glob->max_total_memory = mem >> 1; - glob->emer_total_memory = (mem >> 1) + (mem >> 2); - - glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3); - - printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n", - glob->max_total_memory >> 20); - printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n", - glob->max_memory >> 20); - + ret = ttm_mem_init_kernel_zone(glob, &si); + if (unlikely(ret != 0)) + goto out_no_zone; +#ifdef CONFIG_HIGHMEM + ret = ttm_mem_init_highmem_zone(glob, &si); + if (unlikely(ret != 0)) + goto out_no_zone; +#else + ret = ttm_mem_init_dma32_zone(glob, &si); + if (unlikely(ret != 0)) + goto out_no_zone; +#endif + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + printk(KERN_INFO TTM_PFX + "Zone %7s: Available graphics memory: %llu kiB.\n", + zone->name, (unsigned long long) zone->max_mem >> 10); + } return 0; +out_no_zone: + ttm_mem_global_release(glob); + return ret; } EXPORT_SYMBOL(ttm_mem_global_init); void ttm_mem_global_release(struct ttm_mem_global *glob) { - printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n", - (unsigned long long)glob->used_total_memory); + unsigned int i; + struct ttm_mem_zone *zone; + flush_workqueue(glob->swap_queue); destroy_workqueue(glob->swap_queue); glob->swap_queue = NULL; + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + kobject_del(&zone->kobj); + kobject_put(&zone->kobj); + } + kobject_del(&glob->kobj); + kobject_put(&glob->kobj); } EXPORT_SYMBOL(ttm_mem_global_release); -static inline void ttm_check_swapping(struct ttm_mem_global *glob) +static void ttm_check_swapping(struct ttm_mem_global *glob) { - bool needs_swapping; + bool needs_swapping = false; + unsigned int i; + struct ttm_mem_zone *zone; spin_lock(&glob->lock); - needs_swapping = (glob->used_memory > glob->swap_limit || - glob->used_total_memory > - glob->total_memory_swap_limit); + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (zone->used_mem > zone->swap_limit) { + needs_swapping = true; + break; + } + } + spin_unlock(&glob->lock); if (unlikely(needs_swapping)) @@ -153,44 +418,60 @@ static inline void ttm_check_swapping(struct ttm_mem_global *glob) } -void ttm_mem_global_free(struct ttm_mem_global *glob, - uint64_t amount, 
bool himem) +static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, + struct ttm_mem_zone *single_zone, + uint64_t amount) { + unsigned int i; + struct ttm_mem_zone *zone; + spin_lock(&glob->lock); - glob->used_total_memory -= amount; - if (!himem) - glob->used_memory -= amount; - wake_up_all(&glob->queue); + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (single_zone && zone != single_zone) + continue; + zone->used_mem -= amount; + } spin_unlock(&glob->lock); } +void ttm_mem_global_free(struct ttm_mem_global *glob, + uint64_t amount) +{ + return ttm_mem_global_free_zone(glob, NULL, amount); +} + static int ttm_mem_global_reserve(struct ttm_mem_global *glob, - uint64_t amount, bool himem, bool reserve) + struct ttm_mem_zone *single_zone, + uint64_t amount, bool reserve) { uint64_t limit; - uint64_t lomem_limit; int ret = -ENOMEM; + unsigned int i; + struct ttm_mem_zone *zone; spin_lock(&glob->lock); + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (single_zone && zone != single_zone) + continue; - if (capable(CAP_SYS_ADMIN)) { - limit = glob->emer_total_memory; - lomem_limit = glob->emer_memory; - } else { - limit = glob->max_total_memory; - lomem_limit = glob->max_memory; - } + limit = (capable(CAP_SYS_ADMIN)) ? + zone->emer_mem : zone->max_mem; - if (unlikely(glob->used_total_memory + amount > limit)) - goto out_unlock; - if (unlikely(!himem && glob->used_memory + amount > lomem_limit)) - goto out_unlock; + if (zone->used_mem > limit) + goto out_unlock; + } if (reserve) { - glob->used_total_memory += amount; - if (!himem) - glob->used_memory += amount; + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (single_zone && zone != single_zone) + continue; + zone->used_mem += amount; + } } + ret = 0; out_unlock: spin_unlock(&glob->lock); @@ -199,12 +480,17 @@ out_unlock: return ret; } -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - bool no_wait, bool interruptible, bool himem) + +static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, + struct ttm_mem_zone *single_zone, + uint64_t memory, + bool no_wait, bool interruptible) { int count = TTM_MEMORY_ALLOC_RETRIES; - while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) + while (unlikely(ttm_mem_global_reserve(glob, + single_zone, + memory, true) != 0)) { if (no_wait) return -ENOMEM; @@ -216,6 +502,56 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, return 0; } +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + bool no_wait, bool interruptible) +{ + /** + * Normal allocations of kernel memory are registered in + * all zones. + */ + + return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, + interruptible); +} + +int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, + bool no_wait, bool interruptible) +{ + + struct ttm_mem_zone *zone = NULL; + + /** + * Page allocations may be registed in a single zone + * only if highmem or !dma32. 
+ */ + +#ifdef CONFIG_HIGHMEM + if (PageHighMem(page) && glob->zone_highmem != NULL) + zone = glob->zone_highmem; +#else + if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) + zone = glob->zone_kernel; +#endif + return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait, + interruptible); +} + +void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page) +{ + struct ttm_mem_zone *zone = NULL; + +#ifdef CONFIG_HIGHMEM + if (PageHighMem(page) && glob->zone_highmem != NULL) + zone = glob->zone_highmem; +#else + if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) + zone = glob->zone_kernel; +#endif + ttm_mem_global_free_zone(glob, zone, PAGE_SIZE); +} + + size_t ttm_round_pot(size_t size) { if ((size & (size - 1)) == 0) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 75dc8bd2459..4e1e2566d51 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) set_page_dirty_lock(page); ttm->pages[i] = NULL; - ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false); + ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE); put_page(page); } ttm->state = tt_unpopulated; @@ -187,21 +187,14 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) if (!p) return NULL; - if (PageHighMem(p)) { - ret = - ttm_mem_global_alloc(mem_glob, PAGE_SIZE, - false, false, true); - if (unlikely(ret != 0)) - goto out_err; + ret = ttm_mem_global_alloc_page(mem_glob, p, false, false); + if (unlikely(ret != 0)) + goto out_err; + + if (PageHighMem(p)) ttm->pages[--ttm->first_himem_page] = p; - } else { - ret = - ttm_mem_global_alloc(mem_glob, PAGE_SIZE, - false, false, false); - if (unlikely(ret != 0)) - goto out_err; + else ttm->pages[++ttm->last_lomem_page] = p; - } } return p; out_err: @@ -355,8 +348,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) printk(KERN_ERR TTM_PFX "Erroneous page count. " "Leaking pages.\n"); - ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, - PageHighMem(cur_page)); + ttm_mem_global_free_page(ttm->bdev->mem_glob, + cur_page); __free_page(cur_page); } } @@ -411,7 +404,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm, */ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, - false, false, false); + false, false); if (unlikely(ret != 0)) return ret; @@ -422,7 +415,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm, if (ret != num_pages && write) { ttm_tt_free_user_pages(ttm); - ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false); + ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE); return -ENOMEM; } diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index d8b8f042c4f..6983a7cf4da 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -32,6 +32,7 @@ #include #include #include +#include /** * struct ttm_mem_shrink - callback to shrink TTM memory usage. @@ -60,34 +61,33 @@ struct ttm_mem_shrink { * @queue: Wait queue for processes suspended waiting for memory. * @lock: Lock to protect the @shrink - and the memory accounting members, * that is, essentially the whole structure with some exceptions. - * @emer_memory: Lowmem memory limit available for root. - * @max_memory: Lowmem memory limit available for non-root. - * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in. - * @used_memory: Currently used lowmem memory. - * @used_total_memory: Currently used total (lowmem + highmem) memory. 
- * @total_memory_swap_limit: Total memory limit where the shrink workqueue - * kicks in. - * @max_total_memory: Total memory available to non-root processes. - * @emer_total_memory: Total memory available to root processes. + * @zones: Array of pointers to accounting zones. + * @num_zones: Number of populated entries in the @zones array. + * @zone_kernel: Pointer to the kernel zone. + * @zone_highmem: Pointer to the highmem zone if there is one. + * @zone_dma32: Pointer to the dma32 zone if there is one. * * Note that this structure is not per device. It should be global for all * graphics devices. */ +#define TTM_MEM_MAX_ZONES 2 +struct ttm_mem_zone; struct ttm_mem_global { + struct kobject kobj; struct ttm_mem_shrink *shrink; struct workqueue_struct *swap_queue; struct work_struct work; wait_queue_head_t queue; spinlock_t lock; - uint64_t emer_memory; - uint64_t max_memory; - uint64_t swap_limit; - uint64_t used_memory; - uint64_t used_total_memory; - uint64_t total_memory_swap_limit; - uint64_t max_total_memory; - uint64_t emer_total_memory; + struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; + unsigned int num_zones; + struct ttm_mem_zone *zone_kernel; +#ifdef CONFIG_HIGHMEM + struct ttm_mem_zone *zone_highmem; +#else + struct ttm_mem_zone *zone_dma32; +#endif }; /** @@ -146,8 +146,13 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, extern int ttm_mem_global_init(struct ttm_mem_global *glob); extern void ttm_mem_global_release(struct ttm_mem_global *glob); extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - bool no_wait, bool interruptible, bool himem); + bool no_wait, bool interruptible); extern void ttm_mem_global_free(struct ttm_mem_global *glob, - uint64_t amount, bool himem); + uint64_t amount); +extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, + bool no_wait, bool interruptible); +extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, + struct page *page); extern size_t ttm_round_pot(size_t size); #endif diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h index 889a4c7958a..0a72ac7c7e5 100644 --- a/include/drm/ttm/ttm_module.h +++ b/include/drm/ttm/ttm_module.h @@ -32,6 +32,7 @@ #define _TTM_MODULE_H_ #include +struct kobject; #define TTM_PFX "[TTM]" @@ -54,5 +55,6 @@ extern void ttm_global_init(void); extern void ttm_global_release(void); extern int ttm_global_item_ref(struct ttm_global_reference *ref); extern void ttm_global_item_unref(struct ttm_global_reference *ref); +extern struct kobject *ttm_get_kobj(void); #endif /* _TTM_MODULE_H_ */ -- cgit v1.2.3-70-g09d2 From a987fcaa805fcb24ba885c2e29fd4fdb6816f08f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 18 Aug 2009 16:51:56 +0200 Subject: ttm: Make parts of a struct ttm_bo_device global. Common resources, like memory accounting and swap lists should be global and not per device. Introduce a struct ttm_bo_global to accomodate this, and register it with sysfs. Add a small sysfs interface to return the number of active buffer objects. 
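As an illustration of the driver-side setup this change implies (not part of the patch itself), a driver now takes two global references at init time: one for the memory accounting object and one for the new buffer-object global, exactly as the radeon hunk below does. A minimal sketch, assuming a hypothetical driver "foo" whose mman struct gains the same bo_global_ref field that radeon gains here:

	static int foo_ttm_global_init(struct foo_device *fdev)
	{
		struct ttm_global_reference *global_ref;
		int r;

		/* Memory accounting global, as before this patch. */
		global_ref = &fdev->mman.mem_global_ref;
		global_ref->global_type = TTM_GLOBAL_TTM_MEM;
		global_ref->size = sizeof(struct ttm_mem_global);
		global_ref->init = &foo_ttm_mem_global_init;
		global_ref->release = &foo_ttm_mem_global_release;
		r = ttm_global_item_ref(global_ref);
		if (r != 0)
			return r;

		/* New: buffer-object global, accounted against the mem global. */
		fdev->mman.bo_global_ref.mem_glob =
			fdev->mman.mem_global_ref.object;
		global_ref = &fdev->mman.bo_global_ref.ref;
		global_ref->global_type = TTM_GLOBAL_TTM_BO;
		global_ref->size = sizeof(struct ttm_bo_global);
		global_ref->init = &ttm_bo_global_init;
		global_ref->release = &ttm_bo_global_release;
		r = ttm_global_item_ref(global_ref);
		if (r != 0)
			ttm_global_item_unref(&fdev->mman.mem_global_ref);
		return r;
	}

The resulting ttm_bo_global (bo_global_ref.ref.object) is what gets passed to ttm_bo_device_init() instead of the ttm_mem_global, and the new bo_count attribute appears in sysfs under the TTM kobject as buffer_objects/bo_count.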
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_object.h | 1 + drivers/gpu/drm/radeon/radeon_ttm.c | 33 +++- drivers/gpu/drm/ttm/ttm_bo.c | 292 ++++++++++++++++++++++----------- drivers/gpu/drm/ttm/ttm_bo_util.c | 4 +- drivers/gpu/drm/ttm/ttm_tt.c | 12 +- include/drm/ttm/ttm_bo_api.h | 1 + include/drm/ttm/ttm_bo_driver.h | 94 ++++++++--- 7 files changed, 296 insertions(+), 141 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 473e4775dc5..10e8af6bb45 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -37,6 +37,7 @@ * TTM. */ struct radeon_mman { + struct ttm_bo_global_ref bo_global_ref; struct ttm_global_reference mem_global_ref; bool mem_global_referenced; struct ttm_bo_device bdev; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1227a97f516..343b6d6b99c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -77,9 +77,25 @@ static int radeon_ttm_global_init(struct radeon_device *rdev) global_ref->release = &radeon_ttm_mem_global_release; r = ttm_global_item_ref(global_ref); if (r != 0) { - DRM_ERROR("Failed referencing a global TTM memory object.\n"); + DRM_ERROR("Failed setting up TTM memory accounting " + "subsystem.\n"); return r; } + + rdev->mman.bo_global_ref.mem_glob = + rdev->mman.mem_global_ref.object; + global_ref = &rdev->mman.bo_global_ref.ref; + global_ref->global_type = TTM_GLOBAL_TTM_BO; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &ttm_bo_global_init; + global_ref->release = &ttm_bo_global_release; + r = ttm_global_item_ref(global_ref); + if (r != 0) { + DRM_ERROR("Failed setting up TTM BO subsystem.\n"); + ttm_global_item_unref(&rdev->mman.mem_global_ref); + return r; + } + rdev->mman.mem_global_referenced = true; return 0; } @@ -87,6 +103,7 @@ static int radeon_ttm_global_init(struct radeon_device *rdev) static void radeon_ttm_global_fini(struct radeon_device *rdev) { if (rdev->mman.mem_global_referenced) { + ttm_global_item_unref(&rdev->mman.bo_global_ref.ref); ttm_global_item_unref(&rdev->mman.mem_global_ref); rdev->mman.mem_global_referenced = false; } @@ -286,9 +303,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, r = ttm_bo_move_ttm(bo, true, no_wait, new_mem); out_cleanup: if (tmp_mem.mm_node) { - spin_lock(&rdev->mman.bdev.lru_lock); + struct ttm_bo_global *glob = rdev->mman.bdev.glob; + + spin_lock(&glob->lru_lock); drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&rdev->mman.bdev.lru_lock); + spin_unlock(&glob->lru_lock); return r; } return r; @@ -323,9 +342,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, } out_cleanup: if (tmp_mem.mm_node) { - spin_lock(&rdev->mman.bdev.lru_lock); + struct ttm_bo_global *glob = rdev->mman.bdev.glob; + + spin_lock(&glob->lru_lock); drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&rdev->mman.bdev.lru_lock); + spin_unlock(&glob->lru_lock); return r; } return r; @@ -441,7 +462,7 @@ int radeon_ttm_init(struct radeon_device *rdev) } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&rdev->mman.bdev, - rdev->mman.mem_global_ref.object, + rdev->mman.bo_global_ref.ref.object, &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 
f16909ceec9..0d0b1b7afbc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -45,6 +45,39 @@ static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); +static void ttm_bo_global_kobj_release(struct kobject *kobj); + +static struct attribute ttm_bo_count = { + .name = "bo_count", + .mode = S_IRUGO +}; + +static ssize_t ttm_bo_global_show(struct kobject *kobj, + struct attribute *attr, + char *buffer) +{ + struct ttm_bo_global *glob = + container_of(kobj, struct ttm_bo_global, kobj); + + return snprintf(buffer, PAGE_SIZE, "%lu\n", + (unsigned long) atomic_read(&glob->bo_count)); +} + +static struct attribute *ttm_bo_global_attrs[] = { + &ttm_bo_count, + NULL +}; + +static struct sysfs_ops ttm_bo_global_ops = { + .show = &ttm_bo_global_show +}; + +static struct kobj_type ttm_bo_glob_kobj_type = { + .release = &ttm_bo_global_kobj_release, + .sysfs_ops = &ttm_bo_global_ops, + .default_attrs = ttm_bo_global_attrs +}; + static inline uint32_t ttm_bo_type_flags(unsigned type) { @@ -67,10 +100,11 @@ static void ttm_bo_release_list(struct kref *list_kref) if (bo->ttm) ttm_tt_destroy(bo->ttm); + atomic_dec(&bo->glob->bo_count); if (bo->destroy) bo->destroy(bo); else { - ttm_mem_global_free(bdev->mem_glob, bo->acc_size); + ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size); kfree(bo); } } @@ -107,7 +141,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) kref_get(&bo->list_kref); if (bo->ttm != NULL) { - list_add_tail(&bo->swap, &bdev->swap_lru); + list_add_tail(&bo->swap, &bo->glob->swap_lru); kref_get(&bo->list_kref); } } @@ -142,7 +176,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; int ret; while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { @@ -154,9 +188,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, if (no_wait) return -EBUSY; - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); ret = ttm_bo_wait_unreserved(bo, interruptible); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); if (unlikely(ret)) return ret; @@ -182,16 +216,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; int put_count = 0; int ret; - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence, sequence); if (likely(ret == 0)) put_count = ttm_bo_del_from_lru(bo); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); while (put_count--) kref_put(&bo->list_kref, ttm_bo_ref_bug); @@ -201,13 +235,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, void ttm_bo_unreserve(struct ttm_buffer_object *bo) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); ttm_bo_add_to_lru(bo); atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_bo_unreserve); @@ -218,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve); static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) { struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; 
int ret = 0; uint32_t page_flags = 0; @@ -230,14 +265,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; case ttm_bo_type_kernel: bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, - page_flags, bdev->dummy_read_page); + page_flags, glob->dummy_read_page); if (unlikely(bo->ttm == NULL)) ret = -ENOMEM; break; case ttm_bo_type_user: bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, page_flags | TTM_PAGE_FLAG_USER, - bdev->dummy_read_page); + glob->dummy_read_page); if (unlikely(bo->ttm == NULL)) ret = -ENOMEM; break; @@ -355,6 +390,7 @@ out_err: static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) { struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; struct ttm_bo_driver *driver = bdev->driver; int ret; @@ -366,7 +402,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) spin_unlock(&bo->lock); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); ret = ttm_bo_reserve_locked(bo, false, false, false, 0); BUG_ON(ret); if (bo->ttm) @@ -381,7 +417,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) bo->mem.mm_node = NULL; } put_count = ttm_bo_del_from_lru(bo); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); atomic_set(&bo->reserved, 0); @@ -391,14 +427,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) return 0; } - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); if (list_empty(&bo->ddestroy)) { void *sync_obj = bo->sync_obj; void *sync_obj_arg = bo->sync_obj_arg; kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); spin_unlock(&bo->lock); if (sync_obj) @@ -408,7 +444,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) ret = 0; } else { - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); spin_unlock(&bo->lock); ret = -EBUSY; } @@ -423,11 +459,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { + struct ttm_bo_global *glob = bdev->glob; struct ttm_buffer_object *entry, *nentry; struct list_head *list, *next; int ret; - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); list_for_each_safe(list, next, &bdev->ddestroy) { entry = list_entry(list, struct ttm_buffer_object, ddestroy); nentry = NULL; @@ -444,16 +481,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) } kref_get(&entry->list_kref); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); ret = ttm_bo_cleanup_refs(entry, remove_all); kref_put(&entry->list_kref, ttm_bo_release_list); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); if (nentry) { bool next_onlist = !list_empty(next); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); kref_put(&nentry->list_kref, ttm_bo_release_list); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); /* * Someone might have raced us and removed the * next entry from the list. 
We don't bother restarting @@ -467,7 +504,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) break; } ret = !list_empty(&bdev->ddestroy); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); return ret; } @@ -517,6 +554,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, { int ret = 0; struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; struct ttm_mem_reg evict_mem; uint32_t proposed_placement; @@ -565,12 +603,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, goto out; } - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); if (evict_mem.mm_node) { drm_mm_put_block(evict_mem.mm_node); evict_mem.mm_node = NULL; } - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); bo->evicted = true; out: return ret; @@ -585,6 +623,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, uint32_t mem_type, bool interruptible, bool no_wait) { + struct ttm_bo_global *glob = bdev->glob; struct drm_mm_node *node; struct ttm_buffer_object *entry; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; @@ -598,7 +637,7 @@ retry_pre_get: if (unlikely(ret != 0)) return ret; - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); do { node = drm_mm_search_free(&man->manager, num_pages, mem->page_alignment, 1); @@ -619,7 +658,7 @@ retry_pre_get: if (likely(ret == 0)) put_count = ttm_bo_del_from_lru(entry); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); if (unlikely(ret != 0)) return ret; @@ -635,21 +674,21 @@ retry_pre_get: if (ret) return ret; - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); } while (1); if (!node) { - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); return -ENOMEM; } node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); if (unlikely(!node)) { - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); goto retry_pre_get; } - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); mem->mm_node = node; mem->mem_type = mem_type; return 0; @@ -697,6 +736,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, bool interruptible, bool no_wait) { struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; struct ttm_mem_type_manager *man; uint32_t num_prios = bdev->driver->num_mem_type_prio; @@ -733,20 +773,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (unlikely(ret)) return ret; - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); node = drm_mm_search_free(&man->manager, mem->num_pages, mem->page_alignment, 1); if (unlikely(!node)) { - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); break; } node = drm_mm_get_block_atomic(node, mem->num_pages, mem-> page_alignment); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); } while (!node); } if (node) @@ -816,7 +856,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t proposed_placement, bool interruptible, bool no_wait) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; int ret = 0; struct ttm_mem_reg mem; @@ -852,9 +892,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, out_unlock: if (ret && mem.mm_node) { - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); drm_mm_put_block(mem.mm_node); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); } return ret; } @@ -990,6 +1030,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); bo->bdev = 
bdev; + bo->glob = bdev->glob; bo->type = type; bo->num_pages = num_pages; bo->mem.mem_type = TTM_PL_SYSTEM; @@ -1002,6 +1043,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, bo->seq_valid = false; bo->persistant_swap_storage = persistant_swap_storage; bo->acc_size = acc_size; + atomic_inc(&bo->glob->bo_count); ret = ttm_bo_check_placement(bo, flags, 0ULL); if (unlikely(ret != 0)) @@ -1040,13 +1082,13 @@ out_err: } EXPORT_SYMBOL(ttm_buffer_object_init); -static inline size_t ttm_bo_size(struct ttm_bo_device *bdev, +static inline size_t ttm_bo_size(struct ttm_bo_global *glob, unsigned long num_pages) { size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK; - return bdev->ttm_bo_size + 2 * page_array_size; + return glob->ttm_bo_size + 2 * page_array_size; } int ttm_buffer_object_create(struct ttm_bo_device *bdev, @@ -1061,10 +1103,10 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, { struct ttm_buffer_object *bo; int ret; - struct ttm_mem_global *mem_glob = bdev->mem_glob; + struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; size_t acc_size = - ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); + ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); if (unlikely(ret != 0)) return ret; @@ -1118,6 +1160,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, struct list_head *head, unsigned mem_type, bool allow_errors) { + struct ttm_bo_global *glob = bdev->glob; struct ttm_buffer_object *entry; int ret; int put_count; @@ -1126,30 +1169,31 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, * Can't use standard list traversal since we're unlocking. */ - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); while (!list_empty(head)) { entry = list_first_entry(head, struct ttm_buffer_object, lru); kref_get(&entry->list_kref); ret = ttm_bo_reserve_locked(entry, false, false, false, 0); put_count = ttm_bo_del_from_lru(entry); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); while (put_count--) kref_put(&entry->list_kref, ttm_bo_ref_bug); BUG_ON(ret); ret = ttm_bo_leave_list(entry, mem_type, allow_errors); ttm_bo_unreserve(entry); kref_put(&entry->list_kref, ttm_bo_release_list); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); } - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); return 0; } int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { + struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; int ret = -EINVAL; @@ -1171,13 +1215,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) if (mem_type > 0) { ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); if (drm_mm_clean(&man->manager)) drm_mm_takedown(&man->manager); else ret = -EBUSY; - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); } return ret; @@ -1251,11 +1295,83 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, } EXPORT_SYMBOL(ttm_bo_init_mm); +static void ttm_bo_global_kobj_release(struct kobject *kobj) +{ + struct ttm_bo_global *glob = + container_of(kobj, struct ttm_bo_global, kobj); + + printk(KERN_INFO TTM_PFX "Freeing bo global.\n"); + ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); + __free_page(glob->dummy_read_page); + kfree(glob); +} + +void ttm_bo_global_release(struct ttm_global_reference *ref) +{ + struct ttm_bo_global *glob = ref->object; + + 
kobject_del(&glob->kobj); + kobject_put(&glob->kobj); +} +EXPORT_SYMBOL(ttm_bo_global_release); + +int ttm_bo_global_init(struct ttm_global_reference *ref) +{ + struct ttm_bo_global_ref *bo_ref = + container_of(ref, struct ttm_bo_global_ref, ref); + struct ttm_bo_global *glob = ref->object; + int ret; + + mutex_init(&glob->device_list_mutex); + spin_lock_init(&glob->lru_lock); + glob->mem_glob = bo_ref->mem_glob; + glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); + + if (unlikely(glob->dummy_read_page == NULL)) { + ret = -ENOMEM; + goto out_no_drp; + } + + INIT_LIST_HEAD(&glob->swap_lru); + INIT_LIST_HEAD(&glob->device_list); + + ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); + ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); + if (unlikely(ret != 0)) { + printk(KERN_ERR TTM_PFX + "Could not register buffer object swapout.\n"); + goto out_no_shrink; + } + + glob->ttm_bo_extra_size = + ttm_round_pot(sizeof(struct ttm_tt)) + + ttm_round_pot(sizeof(struct ttm_backend)); + + glob->ttm_bo_size = glob->ttm_bo_extra_size + + ttm_round_pot(sizeof(struct ttm_buffer_object)); + + atomic_set(&glob->bo_count, 0); + + kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type); + ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects"); + if (unlikely(ret != 0)) + kobject_put(&glob->kobj); + return ret; +out_no_shrink: + __free_page(glob->dummy_read_page); +out_no_drp: + kfree(glob); + return ret; +} +EXPORT_SYMBOL(ttm_bo_global_init); + + int ttm_bo_device_release(struct ttm_bo_device *bdev) { int ret = 0; unsigned i = TTM_NUM_MEM_TYPES; struct ttm_mem_type_manager *man; + struct ttm_bo_global *glob = bdev->glob; while (i--) { man = &bdev->man[i]; @@ -1271,98 +1387,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) } } + mutex_lock(&glob->device_list_mutex); + list_del(&bdev->device_list); + mutex_unlock(&glob->device_list_mutex); + if (!cancel_delayed_work(&bdev->wq)) flush_scheduled_work(); while (ttm_bo_delayed_delete(bdev, true)) ; - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); if (list_empty(&bdev->ddestroy)) TTM_DEBUG("Delayed destroy list was clean\n"); if (list_empty(&bdev->man[0].lru)) TTM_DEBUG("Swap list was clean\n"); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); - ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink); BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); write_lock(&bdev->vm_lock); drm_mm_takedown(&bdev->addr_space_mm); write_unlock(&bdev->vm_lock); - __free_page(bdev->dummy_read_page); return ret; } EXPORT_SYMBOL(ttm_bo_device_release); -/* - * This function is intended to be called on drm driver load. - * If you decide to call it from firstopen, you must protect the call - * from a potentially racing ttm_bo_driver_finish in lastclose. - * (This may happen on X server restart). - */ - int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_mem_global *mem_glob, - struct ttm_bo_driver *driver, uint64_t file_page_offset) + struct ttm_bo_global *glob, + struct ttm_bo_driver *driver, + uint64_t file_page_offset) { int ret = -EINVAL; - bdev->dummy_read_page = NULL; rwlock_init(&bdev->vm_lock); - spin_lock_init(&bdev->lru_lock); + spin_lock_init(&glob->lru_lock); bdev->driver = driver; - bdev->mem_glob = mem_glob; memset(bdev->man, 0, sizeof(bdev->man)); - bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); - if (unlikely(bdev->dummy_read_page == NULL)) { - ret = -ENOMEM; - goto out_err0; - } - /* * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. 
*/ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); if (unlikely(ret != 0)) - goto out_err1; + goto out_no_sys; bdev->addr_space_rb = RB_ROOT; ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); if (unlikely(ret != 0)) - goto out_err2; + goto out_no_addr_mm; INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); bdev->nice_mode = true; INIT_LIST_HEAD(&bdev->ddestroy); - INIT_LIST_HEAD(&bdev->swap_lru); bdev->dev_mapping = NULL; - ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); - ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); - if (unlikely(ret != 0)) { - printk(KERN_ERR TTM_PFX - "Could not register buffer object swapout.\n"); - goto out_err2; - } + bdev->glob = glob; - bdev->ttm_bo_extra_size = - ttm_round_pot(sizeof(struct ttm_tt)) + - ttm_round_pot(sizeof(struct ttm_backend)); - - bdev->ttm_bo_size = bdev->ttm_bo_extra_size + - ttm_round_pot(sizeof(struct ttm_buffer_object)); + mutex_lock(&glob->device_list_mutex); + list_add_tail(&bdev->device_list, &glob->device_list); + mutex_unlock(&glob->device_list_mutex); return 0; -out_err2: +out_no_addr_mm: ttm_bo_clean_mm(bdev, 0); -out_err1: - __free_page(bdev->dummy_read_page); -out_err0: +out_no_sys: return ret; } EXPORT_SYMBOL(ttm_bo_device_init); @@ -1607,21 +1699,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) { - struct ttm_bo_device *bdev = - container_of(shrink, struct ttm_bo_device, shrink); + struct ttm_bo_global *glob = + container_of(shrink, struct ttm_bo_global, shrink); struct ttm_buffer_object *bo; int ret = -EBUSY; int put_count; uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); while (ret == -EBUSY) { - if (unlikely(list_empty(&bdev->swap_lru))) { - spin_unlock(&bdev->lru_lock); + if (unlikely(list_empty(&glob->swap_lru))) { + spin_unlock(&glob->lru_lock); return -EBUSY; } - bo = list_first_entry(&bdev->swap_lru, + bo = list_first_entry(&glob->swap_lru, struct ttm_buffer_object, swap); kref_get(&bo->list_kref); @@ -1633,16 +1725,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) ret = ttm_bo_reserve_locked(bo, false, true, false, 0); if (unlikely(ret == -EBUSY)) { - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); ttm_bo_wait_unreserved(bo, false); kref_put(&bo->list_kref, ttm_bo_release_list); - spin_lock(&bdev->lru_lock); + spin_lock(&glob->lru_lock); } } BUG_ON(ret != 0); put_count = ttm_bo_del_from_lru(bo); - spin_unlock(&bdev->lru_lock); + spin_unlock(&glob->lru_lock); while (put_count--) kref_put(&bo->list_kref, ttm_bo_ref_bug); @@ -1696,6 +1788,6 @@ out: void ttm_bo_swapout_all(struct ttm_bo_device *bdev) { - while (ttm_bo_swapout(&bdev->shrink) == 0) + while (ttm_bo_swapout(&bdev->glob->shrink) == 0) ; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bdec583901e..12cd47aa18c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -41,9 +41,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) struct ttm_mem_reg *old_mem = &bo->mem; if (old_mem->mm_node) { - spin_lock(&bo->bdev->lru_lock); + spin_lock(&bo->glob->lru_lock); drm_mm_put_block(old_mem->mm_node); - spin_unlock(&bo->bdev->lru_lock); + spin_unlock(&bo->glob->lru_lock); } old_mem->mm_node = NULL; } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 4e1e2566d51..b0f73096d37 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ 
b/drivers/gpu/drm/ttm/ttm_tt.c @@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) set_page_dirty_lock(page); ttm->pages[i] = NULL; - ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE); + ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE); put_page(page); } ttm->state = tt_unpopulated; @@ -177,8 +177,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) { struct page *p; - struct ttm_bo_device *bdev = ttm->bdev; - struct ttm_mem_global *mem_glob = bdev->mem_glob; + struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; int ret; while (NULL == (p = ttm->pages[index])) { @@ -348,7 +347,7 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) printk(KERN_ERR TTM_PFX "Erroneous page count. " "Leaking pages.\n"); - ttm_mem_global_free_page(ttm->bdev->mem_glob, + ttm_mem_global_free_page(ttm->glob->mem_glob, cur_page); __free_page(cur_page); } @@ -394,7 +393,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm, struct mm_struct *mm = tsk->mm; int ret; int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0; - struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob; + struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; BUG_ON(num_pages != ttm->num_pages); BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0); @@ -439,8 +438,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, if (!ttm) return NULL; - ttm->bdev = bdev; - + ttm->glob = bdev->glob; ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ttm->first_himem_page = ttm->num_pages; ttm->last_lomem_page = -1; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 99dc521aa1a..49114617052 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -155,6 +155,7 @@ struct ttm_buffer_object { * Members constant at init. */ + struct ttm_bo_global *glob; struct ttm_bo_device *bdev; unsigned long buffer_start; enum ttm_bo_type type; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 62ed733c52a..9dc32f70b9a 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -32,6 +32,7 @@ #include "ttm/ttm_bo_api.h" #include "ttm/ttm_memory.h" +#include "ttm/ttm_module.h" #include "drm_mm.h" #include "linux/workqueue.h" #include "linux/fs.h" @@ -160,7 +161,7 @@ struct ttm_tt { long last_lomem_page; uint32_t page_flags; unsigned long num_pages; - struct ttm_bo_device *bdev; + struct ttm_bo_global *glob; struct ttm_backend *be; struct task_struct *tsk; unsigned long start; @@ -355,24 +356,73 @@ struct ttm_bo_driver { void *(*sync_obj_ref) (void *sync_obj); }; -#define TTM_NUM_MEM_TYPES 8 +/** + * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global. + */ + +struct ttm_bo_global_ref { + struct ttm_global_reference ref; + struct ttm_mem_global *mem_glob; +}; -#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs - idling before CPU mapping */ -#define TTM_BO_PRIV_FLAG_MAX 1 /** - * struct ttm_bo_device - Buffer object driver device-specific data. + * struct ttm_bo_global - Buffer object driver global data. * * @mem_glob: Pointer to a struct ttm_mem_global object for accounting. - * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. - * @count: Current number of buffer object. - * @pages: Current number of pinned pages. * @dummy_read_page: Pointer to a dummy page used for mapping requests * of unpopulated pages. - * @shrink: A shrink callback object used for buffre object swap. 
+ * @shrink: A shrink callback object used for buffer object swap. * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded) * used by a buffer object. This is excluding page arrays and backing pages. * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object). + * @device_list_mutex: Mutex protecting the device list. + * This mutex is held while traversing the device list for pm options. + * @lru_lock: Spinlock protecting the bo subsystem lru lists. + * @device_list: List of buffer object devices. + * @swap_lru: Lru list of buffer objects used for swapping. + */ + +struct ttm_bo_global { + + /** + * Constant after init. + */ + + struct kobject kobj; + struct ttm_mem_global *mem_glob; + struct page *dummy_read_page; + struct ttm_mem_shrink shrink; + size_t ttm_bo_extra_size; + size_t ttm_bo_size; + struct mutex device_list_mutex; + spinlock_t lru_lock; + + /** + * Protected by device_list_mutex. + */ + struct list_head device_list; + + /** + * Protected by the lru_lock. + */ + struct list_head swap_lru; + + /** + * Internal protection. + */ + atomic_t bo_count; +}; + + +#define TTM_NUM_MEM_TYPES 8 + +#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs + idling before CPU mapping */ +#define TTM_BO_PRIV_FLAG_MAX 1 +/** + * struct ttm_bo_device - Buffer object driver device-specific data. + * + * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. * @man: An array of mem_type_managers. * @addr_space_mm: Range manager for the device address space. * lru_lock: Spinlock that protects the buffer+device lru lists and @@ -390,32 +440,21 @@ struct ttm_bo_device { /* * Constant after bo device init / atomic. */ - - struct ttm_mem_global *mem_glob; + struct list_head device_list; + struct ttm_bo_global *glob; struct ttm_bo_driver *driver; - struct page *dummy_read_page; - struct ttm_mem_shrink shrink; - - size_t ttm_bo_extra_size; - size_t ttm_bo_size; - rwlock_t vm_lock; + struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; /* * Protected by the vm lock. */ - struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; struct rb_root addr_space_rb; struct drm_mm addr_space_mm; /* - * Might want to change this to one lock per manager. - */ - spinlock_t lru_lock; - /* - * Protected by the lru lock. + * Protected by the global:lru lock. */ struct list_head ddestroy; - struct list_head swap_lru; /* * Protected by load / firstopen / lastclose /unload sync. @@ -629,6 +668,9 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev, unsigned long *bus_offset, unsigned long *bus_size); +extern void ttm_bo_global_release(struct ttm_global_reference *ref); +extern int ttm_bo_global_init(struct ttm_global_reference *ref); + extern int ttm_bo_device_release(struct ttm_bo_device *bdev); /** @@ -646,7 +688,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); * !0: Failure. */ extern int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_mem_global *mem_glob, + struct ttm_bo_global *glob, struct ttm_bo_driver *driver, uint64_t file_page_offset); -- cgit v1.2.3-70-g09d2 From 4516fc0454e7ffe2f369e80045b23c2b32155004 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 14 Aug 2009 12:57:54 -0400 Subject: sunrpc: add routine for comparing addresses lockd needs these sort of routines, as does the NFSv4 callback code. Move lockd's routines into common code and rename them so that they can be used by others. Signed-off-by: Jeff Layton Acked-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- fs/lockd/clntlock.c | 2 +- fs/lockd/host.c | 4 ++-- fs/lockd/mon.c | 2 +- fs/lockd/svcsubs.c | 2 +- include/linux/lockd/lockd.h | 43 ---------------------------------------- include/linux/sunrpc/clnt.h | 48 +++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 53 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 1f3b0fc0d35..fc9032dc886 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -166,7 +166,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) */ if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) continue; - if (!nlm_cmp_addr(nlm_addr(block->b_host), addr)) + if (!rpc_cmp_addr(nlm_addr(block->b_host), addr)) continue; if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0) continue; diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 7cb076ac6b4..4600c2037b8 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -111,7 +111,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) */ chain = &nlm_hosts[nlm_hash_address(ni->sap)]; hlist_for_each_entry(host, pos, chain, h_hash) { - if (!nlm_cmp_addr(nlm_addr(host), ni->sap)) + if (!rpc_cmp_addr(nlm_addr(host), ni->sap)) continue; /* See if we have an NSM handle for this client */ @@ -125,7 +125,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) if (host->h_server != ni->server) continue; if (ni->server && - !nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap)) + !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap)) continue; /* Move to head of hash chain. */ diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 30c933188dd..f956651d0f6 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -209,7 +209,7 @@ static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) - if (nlm_cmp_addr(nsm_addr(nsm), sap)) + if (rpc_cmp_addr(nsm_addr(nsm), sap)) return nsm; return NULL; } diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 9e4d6aab611..ad478da7ca6 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_sb); static int nlmsvc_match_ip(void *datap, struct nlm_host *host) { - return nlm_cmp_addr(nlm_srcaddr(host), datap); + return rpc_cmp_addr(nlm_srcaddr(host), datap); } /** diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c325b187966..e7a251a988c 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -338,49 +338,6 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) } } -static inline int __nlm_cmp_addr4(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; - const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; - return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; -} - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; - const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; - return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); -} -#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ -static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - return 0; -} -#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ - -/* 
- * Compare two host addresses - * - * Return TRUE if the addresses are the same; otherwise FALSE. - */ -static inline int nlm_cmp_addr(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - if (sap1->sa_family == sap2->sa_family) { - switch (sap1->sa_family) { - case AF_INET: - return __nlm_cmp_addr4(sap1, sap2); - case AF_INET6: - return __nlm_cmp_addr6(sap1, sap2); - } - } - return 0; -} - /* * Compare two NLM locks. * When the second lock is of type F_UNLCK, this acts like a wildcard. diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index ab3f6e90caa..b17df361be8 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -22,6 +22,7 @@ #include #include #include +#include struct rpc_inode; @@ -188,5 +189,52 @@ static inline void rpc_set_port(struct sockaddr *sap, #define IPV6_SCOPE_DELIMITER '%' #define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") +static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; + const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; + + return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; + const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; + return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); +} +#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ +static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + return false; +} +#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ + +/** + * rpc_cmp_addr - compare the address portion of two sockaddrs. + * @sap1: first sockaddr + * @sap2: second sockaddr + * + * Just compares the family and address portion. Ignores port, scope, etc. + * Returns true if the addrs are equal, false if they aren't. + */ +static inline bool rpc_cmp_addr(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + if (sap1->sa_family == sap2->sa_family) { + switch (sap1->sa_family) { + case AF_INET: + return __rpc_cmp_addr4(sap1, sap2); + case AF_INET6: + return __rpc_cmp_addr6(sap1, sap2); + } + } + return false; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ -- cgit v1.2.3-70-g09d2 From be3ad6b0b675fd1d6b48362ca30bdee75fbef6b4 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 14 Aug 2009 12:57:55 -0400 Subject: sunrpc: add common routine for copying address portion of a sockaddr Signed-off-by: Jeff Layton Acked-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/clnt.h | 50 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'include') diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index b17df361be8..044f531aee7 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -198,6 +198,17 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; } +static inline bool __rpc_copy_addr4(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in *ssin = (struct sockaddr_in *) src; + struct sockaddr_in *dsin = (struct sockaddr_in *) dst; + + dsin->sin_family = ssin->sin_family; + dsin->sin_addr.s_addr = ssin->sin_addr.s_addr; + return true; +} + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) @@ -206,12 +217,29 @@ static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); } + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src; + struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; + + dsin6->sin6_family = ssin6->sin6_family; + ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr); + return true; +} #else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { return false; } + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + return false; +} #endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ /** @@ -236,5 +264,27 @@ static inline bool rpc_cmp_addr(const struct sockaddr *sap1, return false; } +/** + * rpc_copy_addr - copy the address portion of one sockaddr to another + * @dst: destination sockaddr + * @src: source sockaddr + * + * Just copies the address portion and family. Ignores port, scope, etc. + * Caller is responsible for making certain that dst is large enough to hold + * the address in src. Returns true if address family is supported. Returns + * false otherwise. + */ +static inline bool rpc_copy_addr(struct sockaddr *dst, + const struct sockaddr *src) +{ + switch (src->sa_family) { + case AF_INET: + return __rpc_copy_addr4(dst, src); + case AF_INET6: + return __rpc_copy_addr6(dst, src); + } + return false; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ -- cgit v1.2.3-70-g09d2 From 363168b4ea8ec26aeb982ac6024a09f907ecd27e Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 14 Aug 2009 12:57:56 -0400 Subject: nfsd: make nfs4_client->cl_addr a struct sockaddr_storage It's currently a __be32, which isn't big enough to hold an IPv6 address. Signed-off-by: Jeff Layton Acked-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 32 +++++++++++++++++++------------- include/linux/nfsd/state.h | 2 +- 2 files changed, 20 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 9295c4b56bc..bfc14d879ea 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -55,6 +55,7 @@ #include #include #include +#include #define NFSDDBG_FACILITY NFSDDBG_PROC @@ -1220,13 +1221,15 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, int status; unsigned int strhashval; char dname[HEXDIR_LEN]; + char addr_str[INET6_ADDRSTRLEN]; nfs4_verifier verf = exid->verifier; - u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr; + struct sockaddr *sa = svc_addr(rqstp); + rpc_ntop(sa, addr_str, sizeof(addr_str)); dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " - " ip_addr=%u flags %x, spa_how %d\n", + "ip_addr=%s flags %x, spa_how %d\n", __func__, rqstp, exid, exid->clname.len, exid->clname.data, - ip_addr, exid->flags, exid->spa_how); + addr_str, exid->flags, exid->spa_how); if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A)) return nfserr_inval; @@ -1315,7 +1318,7 @@ out_new: copy_verf(new, &verf); copy_cred(&new->cl_cred, &rqstp->rq_cred); - new->cl_addr = ip_addr; + rpc_copy_addr((struct sockaddr *) &new->cl_addr, sa); gen_clid(new); gen_confirm(new); add_to_unconfirmed(new, strhashval); @@ -1389,7 +1392,7 @@ nfsd4_create_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_create_session *cr_ses) { - u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr; + struct sockaddr *sa = svc_addr(rqstp); struct nfs4_client *conf, *unconf; struct nfsd4_clid_slot *cs_slot = NULL; int status = 0; @@ -1417,7 +1420,7 @@ nfsd4_create_session(struct svc_rqst *rqstp, cs_slot->sl_seqid++; } else if (unconf) { if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || - (ip_addr != unconf->cl_addr)) { + !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { status = nfserr_clid_inuse; goto out; } @@ -1564,7 +1567,7 @@ __be32 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid *setclid) { - struct sockaddr_in *sin = svc_addr_in(rqstp); + struct sockaddr *sa = svc_addr(rqstp); struct xdr_netobj clname = { .len = setclid->se_namelen, .data = setclid->se_name, @@ -1596,8 +1599,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, /* RFC 3530 14.2.33 CASE 0: */ status = nfserr_clid_inuse; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { - dprintk("NFSD: setclientid: string in use by client" - " at %pI4\n", &conf->cl_addr); + char addr_str[INET6_ADDRSTRLEN]; + rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, + sizeof(addr_str)); + dprintk("NFSD: setclientid: string in use by client " + "at %s\n", addr_str); goto out; } } @@ -1659,7 +1665,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, gen_clid(new); } copy_verf(new, &clverifier); - new->cl_addr = sin->sin_addr.s_addr; + rpc_copy_addr((struct sockaddr *) &new->cl_addr, sa); new->cl_flavor = rqstp->rq_flavor; princ = svc_gss_principal(rqstp); if (princ) { @@ -1693,7 +1699,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid_confirm *setclientid_confirm) { - struct sockaddr_in *sin = svc_addr_in(rqstp); + struct sockaddr *sa = svc_addr(rqstp); struct nfs4_client *conf, *unconf; nfs4_verifier confirm = setclientid_confirm->sc_confirm; clientid_t * clid = 
&setclientid_confirm->sc_clientid; @@ -1712,9 +1718,9 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, unconf = find_unconfirmed_client(clid); status = nfserr_clid_inuse; - if (conf && conf->cl_addr != sin->sin_addr.s_addr) + if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa)) goto out; - if (unconf && unconf->cl_addr != sin->sin_addr.s_addr) + if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa)) goto out; /* diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 58bb19784e1..3510ddd4be4 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -200,7 +200,7 @@ struct nfs4_client { char cl_recdir[HEXDIR_LEN]; /* recovery dir */ nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ - __be32 cl_addr; /* client ipaddress */ + struct sockaddr_storage cl_addr; /* client ipaddress */ u32 cl_flavor; /* setclientid pseudoflavor */ char *cl_principal; /* setclientid principal name */ struct svc_cred cl_cred; /* setclientid principal */ -- cgit v1.2.3-70-g09d2 From aa9a4ec7707a5391cde556f3fa1b0eb4bca3bcf6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 14 Aug 2009 12:57:57 -0400 Subject: nfsd: convert nfs4_cb_conn struct to hold address in sockaddr_storage ...rather than as a separate address and port fields. This will be necessary for implementing callbacks over IPv6. Also, convert gen_callback to use the standard rpcuaddr2sockaddr routine rather than its own private one. Signed-off-by: Jeff Layton Acked-by: Chuck Lever Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 11 ++----- fs/nfsd/nfs4state.c | 81 ++++++---------------------------------------- include/linux/nfsd/state.h | 4 +-- 3 files changed, 13 insertions(+), 83 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 3fd23f7acec..81d1c5285dc 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -377,7 +377,6 @@ static int max_cb_time(void) int setup_callback_client(struct nfs4_client *clp) { - struct sockaddr_in addr; struct nfs4_cb_conn *cb = &clp->cl_cb_conn; struct rpc_timeout timeparms = { .to_initval = max_cb_time(), @@ -385,8 +384,8 @@ int setup_callback_client(struct nfs4_client *clp) }; struct rpc_create_args args = { .protocol = IPPROTO_TCP, - .address = (struct sockaddr *)&addr, - .addrsize = sizeof(addr), + .address = (struct sockaddr *) &cb->cb_addr, + .addrsize = cb->cb_addrlen, .timeout = &timeparms, .program = &cb_program, .prognumber = cb->cb_prog, @@ -400,12 +399,6 @@ int setup_callback_client(struct nfs4_client *clp) if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) return -EINVAL; - /* Initialize address */ - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_port = htons(cb->cb_port); - addr.sin_addr.s_addr = htonl(cb->cb_addr); - /* Create RPC client */ client = rpc_create(&args); if (IS_ERR(client)) { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index bfc14d879ea..96a742308ce 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -897,76 +897,6 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval, return NULL; } -/* a helper function for parse_callback */ -static int -parse_octet(unsigned int *lenp, char **addrp) -{ - unsigned int len = *lenp; - char *p = *addrp; - int n = -1; - char c; - - for (;;) { - if (!len) - break; - len--; - c = *p++; - if (c == '.') - break; - if ((c < '0') || (c > '9')) { - n = -1; - break; - } - if (n < 0) - n = 0; - n = 
(n * 10) + (c - '0'); - if (n > 255) { - n = -1; - break; - } - } - *lenp = len; - *addrp = p; - return n; -} - -/* parse and set the setclientid ipv4 callback address */ -static int -parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigned short *cbportp) -{ - int temp = 0; - u32 cbaddr = 0; - u16 cbport = 0; - u32 addrlen = addr_len; - char *addr = addr_val; - int i, shift; - - /* ipaddress */ - shift = 24; - for(i = 4; i > 0 ; i--) { - if ((temp = parse_octet(&addrlen, &addr)) < 0) { - return 0; - } - cbaddr |= (temp << shift); - if (shift > 0) - shift -= 8; - } - *cbaddrp = cbaddr; - - /* port */ - shift = 8; - for(i = 2; i > 0 ; i--) { - if ((temp = parse_octet(&addrlen, &addr)) < 0) { - return 0; - } - cbport |= (temp << shift); - if (shift > 0) - shift -= 8; - } - *cbportp = cbport; - return 1; -} - static void gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se) { @@ -976,14 +906,21 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se) if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3)) goto out_err; - if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val, - &cb->cb_addr, &cb->cb_port))) + cb->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val, + se->se_callback_addr_len, + (struct sockaddr *) &cb->cb_addr, + sizeof(cb->cb_addr)); + + if (!cb->cb_addrlen || cb->cb_addr.ss_family != AF_INET) goto out_err; + cb->cb_minorversion = 0; cb->cb_prog = se->se_callback_prog; cb->cb_ident = se->se_callback_ident; return; out_err: + cb->cb_addr.ss_family = AF_UNSPEC; + cb->cb_addrlen = 0; dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " "will not receive delegations\n", clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 3510ddd4be4..fb0c404c7c5 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -81,8 +81,8 @@ struct nfs4_delegation { /* client delegation callback info */ struct nfs4_cb_conn { /* SETCLIENTID info */ - u32 cb_addr; - unsigned short cb_port; + struct sockaddr_storage cb_addr; + size_t cb_addrlen; u32 cb_prog; u32 cb_minorversion; u32 cb_ident; /* minorversion 0 only */ -- cgit v1.2.3-70-g09d2 From fbf4665f41b02e757ab9d9198df65e319388e728 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 14 Aug 2009 12:57:59 -0400 Subject: nfsd: populate sin6_scope_id on callback address with scopeid from rq_addr on SETCLIENTID call When a SETCLIENTID call comes in, one of the args given is the svc_rqst. This struct contains an rq_addr field which holds the address that sent the call. If this is an IPv6 address, then we can use the sin6_scope_id field in this address to populate the sin6_scope_id field in the callback address. AFAICT, the rq_addr.sin6_scope_id is non-zero if and only if the client mounted the server's link-local address. Signed-off-by: Jeff Layton Acked-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 7 +++++-- include/linux/sunrpc/clnt.h | 15 +++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 9ec0ca1ef4e..d2a05248090 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -898,7 +898,7 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval, } static void -gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se) +gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) { struct nfs4_cb_conn *cb = &clp->cl_cb_conn; unsigned short expected_family; @@ -921,6 +921,9 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se) if (!cb->cb_addrlen || cb->cb_addr.ss_family != expected_family) goto out_err; + if (cb->cb_addr.ss_family == AF_INET6) + ((struct sockaddr_in6 *) &cb->cb_addr)->sin6_scope_id = scopeid; + cb->cb_minorversion = 0; cb->cb_prog = se->se_callback_prog; cb->cb_ident = se->se_callback_ident; @@ -1621,7 +1624,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } copy_cred(&new->cl_cred, &rqstp->rq_cred); gen_confirm(new); - gen_callback(new, setclid); + gen_callback(new, setclid, rpc_get_scope_id(sa)); add_to_unconfirmed(new, strhashval); setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; setclid->se_clientid.cl_id = new->cl_clientid.cl_id; diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 044f531aee7..3d025588e56 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -286,5 +286,20 @@ static inline bool rpc_copy_addr(struct sockaddr *dst, return false; } +/** + * rpc_get_scope_id - return scopeid for a given sockaddr + * @sa: sockaddr to get scopeid from + * + * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if + * not an AF_INET6 address. + */ +static inline u32 rpc_get_scope_id(const struct sockaddr *sa) +{ + if (sa->sa_family != AF_INET6) + return 0; + + return ((struct sockaddr_in6 *) sa)->sin6_scope_id; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ -- cgit v1.2.3-70-g09d2 From da15cfdae03351c689736f8d142618592e3cebc3 Mon Sep 17 00:00:00 2001 From: john stultz Date: Wed, 19 Aug 2009 19:13:34 -0700 Subject: time: Introduce CLOCK_REALTIME_COARSE After talking with some application writers who want very fast, but not fine-grained timestamps, I decided to try to implement new clock_ids to clock_gettime(): CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE which returns the time at the last tick. This is very fast as we don't have to access any hardware (which can be very painful if you're using something like the acpi_pm clocksource), and we can even use the vdso clock_gettime() method to avoid the syscall. The only trade off is you only get low-res tick grained time resolution. This isn't a new idea, I know Ingo has a patch in the -rt tree that made the vsyscall gettimeofday() return coarse grained time when the vsyscall64 sysctrl was set to 2. However this affects all applications on a system. With this method, applications can choose the proper speed/granularity trade-off for themselves. 
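To illustrate the intended usage (this example is not part of the patch; the clockid value 6 matches the constant added below), a caller just passes the new id to clock_gettime() and falls back to the fine-grained clock on older kernels:

/* Illustrative userspace sketch, not part of this patch: read a coarse,
 * tick-granularity timestamp and fall back to CLOCK_MONOTONIC if the
 * kernel does not know the new clock id.
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>

#ifndef CLOCK_MONOTONIC_COARSE
#define CLOCK_MONOTONIC_COARSE 6	/* value introduced by this patch */
#endif

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == -1) {
		if (errno != EINVAL)
			return 1;
		clock_gettime(CLOCK_MONOTONIC, &ts);	/* older kernel */
	}

	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

(On older glibc the program needs -lrt for clock_gettime.)
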
Signed-off-by: John Stultz Cc: Andi Kleen Cc: nikolag@ca.ibm.com Cc: Darren Hart Cc: arjan@infradead.org Cc: jonathan@jonmasters.org LKML-Reference: <1250734414.6897.5.camel@localhost.localdomain> Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/vgtod.h | 1 + arch/x86/kernel/vsyscall_64.c | 1 + arch/x86/vdso/vclock_gettime.c | 39 ++++++++++++++++++++++++++++++++++++--- include/linux/time.h | 4 ++++ kernel/posix-timers.c | 35 +++++++++++++++++++++++++++++++++++ kernel/time/timekeeping.c | 21 +++++++++++++++++++++ 6 files changed, 98 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index dc27a69e5d2..3d61e204826 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -21,6 +21,7 @@ struct vsyscall_gtod_data { u32 shift; } clock; struct timespec wall_to_monotonic; + struct timespec wall_time_coarse; }; extern struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data; diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 25ee06a80aa..cf53a78e2dc 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; + vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 6a40b78b46a..ee55754cc3c 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -86,14 +86,47 @@ notrace static noinline int do_monotonic(struct timespec *ts) return 0; } +notrace static noinline int do_realtime_coarse(struct timespec *ts) +{ + unsigned long seq; + do { + seq = read_seqbegin(>od->lock); + ts->tv_sec = gtod->wall_time_coarse.tv_sec; + ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; + } while (unlikely(read_seqretry(>od->lock, seq))); + return 0; +} + +notrace static noinline int do_monotonic_coarse(struct timespec *ts) +{ + unsigned long seq, ns, secs; + do { + seq = read_seqbegin(>od->lock); + secs = gtod->wall_time_coarse.tv_sec; + ns = gtod->wall_time_coarse.tv_nsec; + secs += gtod->wall_to_monotonic.tv_sec; + ns += gtod->wall_to_monotonic.tv_nsec; + } while (unlikely(read_seqretry(>od->lock, seq))); + vset_normalized_timespec(ts, secs, ns); + return 0; +} + notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) { - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) + if (likely(gtod->sysctl_enabled)) switch (clock) { case CLOCK_REALTIME: - return do_realtime(ts); + if (likely(gtod->clock.vread)) + return do_realtime(ts); + break; case CLOCK_MONOTONIC: - return do_monotonic(ts); + if (likely(gtod->clock.vread)) + return do_monotonic(ts); + break; + case CLOCK_REALTIME_COARSE: + return do_realtime_coarse(ts); + case CLOCK_MONOTONIC_COARSE: + return do_monotonic_coarse(ts); } return vdso_fallback_gettime(clock, ts); } diff --git a/include/linux/time.h b/include/linux/time.h index f505988398e..256232f7e5e 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -110,6 +110,8 @@ extern int timekeeping_suspended; unsigned long get_seconds(void); struct timespec current_kernel_time(void); +struct timespec __current_kernel_time(void); /* does not hold xtime_lock */ +struct timespec 
get_monotonic_coarse(void); #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) @@ -243,6 +245,8 @@ struct itimerval { #define CLOCK_PROCESS_CPUTIME_ID 2 #define CLOCK_THREAD_CPUTIME_ID 3 #define CLOCK_MONOTONIC_RAW 4 +#define CLOCK_REALTIME_COARSE 5 +#define CLOCK_MONOTONIC_COARSE 6 /* * The IDs of various hardware clocks: diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index d089d052c4a..495440779ce 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -242,6 +242,25 @@ static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp) return 0; } + +static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp) +{ + *tp = current_kernel_time(); + return 0; +} + +static int posix_get_monotonic_coarse(clockid_t which_clock, + struct timespec *tp) +{ + *tp = get_monotonic_coarse(); + return 0; +} + +int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp) +{ + *tp = ktime_to_timespec(KTIME_LOW_RES); + return 0; +} /* * Initialize everything, well, just everything in Posix clocks/timers ;) */ @@ -262,10 +281,26 @@ static __init int init_posix_timers(void) .timer_create = no_timer_create, .nsleep = no_nsleep, }; + struct k_clock clock_realtime_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_realtime_coarse, + .clock_set = do_posix_clock_nosettime, + .timer_create = no_timer_create, + .nsleep = no_nsleep, + }; + struct k_clock clock_monotonic_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_monotonic_coarse, + .clock_set = do_posix_clock_nosettime, + .timer_create = no_timer_create, + .nsleep = no_nsleep, + }; register_posix_clock(CLOCK_REALTIME, &clock_realtime); register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw); + register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse); + register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse); posix_timers_cache = kmem_cache_create("posix_timers_cache", sizeof (struct k_itimer), 0, SLAB_PANIC, diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 15e06defca5..03cbeb34d14 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -847,6 +847,10 @@ unsigned long get_seconds(void) } EXPORT_SYMBOL(get_seconds); +struct timespec __current_kernel_time(void) +{ + return xtime_cache; +} struct timespec current_kernel_time(void) { @@ -862,3 +866,20 @@ struct timespec current_kernel_time(void) return now; } EXPORT_SYMBOL(current_kernel_time); + +struct timespec get_monotonic_coarse(void) +{ + struct timespec now, mono; + unsigned long seq; + + do { + seq = read_seqbegin(&xtime_lock); + + now = xtime_cache; + mono = wall_to_monotonic; + } while (read_seqretry(&xtime_lock, seq)); + + set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, + now.tv_nsec + mono.tv_nsec); + return now; +} -- cgit v1.2.3-70-g09d2 From 05ecd5a1f76c183cca381705b3adb7d77c9a0439 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Mon, 24 Aug 2009 19:52:38 +0900 Subject: sh: Simplify "multi-evt" interrupt handling. This patch changes the way in which "multi-evt" interrups are handled. The intc_evt2irq_table and related intc_evt2irq() have been removed and the "redirecting" handler is installed for the coupled interrupts. Thanks to that the do_IRQ() function don't have to use another level of indirection for all the interrupts... 
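The scheme boils down to the sketch below (illustrative only; intc_couple() is a made-up helper, the real wiring happens inside register_intc_controller()):

#include <linux/init.h>
#include <linux/irq.h>

/* Illustrative sketch of the redirection used for coupled ("multi-evt")
 * vectors: the secondary vector gets no handler of its own, its irq_desc
 * just re-raises the first vector's irq.
 */
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	/* the target irq number was stashed with set_irq_data() below */
	generic_handle_irq((unsigned int)(unsigned long)get_irq_data(irq));
}

/* made-up helper, not in the patch: couple irq2 to irq */
static void __init intc_couple(struct irq_chip *chip,
			       unsigned int irq, unsigned int irq2)
{
	set_irq_chip_and_handler_name(irq2, chip, intc_redirect_irq,
				      "redirect");
	set_irq_data(irq2, (void *)(unsigned long)irq);
}
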
Signed-off-by: Pawel Moll Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/irq.c | 2 +- drivers/sh/intc.c | 54 ++++++++++++++++--------------------------------- include/linux/sh_intc.h | 1 - 3 files changed, 18 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 278c68c6048..d1053392e28 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -114,7 +114,7 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) #endif irq_enter(); - irq = irq_demux(intc_evt2irq(irq)); + irq = irq_demux(evt2irq(irq)); #ifdef CONFIG_IRQSTACKS curctx = (union irq_ctx *)current_thread_info(); diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 4b1ca9d2835..a9174ec7285 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c @@ -663,16 +663,9 @@ static unsigned int __init save_reg(struct intc_desc_int *d, return 0; } -static unsigned char *intc_evt2irq_table; - -unsigned int intc_evt2irq(unsigned int vector) +static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) { - unsigned int irq = evt2irq(vector); - - if (intc_evt2irq_table && intc_evt2irq_table[irq]) - irq = intc_evt2irq_table[irq]; - - return irq; + generic_handle_irq((unsigned int)get_irq_data(irq)); } void __init register_intc_controller(struct intc_desc *desc) @@ -745,34 +738,6 @@ void __init register_intc_controller(struct intc_desc *desc) BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ - /* keep the first vector only if same enum is used multiple times */ - for (i = 0; i < desc->nr_vectors; i++) { - struct intc_vect *vect = desc->vectors + i; - int first_irq = evt2irq(vect->vect); - - if (!vect->enum_id) - continue; - - for (k = i + 1; k < desc->nr_vectors; k++) { - struct intc_vect *vect2 = desc->vectors + k; - - if (vect->enum_id != vect2->enum_id) - continue; - - vect2->enum_id = 0; - - if (!intc_evt2irq_table) - intc_evt2irq_table = kzalloc(NR_IRQS, GFP_NOWAIT); - - if (!intc_evt2irq_table) { - pr_warning("intc: cannot allocate evt2irq!\n"); - continue; - } - - intc_evt2irq_table[evt2irq(vect2->vect)] = first_irq; - } - } - /* register the vectors one by one */ for (i = 0; i < desc->nr_vectors; i++) { struct intc_vect *vect = desc->vectors + i; @@ -789,6 +754,21 @@ void __init register_intc_controller(struct intc_desc *desc) } intc_register_irq(desc, d, vect->enum_id, irq); + + for (k = i + 1; k < desc->nr_vectors; k++) { + struct intc_vect *vect2 = desc->vectors + k; + unsigned int irq2 = evt2irq(vect2->vect); + + if (vect->enum_id != vect2->enum_id) + continue; + + vect2->enum_id = 0; + + /* redirect this interrupts to the first one */ + set_irq_chip_and_handler_name(irq2, &d->chip, + intc_redirect_irq, "redirect"); + set_irq_data(irq2, (void *)irq); + } } } diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index eb1423a0078..68e212ff9dd 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h @@ -85,7 +85,6 @@ struct intc_desc symbol __initdata = { \ } #endif -unsigned int intc_evt2irq(unsigned int vector); void __init register_intc_controller(struct intc_desc *desc); int intc_set_priority(unsigned int irq, unsigned int prio); -- cgit v1.2.3-70-g09d2 From c9c97b8c75019814d8c007059bc827bb475be917 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 27 Aug 2009 09:53:47 +1000 Subject: drm/ttm: consolidate cache flushing code in one place. This merges the TTM and drm cache flushing into one file in the drm core. 
Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_cache.c | 51 +++++++++++++++++++++++++++------ drivers/gpu/drm/ttm/ttm_tt.c | 67 ++------------------------------------------ include/drm/drm_cache.h | 38 +++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 74 deletions(-) create mode 100644 include/drm/drm_cache.h (limited to 'include') diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 0e994a0e46d..3a5575e638d 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -45,25 +45,58 @@ drm_clflush_page(struct page *page) clflush(page_virtual + i); kunmap_atomic(page_virtual, KM_USER0); } -#endif +static void drm_cache_flush_clflush(struct page *pages[], + unsigned long num_pages) +{ + unsigned long i; + + mb(); + for (i = 0; i < num_pages; i++) + drm_clflush_page(*pages++); + mb(); +} + +static void +drm_clflush_ipi_handler(void *null) +{ + wbinvd(); +} +#elif !defined(__powerpc__) +static void drm_cache_ipi_handler(void *dummy) +{ +} +#endif void drm_clflush_pages(struct page *pages[], unsigned long num_pages) { #if defined(CONFIG_X86) if (cpu_has_clflush) { - unsigned long i; - - mb(); - for (i = 0; i < num_pages; ++i) - drm_clflush_page(*pages++); - mb(); - + drm_cache_flush_clflush(pages, num_pages); return; } - wbinvd(); + if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) + printk(KERN_ERR "Timed out waiting for cache flush.\n"); + +#elif defined(__powerpc__) + unsigned long i; + for (i = 0; i < num_pages; i++) { + struct page *page = pages[i]; + void *page_virtual; + + if (unlikely(page == NULL)) + continue; + + page_virtual = kmap_atomic(page, KM_USER0); + flush_dcache_range((unsigned long)page_virtual, + (unsigned long)page_virtual + PAGE_SIZE); + kunmap_atomic(page_virtual, KM_USER0); + } +#else + if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) + printk(KERN_ERR "Timed out waiting for drm cache flush\n"); #endif } EXPORT_SYMBOL(drm_clflush_pages); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 42cca551976..a55ee1a56c1 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -34,76 +34,13 @@ #include #include #include +#include "drm_cache.h" #include "ttm/ttm_module.h" #include "ttm/ttm_bo_driver.h" #include "ttm/ttm_placement.h" static int ttm_tt_swapin(struct ttm_tt *ttm); -#if defined(CONFIG_X86) -static void ttm_tt_clflush_page(struct page *page) -{ - uint8_t *page_virtual; - unsigned int i; - - if (unlikely(page == NULL)) - return; - - page_virtual = kmap_atomic(page, KM_USER0); - - for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) - clflush(page_virtual + i); - - kunmap_atomic(page_virtual, KM_USER0); -} - -static void ttm_tt_cache_flush_clflush(struct page *pages[], - unsigned long num_pages) -{ - unsigned long i; - - mb(); - for (i = 0; i < num_pages; ++i) - ttm_tt_clflush_page(*pages++); - mb(); -} -#elif !defined(__powerpc__) -static void ttm_tt_ipi_handler(void *null) -{ - ; -} -#endif - -void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) -{ - -#if defined(CONFIG_X86) - if (cpu_has_clflush) { - ttm_tt_cache_flush_clflush(pages, num_pages); - return; - } -#elif defined(__powerpc__) - unsigned long i; - - for (i = 0; i < num_pages; ++i) { - struct page *page = pages[i]; - void *page_virtual; - - if (unlikely(page == NULL)) - continue; - - page_virtual = kmap_atomic(page, KM_USER0); - flush_dcache_range((unsigned long) page_virtual, - (unsigned long) page_virtual + PAGE_SIZE); - kunmap_atomic(page_virtual, KM_USER0); - 
} -#else - if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) - printk(KERN_ERR TTM_PFX - "Timed out waiting for drm cache flush.\n"); -#endif -} - /** * Allocates storage for pointers to the pages that back the ttm. * @@ -302,7 +239,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, } if (ttm->caching_state == tt_cached) - ttm_tt_cache_flush(ttm->pages, ttm->num_pages); + drm_clflush_pages(ttm->pages, ttm->num_pages); for (i = 0; i < ttm->num_pages; ++i) { cur_page = ttm->pages[i]; diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h new file mode 100644 index 00000000000..7bfb063029d --- /dev/null +++ b/include/drm/drm_cache.h @@ -0,0 +1,38 @@ +/************************************************************************** + * + * Copyright 2009 Red Hat Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Authors: + * Dave Airlie + */ + +#ifndef _DRM_CACHE_H_ +#define _DRM_CACHE_H_ + +void drm_clflush_pages(struct page *pages[], unsigned long num_pages); + +#endif -- cgit v1.2.3-70-g09d2 From a1a2d1d32250f6fcc317419e9dfb4a5a6946d2e6 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sun, 23 Aug 2009 12:40:55 +0300 Subject: drm: GEM handles are u32, not int Several functions in the GEM kernel API used int as handle type, but user API has it __u32 which is also the intended type. Replace int with u32. Signed-off-by: Pekka Paalanen Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 11 +++++------ drivers/gpu/drm/i915/i915_gem.c | 3 ++- include/drm/drmP.h | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index ffe8f4394d5..230c9ffdd5e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -164,7 +164,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc); * Removes the mapping from handle to filp for this object. 
*/ static int -drm_gem_handle_delete(struct drm_file *filp, int handle) +drm_gem_handle_delete(struct drm_file *filp, u32 handle) { struct drm_device *dev; struct drm_gem_object *obj; @@ -207,7 +207,7 @@ drm_gem_handle_delete(struct drm_file *filp, int handle) int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, - int *handlep) + u32 *handlep) { int ret; @@ -221,7 +221,7 @@ again: /* do the allocation under our spinlock */ spin_lock(&file_priv->table_lock); - ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep); + ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep); spin_unlock(&file_priv->table_lock); if (ret == -EAGAIN) goto again; @@ -237,7 +237,7 @@ EXPORT_SYMBOL(drm_gem_handle_create); /** Returns a reference to the object named by the handle. */ struct drm_gem_object * drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, - int handle) + u32 handle) { struct drm_gem_object *obj; @@ -344,7 +344,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, struct drm_gem_open *args = data; struct drm_gem_object *obj; int ret; - int handle; + u32 handle; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; @@ -539,7 +539,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; vma->vm_ops = obj->dev->driver->gem_vm_ops; vma->vm_private_data = map->handle; - /* FIXME: use pgprot_writecombine when available */ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); /* Take a ref for this mapping of the object, so that the fault diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 140bee142fc..0e6c9cca897 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -111,7 +111,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_create *args = data; struct drm_gem_object *obj; - int handle, ret; + int ret; + u32 handle; args->size = roundup(args->size, PAGE_SIZE); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index e0f1c1fee58..eeefb6369e1 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1441,7 +1441,7 @@ drm_gem_object_unreference(struct drm_gem_object *obj) int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, - int *handlep); + u32 *handlep); static inline void drm_gem_object_handle_reference(struct drm_gem_object *obj) @@ -1467,7 +1467,7 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj) struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, - int handle); + u32 handle); int drm_gem_close_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_gem_flink_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3-70-g09d2 From f8d80cdf40fe4d2393159012b38ce9f85a488686 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Tue, 2 Jun 2009 13:28:13 +0800 Subject: ACPICA: Remove duplicate extern declarations for public globals Some were defined twice, causes a warning with gcc -Wredundant-decls. 
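For reference, the class of warning being fixed can be reproduced with a two-line example (illustrative, not ACPICA code):

/* cc -Wredundant-decls demo, illustrative only */
extern unsigned int acpi_example_global;	/* declaration, e.g. from a header */
extern unsigned int acpi_example_global;	/* second declaration: warning */

unsigned int acpi_example_global;		/* the single definition */
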
Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- drivers/acpi/acpica/acglobal.h | 33 ++++++++++++++++++--------------- include/acpi/acpixf.h | 1 + 2 files changed, 19 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 3d87362d17e..0b73b31c1b5 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -58,6 +58,10 @@ #define ACPI_INIT_GLOBAL(a,b) a #endif +#ifdef DEFINE_ACPI_GLOBALS + +/* Public globals, available from outside ACPICA subsystem */ + /***************************************************************************** * * Runtime configuration (static defaults that can be overriden at runtime) @@ -78,7 +82,7 @@ * 5) Allow unresolved references (invalid target name) in package objects * 6) Enable warning messages for behavior that is not ACPI spec compliant */ -ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE); +u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE); /* * Automatically serialize ALL control methods? Default is FALSE, meaning @@ -86,27 +90,36 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE); * Only change this if the ASL code is poorly written and cannot handle * reentrancy even though methods are marked "NotSerialized". */ -ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE); +u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE); /* * Create the predefined _OSI method in the namespace? Default is TRUE * because ACPI CA is fully compatible with other ACPI implementations. * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior. */ -ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE); +u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE); /* * Disable wakeup GPEs during runtime? Default is TRUE because WAKE and * RUNTIME GPEs should never be shared, and WAKE GPEs should typically only * be enabled just before going to sleep. */ -ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE); +u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE); /* * Optionally use default values for the ACPI register widths. Set this to * TRUE to use the defaults, if an FADT contains incorrect widths/lengths. */ -ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE); +u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE); + +/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. 
*/ + +struct acpi_table_fadt acpi_gbl_FADT; +u32 acpi_current_gpe_count; +u32 acpi_gbl_trace_flags; +acpi_name acpi_gbl_trace_method_name; + +#endif /***************************************************************************** * @@ -114,11 +127,6 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE); * ****************************************************************************/ -/* Runtime configuration of debug print levels */ - -extern u32 acpi_dbg_level; -extern u32 acpi_dbg_layer; - /* Procedure nesting level for debug output */ extern u32 acpi_gbl_nesting_level; @@ -127,10 +135,8 @@ extern u32 acpi_gbl_nesting_level; ACPI_EXTERN u32 acpi_gbl_original_dbg_level; ACPI_EXTERN u32 acpi_gbl_original_dbg_layer; -ACPI_EXTERN acpi_name acpi_gbl_trace_method_name; ACPI_EXTERN u32 acpi_gbl_trace_dbg_level; ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer; -ACPI_EXTERN u32 acpi_gbl_trace_flags; /***************************************************************************** * @@ -142,10 +148,8 @@ ACPI_EXTERN u32 acpi_gbl_trace_flags; * acpi_gbl_root_table_list is the master list of ACPI tables found in the * RSDT/XSDT. * - * acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */ ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list; -ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT; ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS; /* These addresses are calculated from the FADT Event Block addresses */ @@ -340,7 +344,6 @@ ACPI_EXTERN struct acpi_fixed_event_handler ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; ACPI_EXTERN struct acpi_gpe_block_info *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; -ACPI_EXTERN u32 acpi_current_gpe_count; /***************************************************************************** * diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 82ec6a3c050..2aecaa5cc06 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -64,6 +64,7 @@ extern u8 acpi_gbl_enable_interpreter_slack; extern u8 acpi_gbl_all_methods_serialized; extern u8 acpi_gbl_create_osi_method; extern u8 acpi_gbl_leave_wake_gpes_disabled; +extern u8 acpi_gbl_use_default_register_widths; extern acpi_name acpi_gbl_trace_method_name; extern u32 acpi_gbl_trace_flags; -- cgit v1.2.3-70-g09d2 From c6b5774caafa4c12b6019366e2fdaaff117e95a4 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Wed, 24 Jun 2009 09:44:06 +0800 Subject: ACPICA: Add 64-bit support to acpi_read and acpi_write Needed by drivers for new ACPi tables. Internal versions of these functions still use 32-bit max transfers, in order to minimize disruption and stack use for the standard ACPI registers (FADT-based). 
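Condensed, the new external acpi_read()/acpi_write() path validates the GAS descriptor and then, for 64-bit-wide registers, performs two 32-bit transfers; a sketch for the system-memory case (error handling and the I/O-port case trimmed, not the actual ACPICA code):

#include <acpi/acpi.h>

/* Illustrative condensation of the new 64-bit read for a GAS register in
 * system memory: low dword at address, high dword at address + 4.
 */
static acpi_status example_read64_memory(acpi_physical_address address,
					 u64 *return_value)
{
	u32 lo = 0, hi = 0;
	acpi_status status;

	status = acpi_os_read_memory(address, &lo, 32);
	if (ACPI_FAILURE(status))
		return (status);

	status = acpi_os_read_memory(address + 4, &hi, 32);
	if (ACPI_FAILURE(status))
		return (status);

	*return_value = ((u64)hi << 32) | lo;
	return (AE_OK);
}
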
Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- drivers/acpi/acpica/achware.h | 8 ++ drivers/acpi/acpica/evgpe.c | 8 +- drivers/acpi/acpica/evgpeblk.c | 4 +- drivers/acpi/acpica/hwgpe.c | 34 +++---- drivers/acpi/acpica/hwregs.c | 206 ++++++++++++++++++++++++++++++++++++++--- drivers/acpi/acpica/hwtimer.c | 2 +- drivers/acpi/acpica/hwxface.c | 166 +++++++++++++++++++-------------- include/acpi/acpixf.h | 4 +- 8 files changed, 327 insertions(+), 105 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 4afa3d8e0ef..36192f142fb 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -62,6 +62,14 @@ u32 acpi_hw_get_mode(void); /* * hwregs - ACPI Register I/O */ +acpi_status +acpi_hw_validate_register(struct acpi_generic_address *reg, + u8 max_bit_width, u64 *address); + +acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg); + +acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg); + struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id); acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control); diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index b9d8ee69ca6..afacf4416c7 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -424,8 +424,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) /* Read the Status Register */ status = - acpi_read(&status_reg, - &gpe_register_info->status_address); + acpi_hw_read(&status_reg, + &gpe_register_info->status_address); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } @@ -433,8 +433,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) /* Read the Enable Register */ status = - acpi_read(&enable_reg, - &gpe_register_info->enable_address); + acpi_hw_read(&enable_reg, + &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 7b346363942..a60aaa7635f 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -843,14 +843,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) /* Disable all GPEs within this register */ - status = acpi_write(0x00, &this_register->enable_address); + status = acpi_hw_write(0x00, &this_register->enable_address); if (ACPI_FAILURE(status)) { goto error_exit; } /* Clear any pending GPE events within this register */ - status = acpi_write(0xFF, &this_register->status_address); + status = acpi_hw_write(0xFF, &this_register->status_address); if (ACPI_FAILURE(status)) { goto error_exit; } diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index d3b7e37c9ee..c28c41b3180 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -82,7 +82,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) /* Get current value of the enable register that contains this GPE */ - status = acpi_read(&enable_mask, &gpe_register_info->enable_address); + status = acpi_hw_read(&enable_mask, &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { return (status); } @@ -95,7 +95,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) /* Write the updated enable mask */ - status = acpi_write(enable_mask, &gpe_register_info->enable_address); + status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); return 
(status); } @@ -130,8 +130,8 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info) /* Write the entire GPE (runtime) enable register */ - status = acpi_write(gpe_register_info->enable_for_run, - &gpe_register_info->enable_address); + status = acpi_hw_write(gpe_register_info->enable_for_run, + &gpe_register_info->enable_address); return (status); } @@ -163,8 +163,8 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) * Write a one to the appropriate bit in the status register to * clear this GPE. */ - status = acpi_write(register_bit, - &gpe_event_info->register_info->status_address); + status = acpi_hw_write(register_bit, + &gpe_event_info->register_info->status_address); return (status); } @@ -222,7 +222,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, /* GPE currently active (status bit == 1)? */ - status = acpi_read(&in_byte, &gpe_register_info->status_address); + status = acpi_hw_read(&in_byte, &gpe_register_info->status_address); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } @@ -266,8 +266,8 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* Disable all GPEs in this register */ status = - acpi_write(0x00, - &gpe_block->register_info[i].enable_address); + acpi_hw_write(0x00, + &gpe_block->register_info[i].enable_address); if (ACPI_FAILURE(status)) { return (status); } @@ -303,8 +303,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* Clear status on all GPEs in this register */ status = - acpi_write(0xFF, - &gpe_block->register_info[i].status_address); + acpi_hw_write(0xFF, + &gpe_block->register_info[i].status_address); if (ACPI_FAILURE(status)) { return (status); } @@ -345,9 +345,9 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* Enable all "runtime" GPEs in this register */ - status = acpi_write(gpe_block->register_info[i].enable_for_run, - &gpe_block->register_info[i]. - enable_address); + status = + acpi_hw_write(gpe_block->register_info[i].enable_for_run, + &gpe_block->register_info[i].enable_address); if (ACPI_FAILURE(status)) { return (status); } @@ -387,9 +387,9 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* Enable all "wake" GPEs in this register */ - status = acpi_write(gpe_block->register_info[i].enable_for_wake, - &gpe_block->register_info[i]. - enable_address); + status = + acpi_hw_write(gpe_block->register_info[i].enable_for_wake, + &gpe_block->register_info[i].enable_address); if (ACPI_FAILURE(status)) { return (status); } diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 23d5505cb1f..15c9ed2be85 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -62,6 +62,184 @@ acpi_hw_write_multiple(u32 value, struct acpi_generic_address *register_a, struct acpi_generic_address *register_b); +/****************************************************************************** + * + * FUNCTION: acpi_hw_validate_register + * + * PARAMETERS: Reg - GAS register structure + * max_bit_width - Max bit_width supported (32 or 64) + * Address - Pointer to where the gas->address + * is returned + * + * RETURN: Status + * + * DESCRIPTION: Validate the contents of a GAS register. Checks the GAS + * pointer, Address, space_id, bit_width, and bit_offset. 
+ * + ******************************************************************************/ + +acpi_status +acpi_hw_validate_register(struct acpi_generic_address *reg, + u8 max_bit_width, u64 *address) +{ + + /* Must have a valid pointer to a GAS structure */ + + if (!reg) { + return (AE_BAD_PARAMETER); + } + + /* + * Copy the target address. This handles possible alignment issues. + * Address must not be null. A null address also indicates an optional + * ACPI register that is not supported, so no error message. + */ + ACPI_MOVE_64_TO_64(address, ®->address); + if (!(*address)) { + return (AE_BAD_ADDRESS); + } + + /* Validate the space_iD */ + + if ((reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && + (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { + ACPI_ERROR((AE_INFO, + "Unsupported address space: 0x%X", reg->space_id)); + return (AE_SUPPORT); + } + + /* Validate the bit_width */ + + if ((reg->bit_width != 8) && + (reg->bit_width != 16) && + (reg->bit_width != 32) && (reg->bit_width != max_bit_width)) { + ACPI_ERROR((AE_INFO, + "Unsupported register bit width: 0x%X", + reg->bit_width)); + return (AE_SUPPORT); + } + + /* Validate the bit_offset. Just a warning for now. */ + + if (reg->bit_offset != 0) { + ACPI_WARNING((AE_INFO, + "Unsupported register bit offset: 0x%X", + reg->bit_offset)); + } + + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: acpi_hw_read + * + * PARAMETERS: Value - Where the value is returned + * Reg - GAS register structure + * + * RETURN: Status + * + * DESCRIPTION: Read from either memory or IO space. This is a 32-bit max + * version of acpi_read, used internally since the overhead of + * 64-bit values is not needed. + * + * LIMITATIONS: + * bit_width must be exactly 8, 16, or 32. + * space_iD must be system_memory or system_iO. + * bit_offset and access_width are currently ignored, as there has + * not been a need to implement these. + * + ******************************************************************************/ + +acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) +{ + u64 address; + acpi_status status; + + ACPI_FUNCTION_NAME(hw_read); + + /* Validate contents of the GAS register */ + + status = acpi_hw_validate_register(reg, 32, &address); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Initialize entire 32-bit return value to zero */ + + *value = 0; + + /* + * Two address spaces supported: Memory or IO. PCI_Config is + * not supported here because the GAS structure is insufficient + */ + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + status = acpi_os_read_memory((acpi_physical_address) + address, value, reg->bit_width); + } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ + + status = acpi_hw_read_port((acpi_io_address) + address, value, reg->bit_width); + } + + ACPI_DEBUG_PRINT((ACPI_DB_IO, + "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", + *value, reg->bit_width, ACPI_FORMAT_UINT64(address), + acpi_ut_get_region_name(reg->space_id))); + + return (status); +} + +/****************************************************************************** + * + * FUNCTION: acpi_hw_write + * + * PARAMETERS: Value - Value to be written + * Reg - GAS register structure + * + * RETURN: Status + * + * DESCRIPTION: Write to either memory or IO space. This is a 32-bit max + * version of acpi_write, used internally since the overhead of + * 64-bit values is not needed. 
+ * + ******************************************************************************/ + +acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) +{ + u64 address; + acpi_status status; + + ACPI_FUNCTION_NAME(hw_write); + + /* Validate contents of the GAS register */ + + status = acpi_hw_validate_register(reg, 32, &address); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* + * Two address spaces supported: Memory or IO. PCI_Config is + * not supported here because the GAS structure is insufficient + */ + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + status = acpi_os_write_memory((acpi_physical_address) + address, value, reg->bit_width); + } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ + + status = acpi_hw_write_port((acpi_io_address) + address, value, reg->bit_width); + } + + ACPI_DEBUG_PRINT((ACPI_DB_IO, + "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", + value, reg->bit_width, ACPI_FORMAT_UINT64(address), + acpi_ut_get_region_name(reg->space_id))); + + return (status); +} + /******************************************************************************* * * FUNCTION: acpi_hw_clear_acpi_status @@ -152,15 +330,16 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control) ACPI_FUNCTION_TRACE(hw_write_pm1_control); - status = acpi_write(pm1a_control, &acpi_gbl_FADT.xpm1a_control_block); + status = + acpi_hw_write(pm1a_control, &acpi_gbl_FADT.xpm1a_control_block); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (acpi_gbl_FADT.xpm1b_control_block.address) { status = - acpi_write(pm1b_control, - &acpi_gbl_FADT.xpm1b_control_block); + acpi_hw_write(pm1b_control, + &acpi_gbl_FADT.xpm1b_control_block); } return_ACPI_STATUS(status); } @@ -218,12 +397,13 @@ acpi_hw_register_read(u32 register_id, u32 * return_value) case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ - status = acpi_read(&value, &acpi_gbl_FADT.xpm2_control_block); + status = + acpi_hw_read(&value, &acpi_gbl_FADT.xpm2_control_block); break; case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ - status = acpi_read(&value, &acpi_gbl_FADT.xpm_timer_block); + status = acpi_hw_read(&value, &acpi_gbl_FADT.xpm_timer_block); break; case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ @@ -340,7 +520,8 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value) * as per the ACPI spec. 
*/ status = - acpi_read(&read_value, &acpi_gbl_FADT.xpm2_control_block); + acpi_hw_read(&read_value, + &acpi_gbl_FADT.xpm2_control_block); if (ACPI_FAILURE(status)) { goto exit; } @@ -350,12 +531,13 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value) ACPI_INSERT_BITS(value, ACPI_PM2_CONTROL_PRESERVED_BITS, read_value); - status = acpi_write(value, &acpi_gbl_FADT.xpm2_control_block); + status = + acpi_hw_write(value, &acpi_gbl_FADT.xpm2_control_block); break; case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ - status = acpi_write(value, &acpi_gbl_FADT.xpm_timer_block); + status = acpi_hw_write(value, &acpi_gbl_FADT.xpm_timer_block); break; case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ @@ -401,7 +583,7 @@ acpi_hw_read_multiple(u32 *value, /* The first register is always required */ - status = acpi_read(&value_a, register_a); + status = acpi_hw_read(&value_a, register_a); if (ACPI_FAILURE(status)) { return (status); } @@ -409,7 +591,7 @@ acpi_hw_read_multiple(u32 *value, /* Second register is optional */ if (register_b->address) { - status = acpi_read(&value_b, register_b); + status = acpi_hw_read(&value_b, register_b); if (ACPI_FAILURE(status)) { return (status); } @@ -452,7 +634,7 @@ acpi_hw_write_multiple(u32 value, /* The first register is always required */ - status = acpi_write(value, register_a); + status = acpi_hw_write(value, register_a); if (ACPI_FAILURE(status)) { return (status); } @@ -470,7 +652,7 @@ acpi_hw_write_multiple(u32 value, * and writes have no side effects" */ if (register_b->address) { - status = acpi_write(value, register_b); + status = acpi_hw_write(value, register_b); } return (status); diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index b7f522c8f02..6b282e85d03 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -100,7 +100,7 @@ acpi_status acpi_get_timer(u32 * ticks) } status = - acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block); + acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 9829979f2bd..4ead85f2921 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -80,7 +80,7 @@ acpi_status acpi_reset(void) /* Write the reset value to the reset register */ - status = acpi_write(acpi_gbl_FADT.reset_value, reset_reg); + status = acpi_hw_write(acpi_gbl_FADT.reset_value, reset_reg); return_ACPI_STATUS(status); } @@ -97,67 +97,92 @@ ACPI_EXPORT_SYMBOL(acpi_reset) * * DESCRIPTION: Read from either memory or IO space. * + * LIMITATIONS: + * bit_width must be exactly 8, 16, 32, or 64. + * space_iD must be system_memory or system_iO. + * bit_offset and access_width are currently ignored, as there has + * not been a need to implement these. + * ******************************************************************************/ -acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg) +acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) { + u32 value; u32 width; u64 address; acpi_status status; ACPI_FUNCTION_NAME(acpi_read); - /* - * Must have a valid pointer to a GAS structure, and a non-zero address - * within. - */ - if (!reg) { + if (!return_value) { return (AE_BAD_PARAMETER); } - /* Get a local copy of the address. Handles possible alignment issues */ + /* Validate contents of the GAS register. 
Allow 64-bit transfers */ - ACPI_MOVE_64_TO_64(&address, ®->address); - if (!address) { - return (AE_BAD_ADDRESS); + status = acpi_hw_validate_register(reg, 64, &address); + if (ACPI_FAILURE(status)) { + return (status); } - /* Supported widths are 8/16/32 */ - width = reg->bit_width; - if ((width != 8) && (width != 16) && (width != 32)) { - return (AE_SUPPORT); + if (width == 64) { + width = 32; /* Break into two 32-bit transfers */ } - /* Initialize entire 32-bit return value to zero */ + /* Initialize entire 64-bit return value to zero */ - *value = 0; + *return_value = 0; + value = 0; /* * Two address spaces supported: Memory or IO. PCI_Config is * not supported here because the GAS structure is insufficient */ - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + status = acpi_os_read_memory((acpi_physical_address) + address, &value, width); + if (ACPI_FAILURE(status)) { + return (status); + } + *return_value = value; + + if (reg->bit_width == 64) { - status = acpi_os_read_memory((acpi_physical_address) address, - value, width); - break; + /* Read the top 32 bits */ - case ACPI_ADR_SPACE_SYSTEM_IO: + status = acpi_os_read_memory((acpi_physical_address) + (address + 4), &value, 32); + if (ACPI_FAILURE(status)) { + return (status); + } + *return_value |= ((u64)value << 32); + } + } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ - status = - acpi_hw_read_port((acpi_io_address) address, value, width); - break; + status = acpi_hw_read_port((acpi_io_address) + address, &value, width); + if (ACPI_FAILURE(status)) { + return (status); + } + *return_value = value; - default: - ACPI_ERROR((AE_INFO, - "Unsupported address space: %X", reg->space_id)); - return (AE_BAD_PARAMETER); + if (reg->bit_width == 64) { + + /* Read the top 32 bits */ + + status = acpi_hw_read_port((acpi_io_address) + (address + 4), &value, 32); + if (ACPI_FAILURE(status)) { + return (status); + } + *return_value |= ((u64)value << 32); + } } ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", - *value, width, ACPI_FORMAT_UINT64(address), + "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n", + ACPI_FORMAT_UINT64(*return_value), reg->bit_width, + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); @@ -169,7 +194,7 @@ ACPI_EXPORT_SYMBOL(acpi_read) * * FUNCTION: acpi_write * - * PARAMETERS: Value - To be written + * PARAMETERS: Value - Value to be written * Reg - GAS register structure * * RETURN: Status @@ -177,7 +202,7 @@ ACPI_EXPORT_SYMBOL(acpi_read) * DESCRIPTION: Write to either memory or IO space. * ******************************************************************************/ -acpi_status acpi_write(u32 value, struct acpi_generic_address *reg) +acpi_status acpi_write(u64 value, struct acpi_generic_address *reg) { u32 width; u64 address; @@ -185,54 +210,61 @@ acpi_status acpi_write(u32 value, struct acpi_generic_address *reg) ACPI_FUNCTION_NAME(acpi_write); - /* - * Must have a valid pointer to a GAS structure, and a non-zero address - * within. - */ - if (!reg) { - return (AE_BAD_PARAMETER); - } + /* Validate contents of the GAS register. Allow 64-bit transfers */ - /* Get a local copy of the address. 
Handles possible alignment issues */ - - ACPI_MOVE_64_TO_64(&address, ®->address); - if (!address) { - return (AE_BAD_ADDRESS); + status = acpi_hw_validate_register(reg, 64, &address); + if (ACPI_FAILURE(status)) { + return (status); } - /* Supported widths are 8/16/32 */ - width = reg->bit_width; - if ((width != 8) && (width != 16) && (width != 32)) { - return (AE_SUPPORT); + if (width == 64) { + width = 32; /* Break into two 32-bit transfers */ } /* - * Two address spaces supported: Memory or IO. - * PCI_Config is not supported here because the GAS struct is insufficient + * Two address spaces supported: Memory or IO. PCI_Config is + * not supported here because the GAS structure is insufficient */ - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - - status = acpi_os_write_memory((acpi_physical_address) address, - value, width); - break; + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + status = acpi_os_write_memory((acpi_physical_address) + address, ACPI_LODWORD(value), + width); + if (ACPI_FAILURE(status)) { + return (status); + } - case ACPI_ADR_SPACE_SYSTEM_IO: + if (reg->bit_width == 64) { + status = acpi_os_write_memory((acpi_physical_address) + (address + 4), + ACPI_HIDWORD(value), 32); + if (ACPI_FAILURE(status)) { + return (status); + } + } + } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ - status = acpi_hw_write_port((acpi_io_address) address, value, + status = acpi_hw_write_port((acpi_io_address) + address, ACPI_LODWORD(value), width); - break; + if (ACPI_FAILURE(status)) { + return (status); + } - default: - ACPI_ERROR((AE_INFO, - "Unsupported address space: %X", reg->space_id)); - return (AE_BAD_PARAMETER); + if (reg->bit_width == 64) { + status = acpi_hw_write_port((acpi_io_address) + (address + 4), + ACPI_HIDWORD(value), 32); + if (ACPI_FAILURE(status)) { + return (status); + } + } } ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", - value, width, ACPI_FORMAT_UINT64(address), + "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n", + ACPI_FORMAT_UINT64(value), reg->bit_width, + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 2aecaa5cc06..b450a195319 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -360,9 +360,9 @@ acpi_status acpi_set_firmware_waking_vector(u32 physical_address); acpi_status acpi_set_firmware_waking_vector64(u64 physical_address); #endif -acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg); +acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg); -acpi_status acpi_write(u32 value, struct acpi_generic_address *reg); +acpi_status acpi_write(u64 value, struct acpi_generic_address *reg); acpi_status acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); -- cgit v1.2.3-70-g09d2 From 15b8dd53f5ffaf8e2d9095c423f713423f576c0f Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Mon, 29 Jun 2009 13:39:29 +0800 Subject: ACPICA: Major update for acpi_get_object_info external interface Completed a major update for the acpi_get_object_info external interface. Changes include: - Support for variable, unlimited length HID, UID, and CID strings - Support Processor objects the same as Devices (HID,UID,CID,ADR,STA, etc.) - Call the _SxW power methods on behalf of a device object - Determine if a device is a PCI root bridge - Change the ACPI_BUFFER parameter to ACPI_DEVICE_INFO. These changes will require an update to all callers of this interface. 
See the ACPICA Programmer Reference for details. Also, update all invocations of acpi_get_object_info interface Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- arch/ia64/hp/common/sba_iommu.c | 7 +- drivers/acpi/acpi_memhotplug.c | 11 +- drivers/acpi/acpica/Makefile | 2 +- drivers/acpi/acpica/acconfig.h | 5 + drivers/acpi/acpica/acglobal.h | 3 +- drivers/acpi/acpica/acinterp.h | 4 +- drivers/acpi/acpica/acutils.h | 24 ++- drivers/acpi/acpica/evrgnini.c | 45 +---- drivers/acpi/acpica/exutils.c | 53 +++-- drivers/acpi/acpica/nsdumpdv.c | 7 +- drivers/acpi/acpica/nsxfeval.c | 23 ++- drivers/acpi/acpica/nsxfname.c | 237 +++++++++++++++++------ drivers/acpi/acpica/uteval.c | 375 ++++-------------------------------- drivers/acpi/acpica/utglobal.c | 10 +- drivers/acpi/acpica/utids.c | 382 +++++++++++++++++++++++++++++++++++++ drivers/acpi/acpica/utmisc.c | 28 +++ drivers/acpi/container.c | 11 +- drivers/acpi/dock.c | 8 +- drivers/acpi/glue.c | 6 +- drivers/acpi/scan.c | 153 +++++++++------ drivers/char/agp/hp-agp.c | 9 +- drivers/ide/ide-acpi.c | 5 +- drivers/pci/hotplug/acpiphp_ibm.c | 12 +- drivers/platform/x86/sony-laptop.c | 7 +- drivers/pnp/pnpacpi/core.c | 6 +- include/acpi/acpi_bus.h | 8 +- include/acpi/acpixf.h | 3 +- include/acpi/actypes.h | 87 +++++---- 28 files changed, 901 insertions(+), 630 deletions(-) create mode 100644 drivers/acpi/acpica/utids.c (limited to 'include') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 8cfb001092a..674a8374c6d 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2026,24 +2026,21 @@ acpi_sba_ioc_add(struct acpi_device *device) struct ioc *ioc; acpi_status status; u64 hpa, length; - struct acpi_buffer buffer; struct acpi_device_info *dev_info; status = hp_acpi_csr_space(device->handle, &hpa, &length); if (ACPI_FAILURE(status)) return 1; - buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - status = acpi_get_object_info(device->handle, &buffer); + status = acpi_get_object_info(device->handle, &dev_info); if (ACPI_FAILURE(status)) return 1; - dev_info = buffer.pointer; /* * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI * root bridges, and its CSR space includes the IOC function. 
*/ - if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) { + if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) { hpa += ZX1_IOC_OFFSET; /* zx1 based systems default to kernel page size iommu pages */ if (!iovp_shift) diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 9a62224cc27..80eacbe157e 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -481,26 +481,23 @@ static acpi_status is_memory_device(acpi_handle handle) { char *hardware_id; acpi_status status; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_device_info *info; - - status = acpi_get_object_info(handle, &buffer); + status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) return status; - info = buffer.pointer; if (!(info->valid & ACPI_VALID_HID)) { - kfree(buffer.pointer); + kfree(info); return AE_ERROR; } - hardware_id = info->hardware_id.value; + hardware_id = info->hardware_id.string; if ((hardware_id == NULL) || (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID))) status = AE_ERROR; - kfree(buffer.pointer); + kfree(info); return status; } diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 72ac28da14e..0e7d56185f6 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -44,4 +44,4 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \ utcopy.o utdelete.o utglobal.o utmath.o utobject.o \ - utstate.o utmutex.o utobject.o utresrc.o utlock.o + utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h index e6777fb883d..6c1fb2d9f4d 100644 --- a/drivers/acpi/acpica/acconfig.h +++ b/drivers/acpi/acpica/acconfig.h @@ -203,6 +203,11 @@ #define ACPI_SMBUS_BUFFER_SIZE 34 +/* _sx_d and _sx_w control methods */ + +#define ACPI_NUM_sx_d_METHODS 4 +#define ACPI_NUM_sx_w_METHODS 5 + /****************************************************************************** * * ACPI AML Debugger diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 0b73b31c1b5..6389f7c1de5 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -265,7 +265,8 @@ ACPI_EXTERN u8 acpi_gbl_osi_data; extern u8 acpi_gbl_shutdown; extern u32 acpi_gbl_startup_flags; extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT]; -extern const char *acpi_gbl_highest_dstate_names[4]; +extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS]; +extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS]; extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES]; extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index e8db7a3143a..5db9f2916f7 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -461,9 +461,9 @@ void acpi_ex_acquire_global_lock(u32 rule); void acpi_ex_release_global_lock(u32 rule); -void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string); +void acpi_ex_eisa_id_to_string(char *dest, acpi_integer compressed_id); -void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string); +void acpi_ex_integer_to_string(char *dest, acpi_integer value); /* * exregion - default op_region handlers diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 897810ba0cc..b0add85de30 
100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -324,26 +324,30 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, acpi_status acpi_ut_evaluate_numeric_object(char *object_name, struct acpi_namespace_node *device_node, - acpi_integer * address); + acpi_integer *value); acpi_status -acpi_ut_execute_HID(struct acpi_namespace_node *device_node, - struct acpica_device_id *hid); +acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 *status_flags); acpi_status -acpi_ut_execute_CID(struct acpi_namespace_node *device_node, - struct acpi_compatible_id_list **return_cid_list); +acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node, + const char **method_names, + u8 method_count, u8 *out_values); +/* + * utids - device ID support + */ acpi_status -acpi_ut_execute_STA(struct acpi_namespace_node *device_node, - u32 * status_flags); +acpi_ut_execute_HID(struct acpi_namespace_node *device_node, + struct acpica_device_id **return_id); acpi_status acpi_ut_execute_UID(struct acpi_namespace_node *device_node, - struct acpica_device_id *uid); + struct acpica_device_id **return_id); acpi_status -acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest); +acpi_ut_execute_CID(struct acpi_namespace_node *device_node, + struct acpica_device_id_list **return_cid_list); /* * utlock - reader/writer locks @@ -445,6 +449,8 @@ acpi_ut_short_divide(acpi_integer in_dividend, */ const char *acpi_ut_validate_exception(acpi_status status); +u8 acpi_ut_is_pci_root_bridge(char *id); + u8 acpi_ut_is_aml_table(struct acpi_table_header *table); acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id); diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 284a7becbe9..cf29c495302 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -50,8 +50,6 @@ ACPI_MODULE_NAME("evrgnini") /* Local prototypes */ -static u8 acpi_ev_match_pci_root_bridge(char *id); - static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); /******************************************************************************* @@ -330,37 +328,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, return_ACPI_STATUS(AE_OK); } -/******************************************************************************* - * - * FUNCTION: acpi_ev_match_pci_root_bridge - * - * PARAMETERS: Id - The HID/CID in string format - * - * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge - * - * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID. - * - ******************************************************************************/ - -static u8 acpi_ev_match_pci_root_bridge(char *id) -{ - - /* - * Check if this is a PCI root. - * ACPI 3.0+: check for a PCI Express root also. 
- */ - if (!(ACPI_STRNCMP(id, - PCI_ROOT_HID_STRING, - sizeof(PCI_ROOT_HID_STRING))) || - !(ACPI_STRNCMP(id, - PCI_EXPRESS_ROOT_HID_STRING, - sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) { - return (TRUE); - } - - return (FALSE); -} - /******************************************************************************* * * FUNCTION: acpi_ev_is_pci_root_bridge @@ -377,9 +344,10 @@ static u8 acpi_ev_match_pci_root_bridge(char *id) static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) { acpi_status status; - struct acpica_device_id hid; - struct acpi_compatible_id_list *cid; + struct acpica_device_id *hid; + struct acpica_device_id_list *cid; u32 i; + u8 match; /* Get the _HID and check for a PCI Root Bridge */ @@ -388,7 +356,10 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) return (FALSE); } - if (acpi_ev_match_pci_root_bridge(hid.value)) { + match = acpi_ut_is_pci_root_bridge(hid->string); + ACPI_FREE(hid); + + if (match) { return (TRUE); } @@ -402,7 +373,7 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) /* Check all _CIDs in the returned list */ for (i = 0; i < cid->count; i++) { - if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) { + if (acpi_ut_is_pci_root_bridge(cid->ids[i].string)) { ACPI_FREE(cid); return (TRUE); } diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 87730e94413..7d41f99f705 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -358,50 +358,67 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base) * * FUNCTION: acpi_ex_eisa_id_to_string * - * PARAMETERS: numeric_id - EISA ID to be converted + * PARAMETERS: compressed_id - EISAID to be converted * out_string - Where to put the converted string (8 bytes) * * RETURN: None * - * DESCRIPTION: Convert a numeric EISA ID to string representation + * DESCRIPTION: Convert a numeric EISAID to string representation. Return + * buffer must be large enough to hold the string. The string + * returned is always exactly of length ACPI_EISAID_STRING_SIZE + * (includes null terminator). The EISAID is always 32 bits. * ******************************************************************************/ -void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string) +void acpi_ex_eisa_id_to_string(char *out_string, acpi_integer compressed_id) { - u32 eisa_id; + u32 swapped_id; ACPI_FUNCTION_ENTRY(); + /* The EISAID should be a 32-bit integer */ + + if (compressed_id > ACPI_UINT32_MAX) { + ACPI_WARNING((AE_INFO, + "Expected EISAID is larger than 32 bits: 0x%8.8X%8.8X, truncating", + ACPI_FORMAT_UINT64(compressed_id))); + } + /* Swap ID to big-endian to get contiguous bits */ - eisa_id = acpi_ut_dword_byte_swap(numeric_id); + swapped_id = acpi_ut_dword_byte_swap((u32)compressed_id); - out_string[0] = (char)('@' + (((unsigned long)eisa_id >> 26) & 0x1f)); - out_string[1] = (char)('@' + ((eisa_id >> 21) & 0x1f)); - out_string[2] = (char)('@' + ((eisa_id >> 16) & 0x1f)); - out_string[3] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 12); - out_string[4] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 8); - out_string[5] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 4); - out_string[6] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 0); + /* First 3 bytes are uppercase letters. 
Next 4 bytes are hexadecimal */ + + out_string[0] = + (char)(0x40 + (((unsigned long)swapped_id >> 26) & 0x1F)); + out_string[1] = (char)(0x40 + ((swapped_id >> 21) & 0x1F)); + out_string[2] = (char)(0x40 + ((swapped_id >> 16) & 0x1F)); + out_string[3] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 12); + out_string[4] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 8); + out_string[5] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 4); + out_string[6] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 0); out_string[7] = 0; } /******************************************************************************* * - * FUNCTION: acpi_ex_unsigned_integer_to_string + * FUNCTION: acpi_ex_integer_to_string * - * PARAMETERS: Value - Value to be converted - * out_string - Where to put the converted string (8 bytes) + * PARAMETERS: out_string - Where to put the converted string. At least + * 21 bytes are needed to hold the largest + * possible 64-bit integer. + * Value - Value to be converted * * RETURN: None, string * - * DESCRIPTION: Convert a number to string representation. Assumes string - * buffer is large enough to hold the string. + * DESCRIPTION: Convert a 64-bit integer to decimal string representation. + * Assumes string buffer is large enough to hold the string. The + * largest string is (ACPI_MAX64_DECIMAL_DIGITS + 1). * ******************************************************************************/ -void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string) +void acpi_ex_integer_to_string(char *out_string, acpi_integer value) { u32 count; u32 digits_needed; diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 41994fe7fbb..0fe87f1aef1 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -70,7 +70,6 @@ static acpi_status acpi_ns_dump_one_device(acpi_handle obj_handle, u32 level, void *context, void **return_value) { - struct acpi_buffer buffer; struct acpi_device_info *info; acpi_status status; u32 i; @@ -80,17 +79,15 @@ acpi_ns_dump_one_device(acpi_handle obj_handle, status = acpi_ns_dump_one_object(obj_handle, level, context, return_value); - buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - status = acpi_get_object_info(obj_handle, &buffer); + status = acpi_get_object_info(obj_handle, &info); if (ACPI_SUCCESS(status)) { - info = buffer.pointer; for (i = 0; i < level; i++) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " ")); } ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " HID: %s, ADR: %8.8X%8.8X, Status: %X\n", - info->hardware_id.value, + info->hardware_id.string, ACPI_FORMAT_UINT64(info->address), info->current_status)); ACPI_FREE(info); diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index daf4ad37896..4929dbdbc8f 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c @@ -535,10 +535,11 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, acpi_status status; struct acpi_namespace_node *node; u32 flags; - struct acpica_device_id hid; - struct acpi_compatible_id_list *cid; + struct acpica_device_id *hid; + struct acpica_device_id_list *cid; u32 i; - int found; + u8 found; + int no_match; status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { @@ -582,10 +583,14 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, return (AE_CTRL_DEPTH); } - if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) { - - /* Get the list of Compatible IDs */ + no_match = ACPI_STRCMP(hid->string, info->hid); + ACPI_FREE(hid); + if 
(no_match) { + /* + * HID does not match, attempt match within the + * list of Compatible IDs (CIDs) + */ status = acpi_ut_execute_CID(node, &cid); if (status == AE_NOT_FOUND) { return (AE_OK); @@ -597,10 +602,8 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, found = 0; for (i = 0; i < cid->count; i++) { - if (ACPI_STRNCMP(cid->id[i].value, info->hid, - sizeof(struct - acpi_compatible_id)) == - 0) { + if (ACPI_STRCMP(cid->ids[i].string, info->hid) + == 0) { found = 1; break; } diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index f23593d6add..ddc84af6336 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -51,6 +51,11 @@ #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsxfname") +/* Local prototypes */ +static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, + struct acpica_device_id *source, + char *string_area); + /****************************************************************************** * * FUNCTION: acpi_get_handle @@ -68,6 +73,7 @@ ACPI_MODULE_NAME("nsxfname") * namespace handle. * ******************************************************************************/ + acpi_status acpi_get_handle(acpi_handle parent, acpi_string pathname, acpi_handle * ret_handle) @@ -208,12 +214,40 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer) ACPI_EXPORT_SYMBOL(acpi_get_name) +/****************************************************************************** + * + * FUNCTION: acpi_ns_copy_device_id + * + * PARAMETERS: Dest - Pointer to the destination DEVICE_ID + * Source - Pointer to the source DEVICE_ID + * string_area - Pointer to where to copy the dest string + * + * RETURN: Pointer to the next string area + * + * DESCRIPTION: Copy a single DEVICE_ID, including the string data. + * + ******************************************************************************/ +static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, + struct acpica_device_id *source, + char *string_area) +{ + /* Create the destination DEVICE_ID */ + + dest->string = string_area; + dest->length = source->length; + + /* Copy actual string and return a pointer to the next string area */ + + ACPI_MEMCPY(string_area, source->string, source->length); + return (string_area + source->length); +} + /****************************************************************************** * * FUNCTION: acpi_get_object_info * - * PARAMETERS: Handle - Object Handle - * Buffer - Where the info is returned + * PARAMETERS: Handle - Object Handle + * return_buffer - Where the info is returned * * RETURN: Status * @@ -221,33 +255,37 @@ ACPI_EXPORT_SYMBOL(acpi_get_name) * namespace node and possibly by running several standard * control methods (Such as in the case of a device.) * + * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA, + * _ADR, _sx_w, and _sx_d methods. + * + * Note: Allocates the return buffer, must be freed by the caller. 
+ * ******************************************************************************/ + acpi_status -acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer) +acpi_get_object_info(acpi_handle handle, + struct acpi_device_info **return_buffer) { - acpi_status status; struct acpi_namespace_node *node; struct acpi_device_info *info; - struct acpi_device_info *return_info; - struct acpi_compatible_id_list *cid_list = NULL; - acpi_size size; + struct acpica_device_id_list *cid_list = NULL; + struct acpica_device_id *hid = NULL; + struct acpica_device_id *uid = NULL; + char *next_id_string; + acpi_object_type type; + acpi_name name; + u8 param_count = 0; + u8 valid = 0; + u32 info_size; + u32 i; + acpi_status status; /* Parameter validation */ - if (!handle || !buffer) { + if (!handle || !return_buffer) { return (AE_BAD_PARAMETER); } - status = acpi_ut_validate_buffer(buffer); - if (ACPI_FAILURE(status)) { - return (status); - } - - info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info)); - if (!info) { - return (AE_NO_MEMORY); - } - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { goto cleanup; @@ -256,66 +294,91 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer) node = acpi_ns_map_handle_to_node(handle); if (!node) { (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - status = AE_BAD_PARAMETER; - goto cleanup; + return (AE_BAD_PARAMETER); } - /* Init return structure */ - - size = sizeof(struct acpi_device_info); + /* Get the namespace node data while the namespace is locked */ - info->type = node->type; - info->name = node->name.integer; - info->valid = 0; + info_size = sizeof(struct acpi_device_info); + type = node->type; + name = node->name.integer; if (node->type == ACPI_TYPE_METHOD) { - info->param_count = node->object->method.param_count; + param_count = node->object->method.param_count; } status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { - goto cleanup; + return (status); } - /* If not a device, we are all done */ - - if (info->type == ACPI_TYPE_DEVICE) { + if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { /* - * Get extra info for ACPI Devices objects only: - * Run the Device _HID, _UID, _CID, _STA, _ADR and _sx_d methods. + * Get extra info for ACPI Device/Processor objects only: + * Run the Device _HID, _UID, and _CID methods. * * Note: none of these methods are required, so they may or may - * not be present for this device. The Info->Valid bitfield is used - * to indicate which methods were found and ran successfully. + * not be present for this device. The Info->Valid bitfield is used + * to indicate which methods were found and run successfully. 
*/ /* Execute the Device._HID method */ - status = acpi_ut_execute_HID(node, &info->hardware_id); + status = acpi_ut_execute_HID(node, &hid); if (ACPI_SUCCESS(status)) { - info->valid |= ACPI_VALID_HID; + info_size += hid->length; + valid |= ACPI_VALID_HID; } /* Execute the Device._UID method */ - status = acpi_ut_execute_UID(node, &info->unique_id); + status = acpi_ut_execute_UID(node, &uid); if (ACPI_SUCCESS(status)) { - info->valid |= ACPI_VALID_UID; + info_size += uid->length; + valid |= ACPI_VALID_UID; } /* Execute the Device._CID method */ status = acpi_ut_execute_CID(node, &cid_list); if (ACPI_SUCCESS(status)) { - size += cid_list->size; - info->valid |= ACPI_VALID_CID; + + /* Add size of CID strings and CID pointer array */ + + info_size += + (cid_list->list_size - + sizeof(struct acpica_device_id_list)); + valid |= ACPI_VALID_CID; } + } + + /* + * Now that we have the variable-length data, we can allocate the + * return buffer + */ + info = ACPI_ALLOCATE_ZEROED(info_size); + if (!info) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Get the fixed-length data */ + + if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { + /* + * Get extra info for ACPI Device/Processor objects only: + * Run the _STA, _ADR and, sx_w, and _sx_d methods. + * + * Note: none of these methods are required, so they may or may + * not be present for this device. The Info->Valid bitfield is used + * to indicate which methods were found and run successfully. + */ /* Execute the Device._STA method */ status = acpi_ut_execute_STA(node, &info->current_status); if (ACPI_SUCCESS(status)) { - info->valid |= ACPI_VALID_STA; + valid |= ACPI_VALID_STA; } /* Execute the Device._ADR method */ @@ -323,36 +386,100 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer) status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, node, &info->address); if (ACPI_SUCCESS(status)) { - info->valid |= ACPI_VALID_ADR; + valid |= ACPI_VALID_ADR; + } + + /* Execute the Device._sx_w methods */ + + status = acpi_ut_execute_power_methods(node, + acpi_gbl_lowest_dstate_names, + ACPI_NUM_sx_w_METHODS, + info->lowest_dstates); + if (ACPI_SUCCESS(status)) { + valid |= ACPI_VALID_SXWS; } /* Execute the Device._sx_d methods */ - status = acpi_ut_execute_sxds(node, info->highest_dstates); + status = acpi_ut_execute_power_methods(node, + acpi_gbl_highest_dstate_names, + ACPI_NUM_sx_d_METHODS, + info->highest_dstates); if (ACPI_SUCCESS(status)) { - info->valid |= ACPI_VALID_SXDS; + valid |= ACPI_VALID_SXDS; } } - /* Validate/Allocate/Clear caller buffer */ + /* + * Create a pointer to the string area of the return buffer. + * Point to the end of the base struct acpi_device_info structure. + */ + next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids); + if (cid_list) { - status = acpi_ut_initialize_buffer(buffer, size); - if (ACPI_FAILURE(status)) { - goto cleanup; + /* Point past the CID DEVICE_ID array */ + + next_id_string += + ((acpi_size) cid_list->count * + sizeof(struct acpica_device_id)); } - /* Populate the return buffer */ + /* + * Copy the HID, UID, and CIDs to the return buffer. The variable-length + * strings are copied to the reserved area at the end of the buffer. + * + * For HID and CID, check if the ID is a PCI Root Bridge. 
+ */ + if (hid) { + next_id_string = acpi_ns_copy_device_id(&info->hardware_id, + hid, next_id_string); + + if (acpi_ut_is_pci_root_bridge(hid->string)) { + info->flags |= ACPI_PCI_ROOT_BRIDGE; + } + } - return_info = buffer->pointer; - ACPI_MEMCPY(return_info, info, sizeof(struct acpi_device_info)); + if (uid) { + next_id_string = acpi_ns_copy_device_id(&info->unique_id, + uid, next_id_string); + } if (cid_list) { - ACPI_MEMCPY(&return_info->compatibility_id, cid_list, - cid_list->size); + info->compatible_id_list.count = cid_list->count; + info->compatible_id_list.list_size = cid_list->list_size; + + /* Copy each CID */ + + for (i = 0; i < cid_list->count; i++) { + next_id_string = + acpi_ns_copy_device_id(&info->compatible_id_list. + ids[i], &cid_list->ids[i], + next_id_string); + + if (acpi_ut_is_pci_root_bridge(cid_list->ids[i].string)) { + info->flags |= ACPI_PCI_ROOT_BRIDGE; + } + } } + /* Copy the fixed-length data */ + + info->info_size = info_size; + info->type = type; + info->name = name; + info->param_count = param_count; + info->valid = valid; + + *return_buffer = info; + status = AE_OK; + cleanup: - ACPI_FREE(info); + if (hid) { + ACPI_FREE(hid); + } + if (uid) { + ACPI_FREE(uid); + } if (cid_list) { ACPI_FREE(cid_list); } diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 006b16c2601..5503307b8bb 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -44,19 +44,10 @@ #include #include "accommon.h" #include "acnamesp.h" -#include "acinterp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("uteval") -/* Local prototypes */ -static void -acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length); - -static acpi_status -acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc, - struct acpi_compatible_id *one_cid); - /* * Strings supported by the _OSI predefined (internal) method. * @@ -213,7 +204,7 @@ acpi_status acpi_osi_invalidate(char *interface) * RETURN: Status * * DESCRIPTION: Evaluates a namespace object and verifies the type of the - * return object. Common code that simplifies accessing objects + * return object. Common code that simplifies accessing objects * that have required return objects of fixed types. * * NOTE: Internal function, no parameter validation @@ -298,7 +289,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) { /* - * We received a return object, but one was not expected. This can + * We received a return object, but one was not expected. This can * happen frequently if the "implicit return" feature is enabled. * Just delete the return object and return AE_OK. */ @@ -340,12 +331,12 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, * * PARAMETERS: object_name - Object name to be evaluated * device_node - Node for the device - * Address - Where the value is returned + * Value - Where the value is returned * * RETURN: Status * * DESCRIPTION: Evaluates a numeric namespace object for a selected device - * and stores result in *Address. + * and stores result in *Value. 
* * NOTE: Internal function, no parameter validation * @@ -354,7 +345,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, acpi_status acpi_ut_evaluate_numeric_object(char *object_name, struct acpi_namespace_node *device_node, - acpi_integer * address) + acpi_integer *value) { union acpi_operand_object *obj_desc; acpi_status status; @@ -369,295 +360,7 @@ acpi_ut_evaluate_numeric_object(char *object_name, /* Get the returned Integer */ - *address = obj_desc->integer.value; - - /* On exit, we must delete the return object */ - - acpi_ut_remove_reference(obj_desc); - return_ACPI_STATUS(status); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_copy_id_string - * - * PARAMETERS: Destination - Where to copy the string - * Source - Source string - * max_length - Length of the destination buffer - * - * RETURN: None - * - * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods. - * Performs removal of a leading asterisk if present -- workaround - * for a known issue on a bunch of machines. - * - ******************************************************************************/ - -static void -acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length) -{ - - /* - * Workaround for ID strings that have a leading asterisk. This construct - * is not allowed by the ACPI specification (ID strings must be - * alphanumeric), but enough existing machines have this embedded in their - * ID strings that the following code is useful. - */ - if (*source == '*') { - source++; - } - - /* Do the actual copy */ - - ACPI_STRNCPY(destination, source, max_length); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_execute_HID - * - * PARAMETERS: device_node - Node for the device - * Hid - Where the HID is returned - * - * RETURN: Status - * - * DESCRIPTION: Executes the _HID control method that returns the hardware - * ID of the device. - * - * NOTE: Internal function, no parameter validation - * - ******************************************************************************/ - -acpi_status -acpi_ut_execute_HID(struct acpi_namespace_node *device_node, - struct acpica_device_id *hid) -{ - union acpi_operand_object *obj_desc; - acpi_status status; - - ACPI_FUNCTION_TRACE(ut_execute_HID); - - status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID, - ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING, - &obj_desc); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - if (obj_desc->common.type == ACPI_TYPE_INTEGER) { - - /* Convert the Numeric HID to string */ - - acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value, - hid->value); - } else { - /* Copy the String HID from the returned object */ - - acpi_ut_copy_id_string(hid->value, obj_desc->string.pointer, - sizeof(hid->value)); - } - - /* On exit, we must delete the return object */ - - acpi_ut_remove_reference(obj_desc); - return_ACPI_STATUS(status); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_translate_one_cid - * - * PARAMETERS: obj_desc - _CID object, must be integer or string - * one_cid - Where the CID string is returned - * - * RETURN: Status - * - * DESCRIPTION: Return a numeric or string _CID value as a string. - * (Compatible ID) - * - * NOTE: Assumes a maximum _CID string length of - * ACPI_MAX_CID_LENGTH. 
- * - ******************************************************************************/ - -static acpi_status -acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc, - struct acpi_compatible_id *one_cid) -{ - - switch (obj_desc->common.type) { - case ACPI_TYPE_INTEGER: - - /* Convert the Numeric CID to string */ - - acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value, - one_cid->value); - return (AE_OK); - - case ACPI_TYPE_STRING: - - if (obj_desc->string.length > ACPI_MAX_CID_LENGTH) { - return (AE_AML_STRING_LIMIT); - } - - /* Copy the String CID from the returned object */ - - acpi_ut_copy_id_string(one_cid->value, obj_desc->string.pointer, - ACPI_MAX_CID_LENGTH); - return (AE_OK); - - default: - - return (AE_TYPE); - } -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_execute_CID - * - * PARAMETERS: device_node - Node for the device - * return_cid_list - Where the CID list is returned - * - * RETURN: Status - * - * DESCRIPTION: Executes the _CID control method that returns one or more - * compatible hardware IDs for the device. - * - * NOTE: Internal function, no parameter validation - * - ******************************************************************************/ - -acpi_status -acpi_ut_execute_CID(struct acpi_namespace_node * device_node, - struct acpi_compatible_id_list ** return_cid_list) -{ - union acpi_operand_object *obj_desc; - acpi_status status; - u32 count; - u32 size; - struct acpi_compatible_id_list *cid_list; - u32 i; - - ACPI_FUNCTION_TRACE(ut_execute_CID); - - /* Evaluate the _CID method for this device */ - - status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID, - ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING - | ACPI_BTYPE_PACKAGE, &obj_desc); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* Get the number of _CIDs returned */ - - count = 1; - if (obj_desc->common.type == ACPI_TYPE_PACKAGE) { - count = obj_desc->package.count; - } - - /* Allocate a worst-case buffer for the _CIDs */ - - size = (((count - 1) * sizeof(struct acpi_compatible_id)) + - sizeof(struct acpi_compatible_id_list)); - - cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size); - if (!cid_list) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - /* Init CID list */ - - cid_list->count = count; - cid_list->size = size; - - /* - * A _CID can return either a single compatible ID or a package of - * compatible IDs. Each compatible ID can be one of the following: - * 1) Integer (32 bit compressed EISA ID) or - * 2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss") - */ - - /* The _CID object can be either a single CID or a package (list) of CIDs */ - - if (obj_desc->common.type == ACPI_TYPE_PACKAGE) { - - /* Translate each package element */ - - for (i = 0; i < count; i++) { - status = - acpi_ut_translate_one_cid(obj_desc->package. 
- elements[i], - &cid_list->id[i]); - if (ACPI_FAILURE(status)) { - break; - } - } - } else { - /* Only one CID, translate to a string */ - - status = acpi_ut_translate_one_cid(obj_desc, cid_list->id); - } - - /* Cleanup on error */ - - if (ACPI_FAILURE(status)) { - ACPI_FREE(cid_list); - } else { - *return_cid_list = cid_list; - } - - /* On exit, we must delete the _CID return object */ - - acpi_ut_remove_reference(obj_desc); - return_ACPI_STATUS(status); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_execute_UID - * - * PARAMETERS: device_node - Node for the device - * Uid - Where the UID is returned - * - * RETURN: Status - * - * DESCRIPTION: Executes the _UID control method that returns the hardware - * ID of the device. - * - * NOTE: Internal function, no parameter validation - * - ******************************************************************************/ - -acpi_status -acpi_ut_execute_UID(struct acpi_namespace_node *device_node, - struct acpica_device_id *uid) -{ - union acpi_operand_object *obj_desc; - acpi_status status; - - ACPI_FUNCTION_TRACE(ut_execute_UID); - - status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID, - ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING, - &obj_desc); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - if (obj_desc->common.type == ACPI_TYPE_INTEGER) { - - /* Convert the Numeric UID to string */ - - acpi_ex_unsigned_integer_to_string(obj_desc->integer.value, - uid->value); - } else { - /* Copy the String UID from the returned object */ - - acpi_ut_copy_id_string(uid->value, obj_desc->string.pointer, - sizeof(uid->value)); - } + *value = obj_desc->integer.value; /* On exit, we must delete the return object */ @@ -716,60 +419,64 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags) /******************************************************************************* * - * FUNCTION: acpi_ut_execute_Sxds + * FUNCTION: acpi_ut_execute_power_methods * * PARAMETERS: device_node - Node for the device - * Flags - Where the status flags are returned + * method_names - Array of power method names + * method_count - Number of methods to execute + * out_values - Where the power method values are returned * - * RETURN: Status + * RETURN: Status, out_values * - * DESCRIPTION: Executes _STA for selected device and stores results in - * *Flags. + * DESCRIPTION: Executes the specified power methods for the device and returns + * the result(s). * * NOTE: Internal function, no parameter validation * - ******************************************************************************/ +******************************************************************************/ acpi_status -acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest) +acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node, + const char **method_names, + u8 method_count, u8 *out_values) { union acpi_operand_object *obj_desc; acpi_status status; + acpi_status final_status = AE_NOT_FOUND; u32 i; - ACPI_FUNCTION_TRACE(ut_execute_sxds); + ACPI_FUNCTION_TRACE(ut_execute_power_methods); - for (i = 0; i < 4; i++) { - highest[i] = 0xFF; + for (i = 0; i < method_count; i++) { + /* + * Execute the power method (_sx_d or _sx_w). The only allowable + * return type is an Integer. 
+ */ status = acpi_ut_evaluate_object(device_node, ACPI_CAST_PTR(char, - acpi_gbl_highest_dstate_names - [i]), + method_names[i]), ACPI_BTYPE_INTEGER, &obj_desc); - if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "%s on Device %4.4s, %s\n", - ACPI_CAST_PTR(char, - acpi_gbl_highest_dstate_names - [i]), - acpi_ut_get_node_name - (device_node), - acpi_format_exception - (status))); - - return_ACPI_STATUS(status); - } - } else { - /* Extract the Dstate value */ - - highest[i] = (u8) obj_desc->integer.value; + if (ACPI_SUCCESS(status)) { + out_values[i] = (u8)obj_desc->integer.value; /* Delete the return object */ acpi_ut_remove_reference(obj_desc); + final_status = AE_OK; /* At least one value is valid */ + continue; } + + out_values[i] = ACPI_UINT8_MAX; + if (status == AE_NOT_FOUND) { + continue; /* Ignore if not found */ + } + + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Failed %s on Device %4.4s, %s\n", + ACPI_CAST_PTR(char, method_names[i]), + acpi_ut_get_node_name(device_node), + acpi_format_exception(status))); } - return_ACPI_STATUS(AE_OK); + return_ACPI_STATUS(final_status); } diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index 59e46f257c0..ed7a33c67fb 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -90,7 +90,15 @@ const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = { "\\_S5_" }; -const char *acpi_gbl_highest_dstate_names[4] = { +const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS] = { + "_S0W", + "_S1W", + "_S2W", + "_S3W", + "_S4W" +}; + +const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = { "_S1D", "_S2D", "_S3D", diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c new file mode 100644 index 00000000000..52eaae40455 --- /dev/null +++ b/drivers/acpi/acpica/utids.c @@ -0,0 +1,382 @@ +/****************************************************************************** + * + * Module Name: utids - support for device IDs - HID, UID, CID + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2009, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include "accommon.h" +#include "acinterp.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utids") + +/* Local prototypes */ +static void acpi_ut_copy_id_string(char *destination, char *source); + +/******************************************************************************* + * + * FUNCTION: acpi_ut_copy_id_string + * + * PARAMETERS: Destination - Where to copy the string + * Source - Source string + * + * RETURN: None + * + * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods. + * Performs removal of a leading asterisk if present -- workaround + * for a known issue on a bunch of machines. + * + ******************************************************************************/ + +static void acpi_ut_copy_id_string(char *destination, char *source) +{ + + /* + * Workaround for ID strings that have a leading asterisk. This construct + * is not allowed by the ACPI specification (ID strings must be + * alphanumeric), but enough existing machines have this embedded in their + * ID strings that the following code is useful. + */ + if (*source == '*') { + source++; + } + + /* Do the actual copy */ + + ACPI_STRCPY(destination, source); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_execute_HID + * + * PARAMETERS: device_node - Node for the device + * return_id - Where the string HID is returned + * + * RETURN: Status + * + * DESCRIPTION: Executes the _HID control method that returns the hardware + * ID of the device. The HID is either an 32-bit encoded EISAID + * Integer or a String. A string is always returned. An EISAID + * is converted to a string. 
+ * + * NOTE: Internal function, no parameter validation + * + ******************************************************************************/ + +acpi_status +acpi_ut_execute_HID(struct acpi_namespace_node *device_node, + struct acpica_device_id **return_id) +{ + union acpi_operand_object *obj_desc; + struct acpica_device_id *hid; + u32 length; + acpi_status status; + + ACPI_FUNCTION_TRACE(ut_execute_HID); + + status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID, + ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING, + &obj_desc); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Get the size of the String to be returned, includes null terminator */ + + if (obj_desc->common.type == ACPI_TYPE_INTEGER) { + length = ACPI_EISAID_STRING_SIZE; + } else { + length = obj_desc->string.length + 1; + } + + /* Allocate a buffer for the HID */ + + hid = + ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + + (acpi_size) length); + if (!hid) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Area for the string starts after DEVICE_ID struct */ + + hid->string = ACPI_ADD_PTR(char, hid, sizeof(struct acpica_device_id)); + + /* Convert EISAID to a string or simply copy existing string */ + + if (obj_desc->common.type == ACPI_TYPE_INTEGER) { + acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value); + } else { + acpi_ut_copy_id_string(hid->string, obj_desc->string.pointer); + } + + hid->length = length; + *return_id = hid; + +cleanup: + + /* On exit, we must delete the return object */ + + acpi_ut_remove_reference(obj_desc); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_execute_UID + * + * PARAMETERS: device_node - Node for the device + * return_id - Where the string UID is returned + * + * RETURN: Status + * + * DESCRIPTION: Executes the _UID control method that returns the unique + * ID of the device. The UID is either a 64-bit Integer (NOT an + * EISAID) or a string. Always returns a string. A 64-bit integer + * is converted to a decimal string. 
+ * + * NOTE: Internal function, no parameter validation + * + ******************************************************************************/ + +acpi_status +acpi_ut_execute_UID(struct acpi_namespace_node *device_node, + struct acpica_device_id **return_id) +{ + union acpi_operand_object *obj_desc; + struct acpica_device_id *uid; + u32 length; + acpi_status status; + + ACPI_FUNCTION_TRACE(ut_execute_UID); + + status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID, + ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING, + &obj_desc); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Get the size of the String to be returned, includes null terminator */ + + if (obj_desc->common.type == ACPI_TYPE_INTEGER) { + length = ACPI_MAX64_DECIMAL_DIGITS + 1; + } else { + length = obj_desc->string.length + 1; + } + + /* Allocate a buffer for the UID */ + + uid = + ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + + (acpi_size) length); + if (!uid) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Area for the string starts after DEVICE_ID struct */ + + uid->string = ACPI_ADD_PTR(char, uid, sizeof(struct acpica_device_id)); + + /* Convert an Integer to string, or just copy an existing string */ + + if (obj_desc->common.type == ACPI_TYPE_INTEGER) { + acpi_ex_integer_to_string(uid->string, obj_desc->integer.value); + } else { + acpi_ut_copy_id_string(uid->string, obj_desc->string.pointer); + } + + uid->length = length; + *return_id = uid; + +cleanup: + + /* On exit, we must delete the return object */ + + acpi_ut_remove_reference(obj_desc); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_execute_CID + * + * PARAMETERS: device_node - Node for the device + * return_cid_list - Where the CID list is returned + * + * RETURN: Status, list of CID strings + * + * DESCRIPTION: Executes the _CID control method that returns one or more + * compatible hardware IDs for the device. + * + * NOTE: Internal function, no parameter validation + * + * A _CID method can return either a single compatible ID or a package of + * compatible IDs. Each compatible ID can be one of the following: + * 1) Integer (32 bit compressed EISA ID) or + * 2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss") + * + * The Integer CIDs are converted to string format by this function. + * + ******************************************************************************/ + +acpi_status +acpi_ut_execute_CID(struct acpi_namespace_node *device_node, + struct acpica_device_id_list **return_cid_list) +{ + union acpi_operand_object **cid_objects; + union acpi_operand_object *obj_desc; + struct acpica_device_id_list *cid_list; + char *next_id_string; + u32 string_area_size; + u32 length; + u32 cid_list_size; + acpi_status status; + u32 count; + u32 i; + + ACPI_FUNCTION_TRACE(ut_execute_CID); + + /* Evaluate the _CID method for this device */ + + status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID, + ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING + | ACPI_BTYPE_PACKAGE, &obj_desc); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* + * Get the count and size of the returned _CIDs. _CID can return either + * a Package of Integers/Strings or a single Integer or String. + * Note: This section also validates that all CID elements are of the + * correct type (Integer or String). 
+ */ + if (obj_desc->common.type == ACPI_TYPE_PACKAGE) { + count = obj_desc->package.count; + cid_objects = obj_desc->package.elements; + } else { /* Single Integer or String CID */ + + count = 1; + cid_objects = &obj_desc; + } + + string_area_size = 0; + for (i = 0; i < count; i++) { + + /* String lengths include null terminator */ + + switch (cid_objects[i]->common.type) { + case ACPI_TYPE_INTEGER: + string_area_size += ACPI_EISAID_STRING_SIZE; + break; + + case ACPI_TYPE_STRING: + string_area_size += cid_objects[i]->string.length + 1; + break; + + default: + status = AE_TYPE; + goto cleanup; + } + } + + /* + * Now that we know the length of the CIDs, allocate return buffer: + * 1) Size of the base structure + + * 2) Size of the CID DEVICE_ID array + + * 3) Size of the actual CID strings + */ + cid_list_size = sizeof(struct acpica_device_id_list) + + ((count - 1) * sizeof(struct acpica_device_id)) + string_area_size; + + cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); + if (!cid_list) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Area for CID strings starts after the CID DEVICE_ID array */ + + next_id_string = ACPI_CAST_PTR(char, cid_list->ids) + + ((acpi_size) count * sizeof(struct acpica_device_id)); + + /* Copy/convert the CIDs to the return buffer */ + + for (i = 0; i < count; i++) { + if (cid_objects[i]->common.type == ACPI_TYPE_INTEGER) { + + /* Convert the Integer (EISAID) CID to a string */ + + acpi_ex_eisa_id_to_string(next_id_string, + cid_objects[i]->integer. + value); + length = ACPI_EISAID_STRING_SIZE; + } else { /* ACPI_TYPE_STRING */ + + /* Copy the String CID from the returned object */ + + acpi_ut_copy_id_string(next_id_string, + cid_objects[i]->string.pointer); + length = cid_objects[i]->string.length + 1; + } + + cid_list->ids[i].string = next_id_string; + cid_list->ids[i].length = length; + next_id_string += length; + } + + /* Finish the CID list */ + + cid_list->count = count; + cid_list->list_size = cid_list_size; + *return_cid_list = cid_list; + +cleanup: + + /* On exit, we must delete the _CID return object */ + + acpi_ut_remove_reference(obj_desc); + return_ACPI_STATUS(status); +} diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index fbe782348b0..9cd65334ca7 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -118,6 +118,34 @@ const char *acpi_ut_validate_exception(acpi_status status) return (ACPI_CAST_PTR(const char, exception)); } +/******************************************************************************* + * + * FUNCTION: acpi_ut_is_pci_root_bridge + * + * PARAMETERS: Id - The HID/CID in string format + * + * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge + * + * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID. + * + ******************************************************************************/ + +u8 acpi_ut_is_pci_root_bridge(char *id) +{ + + /* + * Check if this is a PCI root bridge. + * ACPI 3.0+: check for a PCI Express root also. 
+ */ + if (!(ACPI_STRCMP(id, + PCI_ROOT_HID_STRING)) || + !(ACPI_STRCMP(id, PCI_EXPRESS_ROOT_HID_STRING))) { + return (TRUE); + } + + return (FALSE); +} + /******************************************************************************* * * FUNCTION: acpi_ut_is_aml_table diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index fe0cdf83641..2aee8c24dc5 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -200,20 +200,17 @@ container_walk_namespace_cb(acpi_handle handle, u32 lvl, void *context, void **rv) { char *hid = NULL; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_device_info *info; acpi_status status; int *action = context; - - status = acpi_get_object_info(handle, &buffer); - if (ACPI_FAILURE(status) || !buffer.pointer) { + status = acpi_get_object_info(handle, &info); + if (ACPI_FAILURE(status)) { return AE_OK; } - info = buffer.pointer; if (info->valid & ACPI_VALID_HID) - hid = info->hardware_id.value; + hid = info->hardware_id.string; if (hid == NULL) { goto end; @@ -240,7 +237,7 @@ container_walk_namespace_cb(acpi_handle handle, } end: - kfree(buffer.pointer); + kfree(info); return AE_OK; } diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index efb959d6c8a..39536b80bce 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -231,18 +231,16 @@ static int is_ata(acpi_handle handle) static int is_battery(acpi_handle handle) { struct acpi_device_info *info; - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; int ret = 1; - if (!ACPI_SUCCESS(acpi_get_object_info(handle, &buffer))) + if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info))) return 0; - info = buffer.pointer; if (!(info->valid & ACPI_VALID_HID)) ret = 0; else - ret = !strcmp("PNP0C0A", info->hardware_id.value); + ret = !strcmp("PNP0C0A", info->hardware_id.string); - kfree(buffer.pointer); + kfree(info); return ret; } diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index a8a5c29958c..27a7072347e 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -93,15 +93,13 @@ do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_status status; struct acpi_device_info *info; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_find_child *find = context; - status = acpi_get_object_info(handle, &buffer); + status = acpi_get_object_info(handle, &info); if (ACPI_SUCCESS(status)) { - info = buffer.pointer; if (info->address == find->address) find->handle = handle; - kfree(buffer.pointer); + kfree(info); } return AE_OK; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 781435d7e36..0ab526de7c5 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -60,13 +60,13 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias, } if (acpi_dev->flags.compatible_ids) { - struct acpi_compatible_id_list *cid_list; + struct acpica_device_id_list *cid_list; int i; cid_list = acpi_dev->pnp.cid_list; for (i = 0; i < cid_list->count; i++) { count = snprintf(&modalias[len], size, "%s:", - cid_list->id[i].value); + cid_list->ids[i].string); if (count < 0 || count >= size) { printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size", acpi_dev->pnp.device_name, i); @@ -287,14 +287,14 @@ int acpi_match_device_ids(struct acpi_device *device, } if (device->flags.compatible_ids) { - struct acpi_compatible_id_list *cid_list = device->pnp.cid_list; + struct acpica_device_id_list *cid_list = device->pnp.cid_list; int i; for (id = ids; id->id[0]; id++) { /* compare multiple _CID entries 
against driver ids */ for (i = 0; i < cid_list->count; i++) { if (!strcmp((char*)id->id, - cid_list->id[i].value)) + cid_list->ids[i].string)) return 0; } } @@ -999,33 +999,89 @@ static int acpi_dock_match(struct acpi_device *device) return acpi_get_handle(device->handle, "_DCK", &tmp); } +static struct acpica_device_id_list* +acpi_add_cid( + struct acpi_device_info *info, + struct acpica_device_id *new_cid) +{ + struct acpica_device_id_list *cid; + char *next_id_string; + acpi_size cid_length; + acpi_size new_cid_length; + u32 i; + + + /* Allocate new CID list with room for the new CID */ + + if (!new_cid) + new_cid_length = info->compatible_id_list.list_size; + else if (info->compatible_id_list.list_size) + new_cid_length = info->compatible_id_list.list_size + + new_cid->length + sizeof(struct acpica_device_id); + else + new_cid_length = sizeof(struct acpica_device_id_list) + new_cid->length; + + cid = ACPI_ALLOCATE_ZEROED(new_cid_length); + if (!cid) { + return NULL; + } + + cid->list_size = new_cid_length; + cid->count = info->compatible_id_list.count; + if (new_cid) + cid->count++; + next_id_string = (char *) cid->ids + (cid->count * sizeof(struct acpica_device_id)); + + /* Copy all existing CIDs */ + + for (i = 0; i < info->compatible_id_list.count; i++) { + cid_length = info->compatible_id_list.ids[i].length; + cid->ids[i].string = next_id_string; + cid->ids[i].length = cid_length; + + ACPI_MEMCPY(next_id_string, info->compatible_id_list.ids[i].string, + cid_length); + + next_id_string += cid_length; + } + + /* Append the new CID */ + + if (new_cid) { + cid->ids[i].string = next_id_string; + cid->ids[i].length = new_cid->length; + + ACPI_MEMCPY(next_id_string, new_cid->string, new_cid->length); + } + + return cid; +} + static void acpi_device_set_id(struct acpi_device *device, struct acpi_device *parent, acpi_handle handle, int type) { - struct acpi_device_info *info; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_device_info *info = NULL; char *hid = NULL; char *uid = NULL; - struct acpi_compatible_id_list *cid_list = NULL; - const char *cid_add = NULL; + struct acpica_device_id_list *cid_list = NULL; + char *cid_add = NULL; acpi_status status; switch (type) { case ACPI_BUS_TYPE_DEVICE: - status = acpi_get_object_info(handle, &buffer); + status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__); return; } - info = buffer.pointer; if (info->valid & ACPI_VALID_HID) - hid = info->hardware_id.value; + hid = info->hardware_id.string; if (info->valid & ACPI_VALID_UID) - uid = info->unique_id.value; + uid = info->unique_id.string; if (info->valid & ACPI_VALID_CID) - cid_list = &info->compatibility_id; + cid_list = &info->compatible_id_list; if (info->valid & ACPI_VALID_ADR) { device->pnp.bus_address = info->address; device->flags.bus_address = 1; @@ -1076,55 +1132,44 @@ static void acpi_device_set_id(struct acpi_device *device, } if (hid) { - strcpy(device->pnp.hardware_id, hid); - device->flags.hardware_id = 1; - } + device->pnp.hardware_id = ACPI_ALLOCATE_ZEROED(strlen (hid) + 1); + if (device->pnp.hardware_id) { + strcpy(device->pnp.hardware_id, hid); + device->flags.hardware_id = 1; + } + } else + device->pnp.hardware_id = NULL; + if (uid) { - strcpy(device->pnp.unique_id, uid); - device->flags.unique_id = 1; - } + device->pnp.unique_id = ACPI_ALLOCATE_ZEROED(strlen (uid) + 1); + if (device->pnp.unique_id) { + strcpy(device->pnp.unique_id, uid); + device->flags.unique_id = 
1; + } + } else + device->pnp.unique_id = NULL; + if (cid_list || cid_add) { - struct acpi_compatible_id_list *list; - int size = 0; - int count = 0; - - if (cid_list) { - size = cid_list->size; - } else if (cid_add) { - size = sizeof(struct acpi_compatible_id_list); - cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size); - if (!cid_list) { - printk(KERN_ERR "Memory allocation error\n"); - kfree(buffer.pointer); - return; - } else { - cid_list->count = 0; - cid_list->size = size; - } + struct acpica_device_id_list *list; + + if (cid_add) { + struct acpica_device_id cid; + cid.length = strlen (cid_add) + 1; + cid.string = cid_add; + + list = acpi_add_cid(info, &cid); + } else { + list = acpi_add_cid(info, NULL); } - if (cid_add) - size += sizeof(struct acpi_compatible_id); - list = kmalloc(size, GFP_KERNEL); if (list) { - if (cid_list) { - memcpy(list, cid_list, cid_list->size); - count = cid_list->count; - } - if (cid_add) { - strncpy(list->id[count].value, cid_add, - ACPI_MAX_CID_LENGTH); - count++; - device->flags.compatible_ids = 1; - } - list->size = size; - list->count = count; device->pnp.cid_list = list; - } else - printk(KERN_ERR PREFIX "Memory allocation error\n"); + if (cid_add) + device->flags.compatible_ids = 1; + } } - kfree(buffer.pointer); + kfree(info); } static int acpi_device_set_context(struct acpi_device *device, int type) diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c index 8f3d4c18491..7bead4c816c 100644 --- a/drivers/char/agp/hp-agp.c +++ b/drivers/char/agp/hp-agp.c @@ -478,7 +478,6 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) { acpi_handle handle, parent; acpi_status status; - struct acpi_buffer buffer; struct acpi_device_info *info; u64 lba_hpa, sba_hpa, length; int match; @@ -490,13 +489,11 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) /* Look for an enclosing IOC scope and find its CSR space */ handle = obj; do { - buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - status = acpi_get_object_info(handle, &buffer); + status = acpi_get_object_info(handle, &info); if (ACPI_SUCCESS(status)) { /* TBD check _CID also */ - info = buffer.pointer; - info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0'; - match = (strcmp(info->hardware_id.value, "HWP0001") == 0); + info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0'; + match = (strcmp(info->hardware_id.string, "HWP0001") == 0); kfree(info); if (match) { status = hp_acpi_csr_space(handle, &sba_hpa, &length); diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index c509c991646..c0cf45a11b9 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -114,8 +114,6 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, unsigned int bus, devnum, func; acpi_integer addr; acpi_handle dev_handle; - struct acpi_buffer buffer = {.length = ACPI_ALLOCATE_BUFFER, - .pointer = NULL}; acpi_status status; struct acpi_device_info *dinfo = NULL; int ret = -ENODEV; @@ -134,12 +132,11 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, goto err; } - status = acpi_get_object_info(dev_handle, &buffer); + status = acpi_get_object_info(dev_handle, &dinfo); if (ACPI_FAILURE(status)) { DEBPRINT("get_object_info for device failed\n"); goto err; } - dinfo = buffer.pointer; if (dinfo && (dinfo->valid & ACPI_VALID_ADR) && dinfo->address == addr) { *pcidevfn = addr; diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 5befa7e379b..a9d926b7d80 100644 --- 
a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -398,23 +398,21 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, acpi_handle *phandle = (acpi_handle *)context; acpi_status status; struct acpi_device_info *info; - struct acpi_buffer info_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; int retval = 0; - status = acpi_get_object_info(handle, &info_buffer); + status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) { err("%s: Failed to get device information status=0x%x\n", __func__, status); return retval; } - info = info_buffer.pointer; - info->hardware_id.value[sizeof(info->hardware_id.value) - 1] = '\0'; + info->hardware_id.string[sizeof(info->hardware_id.length) - 1] = '\0'; if (info->current_status && (info->valid & ACPI_VALID_HID) && - (!strcmp(info->hardware_id.value, IBM_HARDWARE_ID1) || - !strcmp(info->hardware_id.value, IBM_HARDWARE_ID2))) { + (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || + !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) { dbg("found hardware: %s, handle: %p\n", - info->hardware_id.value, handle); + info->hardware_id.string, handle); *phandle = handle; /* returning non-zero causes the search to stop * and returns this value to the caller of diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index dafaa4a92df..f9f68e0e734 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -976,15 +976,12 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level, void *context, void **return_value) { struct acpi_device_info *info; - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - - if (ACPI_SUCCESS(acpi_get_object_info(handle, &buffer))) { - info = buffer.pointer; + if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", (char *)&info->name, info->param_count); - kfree(buffer.pointer); + kfree(info); } return AE_OK; diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 9496494f340..c07fdb94d66 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -194,13 +194,13 @@ static int __init pnpacpi_add_device(struct acpi_device *device) pnpacpi_parse_resource_option_data(dev); if (device->flags.compatible_ids) { - struct acpi_compatible_id_list *cid_list = device->pnp.cid_list; + struct acpica_device_id_list *cid_list = device->pnp.cid_list; int i; for (i = 0; i < cid_list->count; i++) { - if (!ispnpidacpi(cid_list->id[i].value)) + if (!ispnpidacpi(cid_list->ids[i].string)) continue; - pnp_add_id(dev, cid_list->id[i].value); + pnp_add_id(dev, cid_list->ids[i].string); } } diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c65e4ce6c3a..b91420b52c6 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -173,17 +173,15 @@ struct acpi_device_dir { typedef char acpi_bus_id[8]; typedef unsigned long acpi_bus_address; -typedef char acpi_hardware_id[15]; -typedef char acpi_unique_id[9]; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ acpi_bus_address bus_address; /* _ADR */ - acpi_hardware_id hardware_id; /* _HID */ - struct acpi_compatible_id_list *cid_list; /* _CIDs */ - acpi_unique_id unique_id; /* _UID */ + char *hardware_id; /* _HID */ + struct acpica_device_id_list *cid_list; /* _CIDs */ + char *unique_id; /* _UID */ acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; 
/* " */ }; diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index b450a195319..04904c7f1aa 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -200,7 +200,8 @@ acpi_evaluate_object_typed(acpi_handle object, acpi_object_type return_type); acpi_status -acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer); +acpi_get_object_info(acpi_handle handle, + struct acpi_device_info **return_buffer); acpi_status acpi_install_method(u8 *buffer); diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 37ba576d06e..7a4ff79e238 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -338,7 +338,7 @@ typedef u32 acpi_physical_address; /* PM Timer ticks per second (HZ) */ -#define PM_TIMER_FREQUENCY 3579545 +#define PM_TIMER_FREQUENCY 3579545 /******************************************************************************* * @@ -969,38 +969,60 @@ acpi_status(*acpi_walk_callback) (acpi_handle obj_handle, #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 -/* Length of _HID, _UID, _CID, and UUID values */ +/* Length of 32-bit EISAID values when converted back to a string */ + +#define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ + +/* Length of UUID (string) values */ -#define ACPI_DEVICE_ID_LENGTH 0x09 -#define ACPI_MAX_CID_LENGTH 48 #define ACPI_UUID_LENGTH 16 -/* Common string version of device HIDs and UIDs */ +/* Structures used for device/processor HID, UID, CID */ struct acpica_device_id { - char value[ACPI_DEVICE_ID_LENGTH]; + u32 length; /* Length of string + null */ + char *string; }; -/* Common string version of device CIDs */ - -struct acpi_compatible_id { - char value[ACPI_MAX_CID_LENGTH]; +struct acpica_device_id_list { + u32 count; /* Number of IDs in Ids array */ + u32 list_size; /* Size of list, including ID strings */ + struct acpica_device_id ids[1]; /* ID array */ }; -struct acpi_compatible_id_list { - u32 count; - u32 size; - struct acpi_compatible_id id[1]; +/* + * Structure returned from acpi_get_object_info. 
+ * Optimized for both 32- and 64-bit builds + */ +struct acpi_device_info { + u32 info_size; /* Size of info, including ID strings */ + u32 name; /* ACPI object Name */ + acpi_object_type type; /* ACPI object Type */ + u8 param_count; /* If a method, required parameter count */ + u8 valid; /* Indicates which optional fields are valid */ + u8 flags; /* Miscellaneous info */ + u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ + u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ + u32 current_status; /* _STA value */ + acpi_integer address; /* _ADR value */ + struct acpica_device_id hardware_id; /* _HID value */ + struct acpica_device_id unique_id; /* _UID value */ + struct acpica_device_id_list compatible_id_list; /* _CID list */ }; -/* Structure and flags for acpi_get_object_info */ +/* Values for Flags field above (acpi_get_object_info) */ + +#define ACPI_PCI_ROOT_BRIDGE 0x01 -#define ACPI_VALID_STA 0x0001 -#define ACPI_VALID_ADR 0x0002 -#define ACPI_VALID_HID 0x0004 -#define ACPI_VALID_UID 0x0008 -#define ACPI_VALID_CID 0x0010 -#define ACPI_VALID_SXDS 0x0020 +/* Flags for Valid field above (acpi_get_object_info) */ + +#define ACPI_VALID_STA 0x01 +#define ACPI_VALID_ADR 0x02 +#define ACPI_VALID_HID 0x04 +#define ACPI_VALID_UID 0x08 +#define ACPI_VALID_CID 0x10 +#define ACPI_VALID_SXDS 0x20 +#define ACPI_VALID_SXWS 0x40 /* Flags for _STA method */ @@ -1011,29 +1033,6 @@ struct acpi_compatible_id_list { #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 -#define ACPI_COMMON_OBJ_INFO \ - acpi_object_type type; /* ACPI object type */ \ - acpi_name name /* ACPI object Name */ - -struct acpi_obj_info_header { - ACPI_COMMON_OBJ_INFO; -}; - -/* Structure returned from Get Object Info */ - -struct acpi_device_info { - ACPI_COMMON_OBJ_INFO; - - u32 param_count; /* If a method, required parameter count */ - u32 valid; /* Indicates which fields below are valid */ - u32 current_status; /* _STA value */ - acpi_integer address; /* _ADR value if any */ - struct acpica_device_id hardware_id; /* _HID value if any */ - struct acpica_device_id unique_id; /* _UID value if any */ - u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ - struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */ -}; - /* Context structs for address space handlers */ struct acpi_pci_id { -- cgit v1.2.3-70-g09d2 From 6557a49a443a347d24aed58076365432ded30edc Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Wed, 24 Jun 2009 11:32:04 +0800 Subject: ACPICA: ACPI 4.0: Interpreter support for IPMI. Adds support for IPMI which is similar to SMBus and uses a bi-directional data buffer. ACPICA BZ 773. 
http://acpica.org/bugzilla/show_bug.cgi?id=773 Signed-off-by: Lin Ming Signed-off-by: Bob Moore Signed-off-by: Len Brown --- drivers/acpi/acpica/acconfig.h | 3 +- drivers/acpi/acpica/exfield.c | 82 ++++++++++++++++++++++++++++-------------- drivers/acpi/acpica/exfldio.c | 7 ++-- include/acpi/actypes.h | 3 +- 4 files changed, 63 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h index 9123d5a1162..8e679ef5b23 100644 --- a/drivers/acpi/acpica/acconfig.h +++ b/drivers/acpi/acpica/acconfig.h @@ -199,9 +199,10 @@ #define ACPI_RSDP_CHECKSUM_LENGTH 20 #define ACPI_RSDP_XCHECKSUM_LENGTH 36 -/* SMBus bidirectional buffer size */ +/* SMBus and IPMI bidirectional buffer size */ #define ACPI_SMBUS_BUFFER_SIZE 34 +#define ACPI_IPMI_BUFFER_SIZE 66 /* _sx_d and _sx_w control methods */ diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 546dcdd8678..0b33d6c887b 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -72,6 +72,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, union acpi_operand_object *buffer_desc; acpi_size length; void *buffer; + u32 function; ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc); @@ -97,13 +98,27 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, } } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == - ACPI_ADR_SPACE_SMBUS)) { + ACPI_ADR_SPACE_SMBUS + || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_IPMI)) { /* - * This is an SMBus read. We must create a buffer to hold the data - * and directly access the region handler. + * This is an SMBus or IPMI read. We must create a buffer to hold + * the data and then directly access the region handler. + * + * Note: Smbus protocol value is passed in upper 16-bits of Function */ - buffer_desc = - acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE); + if (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_SMBUS) { + length = ACPI_SMBUS_BUFFER_SIZE; + function = + ACPI_READ | (obj_desc->field.attribute << 16); + } else { /* IPMI */ + + length = ACPI_IPMI_BUFFER_SIZE; + function = ACPI_READ; + } + + buffer_desc = acpi_ut_create_buffer_object(length); if (!buffer_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } @@ -112,16 +127,13 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); - /* - * Perform the read. - * Note: Smbus protocol value is passed in upper 16-bits of Function - */ + /* Call the region handler for the read */ + status = acpi_ex_access_region(obj_desc, 0, ACPI_CAST_PTR(acpi_integer, buffer_desc-> buffer.pointer), - ACPI_READ | (obj_desc->field. - attribute << 16)); + function); acpi_ex_release_global_lock(obj_desc->common_field.field_flags); goto exit; } @@ -212,6 +224,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, u32 length; void *buffer; union acpi_operand_object *buffer_desc; + u32 function; ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc); @@ -234,39 +247,56 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, } } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == - ACPI_ADR_SPACE_SMBUS)) { + ACPI_ADR_SPACE_SMBUS + || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_IPMI)) { /* - * This is an SMBus write. 
We will bypass the entire field mechanism - * and handoff the buffer directly to the handler. + * This is an SMBus or IPMI write. We will bypass the entire field + * mechanism and handoff the buffer directly to the handler. For + * these address spaces, the buffer is bi-directional; on a write, + * return data is returned in the same buffer. + * + * Source must be a buffer of sufficient size: + * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE. * - * Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE). + * Note: SMBus protocol type is passed in upper 16-bits of Function */ if (source_desc->common.type != ACPI_TYPE_BUFFER) { ACPI_ERROR((AE_INFO, - "SMBus write requires Buffer, found type %s", + "SMBus or IPMI write requires Buffer, found type %s", acpi_ut_get_object_type_name(source_desc))); return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } - if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) { + if (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_SMBUS) { + length = ACPI_SMBUS_BUFFER_SIZE; + function = + ACPI_WRITE | (obj_desc->field.attribute << 16); + } else { /* IPMI */ + + length = ACPI_IPMI_BUFFER_SIZE; + function = ACPI_WRITE; + } + + if (source_desc->buffer.length < length) { ACPI_ERROR((AE_INFO, - "SMBus write requires Buffer of length %X, found length %X", - ACPI_SMBUS_BUFFER_SIZE, - source_desc->buffer.length)); + "SMBus or IPMI write requires Buffer of length %X, found length %X", + length, source_desc->buffer.length)); return_ACPI_STATUS(AE_AML_BUFFER_LIMIT); } - buffer_desc = - acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE); + /* Create the bi-directional buffer */ + + buffer_desc = acpi_ut_create_buffer_object(length); if (!buffer_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } buffer = buffer_desc->buffer.pointer; - ACPI_MEMCPY(buffer, source_desc->buffer.pointer, - ACPI_SMBUS_BUFFER_SIZE); + ACPI_MEMCPY(buffer, source_desc->buffer.pointer, length); /* Lock entire transaction if requested */ @@ -275,12 +305,10 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, /* * Perform the write (returns status and perhaps data in the * same buffer) - * Note: SMBus protocol type is passed in upper 16-bits of Function. */ status = acpi_ex_access_region(obj_desc, 0, (acpi_integer *) buffer, - ACPI_WRITE | (obj_desc->field. 
- attribute << 16)); + function); acpi_ex_release_global_lock(obj_desc->common_field.field_flags); *result_desc = buffer_desc; diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 6687be167f5..d7b3b418fb4 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -120,12 +120,13 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, } /* - * Exit now for SMBus address space, it has a non-linear address space + * Exit now for SMBus or IPMI address space, it has a non-linear address space * and the request cannot be directly validated */ - if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) { + if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS || + rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) { - /* SMBus has a non-linear address space */ + /* SMBus or IPMI has a non-linear address space */ return_ACPI_STATUS(AE_OK); } diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 7a4ff79e238..4371805d2de 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -732,7 +732,8 @@ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 #define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 -#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 7 +#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 +#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8 #define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 /* -- cgit v1.2.3-70-g09d2 From 8e4319c425077c4cc540696a5bb6c4d12f017dcd Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Mon, 29 Jun 2009 13:43:27 +0800 Subject: ACPICA: Fix several acpi_attach_data problems Handler was never invoked. Now invoked if/when host node is deleted. Data object was not automatically deleted when host node was deleted. Interface to handler had an unused parameter, removed it. ACPICA BZ 778. http://acpica.org/bugzilla/show_bug.cgi?id=778 Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- drivers/acpi/acpica/acnamesp.h | 2 + drivers/acpi/acpica/nsalloc.c | 88 +++++++++++++++++++++++++++++------------- drivers/acpi/acpica/nsload.c | 3 +- drivers/acpi/bus.c | 2 +- drivers/acpi/glue.c | 2 +- drivers/acpi/scan.c | 2 +- include/acpi/acpi_bus.h | 4 +- include/acpi/actypes.h | 2 +- 8 files changed, 70 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 94cdc2b8cb9..a78e02f62d5 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -144,6 +144,8 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name); void acpi_ns_delete_node(struct acpi_namespace_node *node); +void acpi_ns_remove_node(struct acpi_namespace_node *node); + void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_handle); diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index efc971ab7d6..8a58a1b85aa 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c @@ -96,17 +96,68 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name) * * RETURN: None * - * DESCRIPTION: Delete a namespace node + * DESCRIPTION: Delete a namespace node. All node deletions must come through + * here. Detaches any attached objects, including any attached + * data. If a handler is associated with attached data, it is + * invoked before the node is deleted. 
* ******************************************************************************/ void acpi_ns_delete_node(struct acpi_namespace_node *node) +{ + union acpi_operand_object *obj_desc; + + ACPI_FUNCTION_NAME(ns_delete_node); + + /* Detach an object if there is one */ + + acpi_ns_detach_object(node); + + /* + * Delete an attached data object if present (an object that was created + * and attached via acpi_attach_data). Note: After any normal object is + * detached above, the only possible remaining object is a data object. + */ + obj_desc = node->object; + if (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) { + + /* Invoke the attached data deletion handler if present */ + + if (obj_desc->data.handler) { + obj_desc->data.handler(node, obj_desc->data.pointer); + } + + acpi_ut_remove_reference(obj_desc); + } + + /* Now we can delete the node */ + + (void)acpi_os_release_object(acpi_gbl_namespace_cache, node); + + ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++); + ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Node %p, Remaining %X\n", + node, acpi_gbl_current_node_count)); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ns_remove_node + * + * PARAMETERS: Node - Node to be removed/deleted + * + * RETURN: None + * + * DESCRIPTION: Remove (unlink) and delete a namespace node + * + ******************************************************************************/ + +void acpi_ns_remove_node(struct acpi_namespace_node *node) { struct acpi_namespace_node *parent_node; struct acpi_namespace_node *prev_node; struct acpi_namespace_node *next_node; - ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node); + ACPI_FUNCTION_TRACE_PTR(ns_remove_node, node); parent_node = acpi_ns_get_parent_node(node); @@ -142,12 +193,9 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node) } } - ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++); - - /* Detach an object if there is one, then delete the node */ + /* Delete the node and any attached objects */ - acpi_ns_detach_object(node); - (void)acpi_os_release_object(acpi_gbl_namespace_cache, node); + acpi_ns_delete_node(node); return_VOID; } @@ -273,25 +321,11 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node) parent_node, child_node)); } - /* Now we can free this child object */ - - ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++); - - ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, - "Object %p, Remaining %X\n", child_node, - acpi_gbl_current_node_count)); - - /* Detach an object if there is one, then free the child node */ - - acpi_ns_detach_object(child_node); - - /* Now we can delete the node */ - - (void)acpi_os_release_object(acpi_gbl_namespace_cache, - child_node); - - /* And move on to the next child in the list */ - + /* + * Delete this child node and move on to the next child in the list. + * No need to unlink the node since we are deleting the entire branch. 
+ */ + acpi_ns_delete_node(child_node); child_node = next_node; } while (!(flags & ANOBJ_END_OF_PEER_LIST)); @@ -433,7 +467,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id) if (deletion_node) { acpi_ns_delete_children(deletion_node); - acpi_ns_delete_node(deletion_node); + acpi_ns_remove_node(deletion_node); deletion_node = NULL; } diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index dcd7a6adbbb..a7234e60e98 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -270,8 +270,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle) /* Now delete the starting object, and we are done */ - acpi_ns_delete_node(child_handle); - + acpi_ns_remove_node(child_handle); return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 2876fc70c3a..620183f13e5 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -141,7 +141,7 @@ int acpi_bus_get_status(struct acpi_device *device) EXPORT_SYMBOL(acpi_bus_get_status); void acpi_bus_private_data_handler(acpi_handle handle, - u32 function, void *context) + void *context) { return; } diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 27a7072347e..9a4ce33f137 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -119,7 +119,7 @@ EXPORT_SYMBOL(acpi_get_child); /* Link ACPI devices with physical devices */ static void acpi_glue_data_handler(acpi_handle handle, - u32 function, void *context) + void *context) { /* we provide an empty handler */ } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 0ab526de7c5..9606af13d3b 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -687,7 +687,7 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) } EXPORT_SYMBOL_GPL(acpi_bus_get_ejd); -void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context) +void acpi_bus_data_handler(acpi_handle handle, void *context) { /* TBD */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index b91420b52c6..6e83a68fbd7 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -312,7 +312,7 @@ struct acpi_bus_event { extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); -void acpi_bus_private_data_handler(acpi_handle, u32, void *); +void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); @@ -325,7 +325,7 @@ extern void unregister_acpi_bus_notifier(struct notifier_block *nb); */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); -void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context); +void acpi_bus_data_handler(acpi_handle handle, void *context); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_set_power(acpi_handle handle, int state); diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 4371805d2de..ef4601149f4 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -922,7 +922,7 @@ typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); typedef -void (*acpi_object_handler) (acpi_handle object, u32 function, void *data); +void (*acpi_object_handler) (acpi_handle object, void *data); typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function); -- cgit v1.2.3-70-g09d2 From 
eb2289ba1ba994de25af0d94b5e80ba93d2c1c3c Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Wed, 24 Jun 2009 13:42:00 +0800 Subject: ACPICA: ACPI 4.0: Changes for existing ACPI tables. FACS: new flag and new OspmFlags field. SRAT: x2APIC - add ClockDomain field to descriptor #2 Includes header and disassembler support. Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/actbl.h | 19 +++++++++++++------ include/acpi/actbl1.h | 8 +++++--- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 222733d01f3..0649a567002 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -161,17 +161,24 @@ struct acpi_table_facs { u32 flags; u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */ u8 version; /* Version of this table (ACPI 2.0+) */ - u8 reserved[31]; /* Reserved, must be zero */ + u8 reserved[3]; /* Reserved, must be zero */ + u32 ospm_flags; /* Flags to be set by OSPM (ACPI 4.0) */ + u8 reserved1[24]; /* Reserved, must be zero */ }; -/* Flag macros */ +/* global_lock flags */ + +#define ACPI_GLOCK_PENDING (1) /* 00: Pending global lock ownership */ +#define ACPI_GLOCK_OWNED (1<<1) /* 01: Global lock is owned */ + +/* Flags */ -#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */ +#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */ +#define ACPI_FACS_64BIT_WAKE (1<<1) /* 01: 64-bit wake vector supported (ACPI 4.0) */ -/* Global lock flags */ +/* ospm_flags */ -#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */ -#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */ +#define ACPI_FACS_64BIT_ENVIRONMENT (1) /* 00: 64-bit wake environment is required (ACPI 4.0) */ /******************************************************************************* * diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 59ade075247..ec36693f868 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -1011,7 +1011,7 @@ struct acpi_madt_interrupt_source { #define ACPI_MADT_CPEI_OVERRIDE (1) -/* 9: Processor Local X2_APIC (07/2008) */ +/* 9: Processor Local X2APIC (ACPI 4.0) */ struct acpi_madt_local_x2apic { struct acpi_subtable_header header; @@ -1021,7 +1021,7 @@ struct acpi_madt_local_x2apic { u32 uid; /* ACPI processor UID */ }; -/* 10: Local X2APIC NMI (07/2008) */ +/* 10: Local X2APIC NMI (ACPI 4.0) */ struct acpi_madt_local_x2apic_nmi { struct acpi_subtable_header header; @@ -1211,7 +1211,7 @@ struct acpi_srat_mem_affinity { #define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */ #define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */ -/* 2: Processor Local X2_APIC Affinity (07/2008) */ +/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */ struct acpi_srat_x2apic_cpu_affinity { struct acpi_subtable_header header; @@ -1219,6 +1219,8 @@ struct acpi_srat_x2apic_cpu_affinity { u32 proximity_domain; u32 apic_id; u32 flags; + u32 clock_domain; + u32 reserved2; }; /* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */ -- cgit v1.2.3-70-g09d2 From 3ce804ed83827a7fd27190836f9421b29ac64512 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Thu, 25 Jun 2009 10:31:32 -0700 Subject: ACPICA: Update version to 20090625 Update version number. 
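A hedged aside, not part of the patch: out-of-tree code that depends on behaviour introduced with this drop can gate on the version constant at compile time. ACPI_CA_VERSION lives in acpixf.h and uses the YYYYMMDD hex form shown in the hunk below; the <linux/acpi.h> include is assumed here as the usual kernel-side entry point.

#include <linux/acpi.h>	/* pulls in acpi/acpixf.h, where ACPI_CA_VERSION is defined */

#if ACPI_CA_VERSION < 0x20090625
#error "This code expects the 20090625 ACPICA release or later"
#endif
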
Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/acpixf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 04904c7f1aa..063e577e791 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -47,7 +47,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20090521 +#define ACPI_CA_VERSION 0x20090625 #include "actypes.h" #include "actbl.h" -- cgit v1.2.3-70-g09d2 From a5fe1a03f7720b8da8364a1737e1e5a357904e99 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Thu, 13 Aug 2009 10:43:27 +0800 Subject: ACPICA: fix leak of acpi_os_validate_address http://bugzilla.kernel.org/show_bug.cgi?id=13620 If the dynamic region is created and added to resource list over and over again, it has the potential to be a memory leak by growing the list every time. This patch fixes the memory leak, as below 1) add a new field "count" to struct acpi_res_list. When inserting, if the region(addr, len) is already in the resource list, we just increase "count", otherwise, the region is inserted with count=1. When deleting, the "count" is decreased, if it's decreased to 0, the region is deleted from the resource list. With "count", the region with same address and length can only be inserted to the resource list once, so prevent potential memory leak. 2) add a new function acpi_os_invalidate_address, which is called when region is deleted. Signed-off-by: Lin Ming Signed-off-by: Len Brown --- drivers/acpi/acpica/utdelete.c | 6 +++ drivers/acpi/osl.c | 94 ++++++++++++++++++++++++++++++++++++++++-- include/acpi/acpiosxf.h | 3 ++ 3 files changed, 100 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index bc171031508..96e26e70c63 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -215,6 +215,12 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "***** Region %p\n", object)); + /* Invalidate the region address/length via the host OS */ + + acpi_os_invalidate_address(object->region.space_id, + object->region.address, + (acpi_size) object->region.length); + second_desc = acpi_ns_get_secondary_object(object); if (second_desc) { /* diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 5691f165a95..c5b4f1ed9b7 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -88,6 +88,7 @@ struct acpi_res_list { char name[5]; /* only can have a length of 4 chars, make use of this one instead of res->name, no need to kalloc then */ struct list_head resource_list; + int count; }; static LIST_HEAD(resource_list_head); @@ -1358,6 +1359,89 @@ acpi_os_validate_interface (char *interface) return AE_SUPPORT; } +static inline int acpi_res_list_add(struct acpi_res_list *res) +{ + struct acpi_res_list *res_list_elem; + + list_for_each_entry(res_list_elem, &resource_list_head, + resource_list) { + + if (res->resource_type == res_list_elem->resource_type && + res->start == res_list_elem->start && + res->end == res_list_elem->end) { + + /* + * The Region(addr,len) already exist in the list, + * just increase the count + */ + + res_list_elem->count++; + return 0; + } + } + + res->count = 1; + list_add(&res->resource_list, &resource_list_head); + return 1; +} + +static inline void acpi_res_list_del(struct acpi_res_list *res) +{ + struct acpi_res_list *res_list_elem; + + 
list_for_each_entry(res_list_elem, &resource_list_head, + resource_list) { + + if (res->resource_type == res_list_elem->resource_type && + res->start == res_list_elem->start && + res->end == res_list_elem->end) { + + /* + * If the res count is decreased to 0, + * remove and free it + */ + + if (--res_list_elem->count == 0) { + list_del(&res_list_elem->resource_list); + kfree(res_list_elem); + } + return; + } + } +} + +acpi_status +acpi_os_invalidate_address( + u8 space_id, + acpi_physical_address address, + acpi_size length) +{ + struct acpi_res_list res; + + switch (space_id) { + case ACPI_ADR_SPACE_SYSTEM_IO: + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + /* Only interference checks against SystemIO and SytemMemory + are needed */ + res.start = address; + res.end = address + length - 1; + res.resource_type = space_id; + spin_lock(&acpi_res_lock); + acpi_res_list_del(&res); + spin_unlock(&acpi_res_lock); + break; + case ACPI_ADR_SPACE_PCI_CONFIG: + case ACPI_ADR_SPACE_EC: + case ACPI_ADR_SPACE_SMBUS: + case ACPI_ADR_SPACE_CMOS: + case ACPI_ADR_SPACE_PCI_BAR_TARGET: + case ACPI_ADR_SPACE_DATA_TABLE: + case ACPI_ADR_SPACE_FIXED_HARDWARE: + break; + } + return AE_OK; +} + /****************************************************************************** * * FUNCTION: acpi_os_validate_address @@ -1382,6 +1466,7 @@ acpi_os_validate_address ( char *name) { struct acpi_res_list *res; + int added; if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) return AE_OK; @@ -1399,14 +1484,17 @@ acpi_os_validate_address ( res->end = address + length - 1; res->resource_type = space_id; spin_lock(&acpi_res_lock); - list_add(&res->resource_list, &resource_list_head); + added = acpi_res_list_add(res); spin_unlock(&acpi_res_lock); - pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, " - "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO) + pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, " + "name: %s\n", added ? "Added" : "Already exist", + (space_id == ACPI_ADR_SPACE_SYSTEM_IO) ? "SystemIO" : "System Memory", (unsigned long long)res->start, (unsigned long long)res->end, res->name); + if (!added) + kfree(res); break; case ACPI_ADR_SPACE_PCI_CONFIG: case ACPI_ADR_SPACE_EC: diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index ab0b85cf21f..eb0e7189075 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -245,6 +245,9 @@ acpi_status acpi_osi_invalidate(char* interface); acpi_status acpi_os_validate_address(u8 space_id, acpi_physical_address address, acpi_size length, char *name); +acpi_status +acpi_os_invalidate_address(u8 space_id, acpi_physical_address address, + acpi_size length); u64 acpi_os_get_timer(void); -- cgit v1.2.3-70-g09d2 From 468de9e54a900559b55aa939a4daeaea1915e572 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 27 Aug 2009 12:07:40 -0400 Subject: nfsd41: expand solo sequence check Compounds consisting of only a sequence operation don't need any additional caching beyond the sequence information we store in the slot entry. Fix nfsd4_is_solo_sequence to identify this case correctly. The additional check for a failed sequence in nfsd4_store_cache_entry() is redundant, since the nfsd4_is_solo_sequence call lower down catches this case. The final ce_cachethis set in nfsd4_sequence is also redundant. Signed-off-by: Andy Adamson Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 9 --------- include/linux/nfsd/xdr4.h | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 5f634d24861..b44a2cfde6f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -991,16 +991,10 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) { struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; struct svc_rqst *rqstp = resp->rqstp; - struct nfsd4_compoundargs *args = rqstp->rq_argp; - struct nfsd4_op *op = &args->ops[resp->opcnt]; struct kvec *resv = &rqstp->rq_res.head[0]; dprintk("--> %s entry %p\n", __func__, entry); - /* Don't cache a failed OP_SEQUENCE. */ - if (resp->opcnt == 1 && op->opnum == OP_SEQUENCE && resp->cstate.status) - return; - nfsd4_release_respages(entry->ce_respages, entry->ce_resused); entry->ce_opcnt = resp->opcnt; entry->ce_status = resp->cstate.status; @@ -1490,9 +1484,6 @@ nfsd4_sequence(struct svc_rqst *rqstp, slot->sl_inuse = true; slot->sl_seqid = seq->seqid; slot->sl_cache_entry.ce_cachethis = seq->cachethis; - /* Always set the cache entry cachethis for solo sequence */ - if (nfsd4_is_solo_sequence(resp)) - slot->sl_cache_entry.ce_cachethis = 1; cstate->slot = slot; cstate->session = session; diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 5e4beb0deb8..3f716607c86 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -467,7 +467,7 @@ struct nfsd4_compoundres { static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) { struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; - return args->opcnt == 1; + return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE; } static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) -- cgit v1.2.3-70-g09d2 From 7285dd7fd375763bfb8ab1ac9cf3f1206f503c16 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 28 Aug 2009 20:25:24 +0200 Subject: clocksource: Resolve cpu hotplug dead lock with TSC unstable Martin Schwidefsky analyzed it: To register a clocksource the clocksource_mutex is acquired and if necessary timekeeping_notify is called to install the clocksource as the timekeeper clock. timekeeping_notify uses stop_machine which needs to take cpu_add_remove_lock mutex. Starting a new cpu is done with the cpu_add_remove_lock mutex held. native_cpu_up checks the tsc of the new cpu and if the tsc is no good clocksource_change_rating is called. Which needs the clocksource_mutex and the deadlock is complete. The solution is to replace the TSC via the clocksource watchdog mechanism. Mark the TSC as unstable and schedule the watchdog work so it gets removed in the watchdog thread context. 
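In call-site terms the rule becomes: code that can run while the CPU hotplug mutex is held must demote a clocksource through the watchdog rather than re-rate it directly. A minimal sketch of that pattern, assuming a registered clocksource the caller has already decided to distrust (illustrative only; the real TSC handling is in the hunks below):

#include <linux/clocksource.h>

static void demote_clocksource_from_hotplug(struct clocksource *cs)
{
	/*
	 * Safe under cpu_add_remove_lock: only watchdog_lock is taken here,
	 * and the actual re-rating happens later in the kwatchdog thread,
	 * outside the hotplug path.
	 */
	clocksource_mark_unstable(cs);
}

Deferring the rating change to the watchdog kthread is what breaks the clocksource_mutex/cpu_add_remove_lock inversion described above.
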
Signed-off-by: Thomas Gleixner LKML-Reference: Cc: Martin Schwidefsky Cc: John Stultz --- arch/x86/kernel/tsc.c | 8 +++++--- include/linux/clocksource.h | 1 + kernel/time/clocksource.c | 33 ++++++++++++++++++++++++++++++--- 3 files changed, 36 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 968425422c4..fc3672a303d 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -767,12 +767,14 @@ void mark_tsc_unstable(char *reason) { if (!tsc_unstable) { tsc_unstable = 1; - printk("Marking TSC unstable due to %s\n", reason); + printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ if (clocksource_tsc.mult) - clocksource_change_rating(&clocksource_tsc, 0); - else + clocksource_mark_unstable(&clocksource_tsc); + else { + clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; clocksource_tsc.rating = 0; + } } } diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 9ea40ff26f0..83d2fbd81b9 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -277,6 +277,7 @@ extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_resume(void); extern struct clocksource * __init __weak clocksource_default_clock(void); +extern void clocksource_mark_unstable(struct clocksource *cs); #ifdef CONFIG_GENERIC_TIME_VSYSCALL extern void update_vsyscall(struct timespec *ts, struct clocksource *c); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index e0c86ad6e9f..a0af4ffcb6e 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -149,15 +149,42 @@ static void clocksource_watchdog_work(struct work_struct *work) kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); } -static void clocksource_unstable(struct clocksource *cs, int64_t delta) +static void __clocksource_unstable(struct clocksource *cs) { - printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", - cs->name, delta); cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); cs->flags |= CLOCK_SOURCE_UNSTABLE; schedule_work(&watchdog_work); } +static void clocksource_unstable(struct clocksource *cs, int64_t delta) +{ + printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", + cs->name, delta); + __clocksource_unstable(cs); +} + +/** + * clocksource_mark_unstable - mark clocksource unstable via watchdog + * @cs: clocksource to be marked unstable + * + * This function is called instead of clocksource_change_rating from + * cpu hotplug code to avoid a deadlock between the clocksource mutex + * and the cpu hotplug mutex. It defers the update of the clocksource + * to the watchdog thread. + */ +void clocksource_mark_unstable(struct clocksource *cs) +{ + unsigned long flags; + + spin_lock_irqsave(&watchdog_lock, flags); + if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { + if (list_empty(&cs->wd_list)) + list_add(&cs->wd_list, &watchdog_list); + __clocksource_unstable(cs); + } + spin_unlock_irqrestore(&watchdog_lock, flags); +} + static void clocksource_watchdog(unsigned long data) { struct clocksource *cs; -- cgit v1.2.3-70-g09d2 From b24aad44438d5bc21cbbfb94a99d9bf710d8295b Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Fri, 24 Jul 2009 13:30:17 +0800 Subject: ACPICA: Split large ACPI table header Split out the non-acpi-defined ACPI tables into the existing (but empty) actbl2.h file. Preparation for new ACPI 4.0 tables. 
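For consumers the split is transparent: actbl.h now chains to both sub-headers, so a single umbrella include still exposes ACPI-defined and third-party tables alike. A small sketch under that assumption (the struct names are existing kernel types, not additions of this patch; <linux/acpi.h> is the usual driver-side entry point):

#include <linux/acpi.h>	/* -> acpi/actbl.h -> actbl1.h + actbl2.h */

static unsigned long example_table_sizes(void)
{
	/* MADT stays in actbl1.h (ACPI-defined); HPET moves to actbl2.h */
	return sizeof(struct acpi_table_madt) + sizeof(struct acpi_table_hpet);
}
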
Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/actbl.h | 35 +-- include/acpi/actbl1.h | 553 +---------------------------------------------- include/acpi/actbl2.h | 585 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 611 insertions(+), 562 deletions(-) create mode 100644 include/acpi/actbl2.h (limited to 'include') diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 0649a567002..55fcfc6725b 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -44,6 +44,19 @@ #ifndef __ACTBL_H__ #define __ACTBL_H__ +/******************************************************************************* + * + * Fundamental ACPI tables + * + * This file contains definitions for the ACPI tables that are directly consumed + * by ACPICA. All other tables are consumed by the OS-dependent ACPI-related + * device drivers and other OS support code. + * + * The RSDP and FACS do not use the common ACPI table header. All other ACPI + * tables use the header. + * + ******************************************************************************/ + /* * Values for description table header signatures. Useful because they make * it more difficult to inadvertently type in the wrong signature. @@ -65,11 +78,6 @@ #pragma pack(1) /* - * These are the ACPI tables that are directly consumed by the subsystem. - * - * The RSDP and FACS do not use the common ACPI table header. All other ACPI - * tables use the header. - * * Note about bitfields: The u8 type is used for bitfields in ACPI tables. * This is the only type that is even remotely portable. Anything else is not * portable, so do not use any other bitfield types. @@ -77,9 +85,8 @@ /******************************************************************************* * - * ACPI Table Header. This common header is used by all tables except the - * RSDP and FACS. The define is used for direct inclusion of header into - * other ACPI tables + * Master ACPI Table Header. This common header is used by all ACPI tables + * except the RSDP and FACS. * ******************************************************************************/ @@ -95,13 +102,16 @@ struct acpi_table_header { u32 asl_compiler_revision; /* ASL compiler version */ }; -/* +/******************************************************************************* + * * GAS - Generic Address Structure (ACPI 2.0+) * * Note: Since this structure is used in the ACPI tables, it is byte aligned. - * If misalignment is not supported, access to the Address field must be - * performed with care. - */ + * If misaliged access is not supported by the hardware, accesses to the + * 64-bit Address field must be performed with care. + * + ******************************************************************************/ + struct acpi_generic_address { u8 space_id; /* Address space where struct or register exists */ u8 bit_width; /* Size in bits of given register */ @@ -325,5 +335,6 @@ struct acpi_table_desc { */ #include +#include #endif /* __ACTBL_H__ */ diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index ec36693f868..582af1fcb8f 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -46,41 +46,29 @@ /******************************************************************************* * - * Additional ACPI Tables + * Additional ACPI Tables (1) * * These tables are not consumed directly by the ACPICA subsystem, but are * included here to support device drivers and the AML disassembler. 
* + * The tables in this file are fully defined within the ACPI specification. + * ******************************************************************************/ /* * Values for description table header signatures. Useful because they make * it more difficult to inadvertently type in the wrong signature. */ -#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */ #define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */ -#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ #define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ -#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ -#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ #define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */ #define ACPI_SIG_EINJ "EINJ" /* Error Injection table */ #define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */ #define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */ -#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ -#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */ #define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ -#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ -#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ #define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ -#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ -#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ #define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ -#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ -#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ -#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ -#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ /* * All tables must be byte-packed to match the ACPI specification, since @@ -113,115 +101,6 @@ struct acpi_whea_header { u64 mask; /* Bitmask required for this register instruction */ }; -/******************************************************************************* - * - * ASF - Alert Standard Format table (Signature "ASF!") - * - * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 - * - ******************************************************************************/ - -struct acpi_table_asf { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* ASF subtable header */ - -struct acpi_asf_header { - u8 type; - u8 reserved; - u16 length; -}; - -/* Values for Type field above */ - -enum acpi_asf_type { - ACPI_ASF_TYPE_INFO = 0, - ACPI_ASF_TYPE_ALERT = 1, - ACPI_ASF_TYPE_CONTROL = 2, - ACPI_ASF_TYPE_BOOT = 3, - ACPI_ASF_TYPE_ADDRESS = 4, - ACPI_ASF_TYPE_RESERVED = 5 -}; - -/* - * ASF subtables - */ - -/* 0: ASF Information */ - -struct acpi_asf_info { - struct acpi_asf_header header; - u8 min_reset_value; - u8 min_poll_interval; - u16 system_id; - u32 mfg_id; - u8 flags; - u8 reserved2[3]; -}; - -/* 1: ASF Alerts */ - -struct acpi_asf_alert { - struct acpi_asf_header header; - u8 assert_mask; - u8 deassert_mask; - u8 alerts; - u8 data_length; -}; - -struct acpi_asf_alert_data { - u8 address; - u8 command; - u8 mask; - u8 value; - u8 sensor_type; - u8 type; - u8 offset; - u8 source_type; - u8 severity; - u8 sensor_number; - u8 entity; - u8 instance; -}; - -/* 2: ASF Remote Control */ - -struct acpi_asf_remote { - struct acpi_asf_header header; - u8 controls; - u8 
data_length; - u16 reserved2; -}; - -struct acpi_asf_control_data { - u8 function; - u8 address; - u8 command; - u8 value; -}; - -/* 3: ASF RMCP Boot Options */ - -struct acpi_asf_rmcp { - struct acpi_asf_header header; - u8 capabilities[7]; - u8 completion_code; - u32 enterprise_id; - u8 command; - u16 parameter; - u16 boot_options; - u16 oem_parameters; -}; - -/* 4: ASF Address */ - -struct acpi_asf_address { - struct acpi_asf_header header; - u8 eprom_address; - u8 devices; -}; - /******************************************************************************* * * BERT - Boot Error Record Table @@ -251,18 +130,6 @@ struct acpi_bert_region { #define ACPI_BERT_MULTIPLE_UNCORRECTABLE (4) #define ACPI_BERT_MULTIPLE_CORRECTABLE (8) -/******************************************************************************* - * - * BOOT - Simple Boot Flag Table - * - ******************************************************************************/ - -struct acpi_table_boot { - struct acpi_table_header header; /* Common ACPI table header */ - u8 cmos_index; /* Index in CMOS RAM for the boot register */ - u8 reserved[3]; -}; - /******************************************************************************* * * CPEP - Corrected Platform Error Polling table @@ -284,123 +151,6 @@ struct acpi_cpep_polling { u32 interval; /* Polling interval (msec) */ }; -/******************************************************************************* - * - * DBGP - Debug Port table - * - ******************************************************************************/ - -struct acpi_table_dbgp { - struct acpi_table_header header; /* Common ACPI table header */ - u8 type; /* 0=full 16550, 1=subset of 16550 */ - u8 reserved[3]; - struct acpi_generic_address debug_port; -}; - -/******************************************************************************* - * - * DMAR - DMA Remapping table - * From "Intel Virtualization Technology for Directed I/O", Sept. 
2007 - * - ******************************************************************************/ - -struct acpi_table_dmar { - struct acpi_table_header header; /* Common ACPI table header */ - u8 width; /* Host Address Width */ - u8 flags; - u8 reserved[10]; -}; - -/* Flags */ - -#define ACPI_DMAR_INTR_REMAP (1) - -/* DMAR subtable header */ - -struct acpi_dmar_header { - u16 type; - u16 length; -}; - -/* Values for subtable type in struct acpi_dmar_header */ - -enum acpi_dmar_type { - ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, - ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, - ACPI_DMAR_TYPE_ATSR = 2, - ACPI_DMAR_TYPE_RESERVED = 3 /* 3 and greater are reserved */ -}; - -struct acpi_dmar_device_scope { - u8 entry_type; - u8 length; - u16 reserved; - u8 enumeration_id; - u8 bus; -}; - -/* Values for entry_type in struct acpi_dmar_device_scope */ - -enum acpi_dmar_scope_type { - ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, - ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, - ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, - ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, - ACPI_DMAR_SCOPE_TYPE_HPET = 4, - ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */ -}; - -struct acpi_dmar_pci_path { - u8 dev; - u8 fn; -}; - -/* - * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header - */ - -/* 0: Hardware Unit Definition */ - -struct acpi_dmar_hardware_unit { - struct acpi_dmar_header header; - u8 flags; - u8 reserved; - u16 segment; - u64 address; /* Register Base Address */ -}; - -/* Flags */ - -#define ACPI_DMAR_INCLUDE_ALL (1) - -/* 1: Reserved Memory Defininition */ - -struct acpi_dmar_reserved_memory { - struct acpi_dmar_header header; - u16 reserved; - u16 segment; - u64 base_address; /* 4_k aligned base address */ - u64 end_address; /* 4_k aligned limit address */ -}; - -/* Flags */ - -#define ACPI_DMAR_ALLOW_ALL (1) - - -/* 2: Root Port ATS Capability Reporting Structure */ - -struct acpi_dmar_atsr { - struct acpi_dmar_header header; - u8 flags; - u8 reserved; - u16 segment; -}; - -/* Flags */ - -#define ACPI_DMAR_ALL_PORTS (1) - /******************************************************************************* * * ECDT - Embedded Controller Boot Resources Table @@ -762,119 +512,6 @@ struct acpi_hest_generic { u32 error_status_block_length; }; -/******************************************************************************* - * - * HPET - High Precision Event Timer table - * - ******************************************************************************/ - -struct acpi_table_hpet { - struct acpi_table_header header; /* Common ACPI table header */ - u32 id; /* Hardware ID of event timer block */ - struct acpi_generic_address address; /* Address of event timer block */ - u8 sequence; /* HPET sequence number */ - u16 minimum_tick; /* Main counter min tick, periodic mode */ - u8 flags; -}; - -/*! Flags */ - -#define ACPI_HPET_PAGE_PROTECT (1) /* 00: No page protection */ -#define ACPI_HPET_PAGE_PROTECT_4 (1<<1) /* 01: 4KB page protected */ -#define ACPI_HPET_PAGE_PROTECT_64 (1<<2) /* 02: 64KB page protected */ - -/*! 
[End] no source code translation !*/ - -/******************************************************************************* - * - * IBFT - Boot Firmware Table - * - ******************************************************************************/ - -struct acpi_table_ibft { - struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved[12]; -}; - -/* IBFT common subtable header */ - -struct acpi_ibft_header { - u8 type; - u8 version; - u16 length; - u8 index; - u8 flags; -}; - -/* Values for Type field above */ - -enum acpi_ibft_type { - ACPI_IBFT_TYPE_NOT_USED = 0, - ACPI_IBFT_TYPE_CONTROL = 1, - ACPI_IBFT_TYPE_INITIATOR = 2, - ACPI_IBFT_TYPE_NIC = 3, - ACPI_IBFT_TYPE_TARGET = 4, - ACPI_IBFT_TYPE_EXTENSIONS = 5, - ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ -}; - -/* IBFT subtables */ - -struct acpi_ibft_control { - struct acpi_ibft_header header; - u16 extensions; - u16 initiator_offset; - u16 nic0_offset; - u16 target0_offset; - u16 nic1_offset; - u16 target1_offset; -}; - -struct acpi_ibft_initiator { - struct acpi_ibft_header header; - u8 sns_server[16]; - u8 slp_server[16]; - u8 primary_server[16]; - u8 secondary_server[16]; - u16 name_length; - u16 name_offset; -}; - -struct acpi_ibft_nic { - struct acpi_ibft_header header; - u8 ip_address[16]; - u8 subnet_mask_prefix; - u8 origin; - u8 gateway[16]; - u8 primary_dns[16]; - u8 secondary_dns[16]; - u8 dhcp[16]; - u16 vlan; - u8 mac_address[6]; - u16 pci_address; - u16 name_length; - u16 name_offset; -}; - -struct acpi_ibft_target { - struct acpi_ibft_header header; - u8 target_ip_address[16]; - u16 target_ip_socket; - u8 target_boot_lun[8]; - u8 chap_type; - u8 nic_association; - u16 target_name_length; - u16 target_name_offset; - u16 chap_name_length; - u16 chap_name_offset; - u16 chap_secret_length; - u16 chap_secret_offset; - u16 reverse_chap_name_length; - u16 reverse_chap_name_offset; - u16 reverse_chap_secret_length; - u16 reverse_chap_secret_offset; -}; - /******************************************************************************* * * MADT - Multiple APIC Description Table @@ -1056,27 +693,6 @@ struct acpi_madt_local_x2apic_nmi { #define ACPI_MADT_TRIGGER_RESERVED (2<<2) #define ACPI_MADT_TRIGGER_LEVEL (3<<2) -/******************************************************************************* - * - * MCFG - PCI Memory Mapped Configuration table and sub-table - * - ******************************************************************************/ - -struct acpi_table_mcfg { - struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved[8]; -}; - -/* Subtable */ - -struct acpi_mcfg_allocation { - u64 address; /* Base address, processor-relative */ - u16 pci_segment; /* PCI segment group number */ - u8 start_bus_number; /* Starting PCI Bus number */ - u8 end_bus_number; /* Final PCI Bus number */ - u32 reserved; -}; - /******************************************************************************* * * SBST - Smart Battery Specification Table @@ -1102,59 +718,6 @@ struct acpi_table_slit { u8 entry[1]; /* Real size = localities^2 */ }; -/******************************************************************************* - * - * SPCR - Serial Port Console Redirection table - * - ******************************************************************************/ - -struct acpi_table_spcr { - struct acpi_table_header header; /* Common ACPI table header */ - u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ - u8 reserved[3]; - struct acpi_generic_address serial_port; - u8 interrupt_type; - u8 
pc_interrupt; - u32 interrupt; - u8 baud_rate; - u8 parity; - u8 stop_bits; - u8 flow_control; - u8 terminal_type; - u8 reserved1; - u16 pci_device_id; - u16 pci_vendor_id; - u8 pci_bus; - u8 pci_device; - u8 pci_function; - u32 pci_flags; - u8 pci_segment; - u32 reserved2; -}; - -/******************************************************************************* - * - * SPMI - Server Platform Management Interface table - * - ******************************************************************************/ - -struct acpi_table_spmi { - struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved; - u8 interface_type; - u16 spec_revision; /* Version of IPMI */ - u8 interrupt_type; - u8 gpe_number; /* GPE assigned */ - u8 reserved1; - u8 pci_device_flag; - u32 interrupt; - struct acpi_generic_address ipmi_register; - u8 pci_segment; - u8 pci_bus; - u8 pci_device; - u8 pci_function; -}; - /******************************************************************************* * * SRAT - System Resource Affinity Table @@ -1227,116 +790,6 @@ struct acpi_srat_x2apic_cpu_affinity { #define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ -/******************************************************************************* - * - * TCPA - Trusted Computing Platform Alliance table - * - ******************************************************************************/ - -struct acpi_table_tcpa { - struct acpi_table_header header; /* Common ACPI table header */ - u16 reserved; - u32 max_log_length; /* Maximum length for the event log area */ - u64 log_address; /* Address of the event log area */ -}; - -/******************************************************************************* - * - * UEFI - UEFI Boot optimization Table - * - ******************************************************************************/ - -struct acpi_table_uefi { - struct acpi_table_header header; /* Common ACPI table header */ - u8 identifier[16]; /* UUID identifier */ - u16 data_offset; /* Offset of remaining data in table */ - u8 data; -}; - -/******************************************************************************* - * - * WDAT - Watchdog Action Table - * - ******************************************************************************/ - -struct acpi_table_wdat { - struct acpi_table_header header; /* Common ACPI table header */ - u32 header_length; /* Watchdog Header Length */ - u16 pci_segment; /* PCI Segment number */ - u8 pci_bus; /* PCI Bus number */ - u8 pci_device; /* PCI Device number */ - u8 pci_function; /* PCI Function number */ - u8 reserved[3]; - u32 timer_period; /* Period of one timer count (msec) */ - u32 max_count; /* Maximum counter value supported */ - u32 min_count; /* Minimum counter value */ - u8 flags; - u8 reserved2[3]; - u32 entries; /* Number of watchdog entries that follow */ -}; - -/* WDAT Instruction Entries (actions) */ - -struct acpi_wdat_entry { - struct acpi_whea_header whea_header; /* Common header for WHEA tables */ -}; - -/* Values for Action field above */ - -enum acpi_wdat_actions { - ACPI_WDAT_RESET = 1, - ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, - ACPI_WDAT_GET_COUNTDOWN = 5, - ACPI_WDAT_SET_COUNTDOWN = 6, - ACPI_WDAT_GET_RUNNING_STATE = 8, - ACPI_WDAT_SET_RUNNING_STATE = 9, - ACPI_WDAT_GET_STOPPED_STATE = 10, - ACPI_WDAT_SET_STOPPED_STATE = 11, - ACPI_WDAT_GET_REBOOT = 16, - ACPI_WDAT_SET_REBOOT = 17, - ACPI_WDAT_GET_SHUTDOWN = 18, - ACPI_WDAT_SET_SHUTDOWN = 19, - ACPI_WDAT_GET_STATUS = 32, - ACPI_WDAT_SET_STATUS = 33, - ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are 
reserved */ -}; - -/* Values for Instruction field above */ - -enum acpi_wdat_instructions { - ACPI_WDAT_READ_VALUE = 0, - ACPI_WDAT_READ_COUNTDOWN = 1, - ACPI_WDAT_WRITE_VALUE = 2, - ACPI_WDAT_WRITE_COUNTDOWN = 3, - ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ - ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ -}; - -/******************************************************************************* - * - * WDRT - Watchdog Resource Table - * - ******************************************************************************/ - -struct acpi_table_wdrt { - struct acpi_table_header header; /* Common ACPI table header */ - u32 header_length; /* Watchdog Header Length */ - u8 pci_segment; /* PCI Segment number */ - u8 pci_bus; /* PCI Bus number */ - u8 pci_device; /* PCI Device number */ - u8 pci_function; /* PCI Function number */ - u32 timer_period; /* Period of one timer count (msec) */ - u32 max_count; /* Maximum counter value supported */ - u32 min_count; /* Minimum counter value */ - u8 flags; - u8 reserved[3]; - u32 entries; /* Number of watchdog entries that follow */ -}; - -/* Flags */ - -#define ACPI_WDRT_TIMER_ENABLED (1) /* 00: Timer enabled */ - /* Reset to default packing */ #pragma pack() diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h new file mode 100644 index 00000000000..b271aba0e52 --- /dev/null +++ b/include/acpi/actbl2.h @@ -0,0 +1,585 @@ +#ifndef __ACTBL2_H__ +#define __ACTBL2_H__ + +/******************************************************************************* + * + * Additional ACPI Tables (2) + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + * The tables in this file are defined by third-party specifications, and are + * not defined directly by the ACPI specification itself. + * + ******************************************************************************/ + +/* + * Values for description table header signatures. Useful because they make + * it more difficult to inadvertently type in the wrong signature. + */ +#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */ +#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ +#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ +#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ +#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ +#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */ +#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ +#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ +#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ +#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ +#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ +#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ +#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ +#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ + +/* + * All tables must be byte-packed to match the ACPI specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note about bitfields: The u8 type is used for bitfields in ACPI tables. + * This is the only type that is even remotely portable. Anything else is not + * portable, so do not use any other bitfield types. 
+ */ + +/******************************************************************************* + * + * ASF - Alert Standard Format table (Signature "ASF!") + * + * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 + * + ******************************************************************************/ + +struct acpi_table_asf { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* ASF subtable header */ + +struct acpi_asf_header { + u8 type; + u8 reserved; + u16 length; +}; + +/* Values for Type field above */ + +enum acpi_asf_type { + ACPI_ASF_TYPE_INFO = 0, + ACPI_ASF_TYPE_ALERT = 1, + ACPI_ASF_TYPE_CONTROL = 2, + ACPI_ASF_TYPE_BOOT = 3, + ACPI_ASF_TYPE_ADDRESS = 4, + ACPI_ASF_TYPE_RESERVED = 5 +}; + +/* + * ASF subtables + */ + +/* 0: ASF Information */ + +struct acpi_asf_info { + struct acpi_asf_header header; + u8 min_reset_value; + u8 min_poll_interval; + u16 system_id; + u32 mfg_id; + u8 flags; + u8 reserved2[3]; +}; + +/* 1: ASF Alerts */ + +struct acpi_asf_alert { + struct acpi_asf_header header; + u8 assert_mask; + u8 deassert_mask; + u8 alerts; + u8 data_length; +}; + +struct acpi_asf_alert_data { + u8 address; + u8 command; + u8 mask; + u8 value; + u8 sensor_type; + u8 type; + u8 offset; + u8 source_type; + u8 severity; + u8 sensor_number; + u8 entity; + u8 instance; +}; + +/* 2: ASF Remote Control */ + +struct acpi_asf_remote { + struct acpi_asf_header header; + u8 controls; + u8 data_length; + u16 reserved2; +}; + +struct acpi_asf_control_data { + u8 function; + u8 address; + u8 command; + u8 value; +}; + +/* 3: ASF RMCP Boot Options */ + +struct acpi_asf_rmcp { + struct acpi_asf_header header; + u8 capabilities[7]; + u8 completion_code; + u32 enterprise_id; + u8 command; + u16 parameter; + u16 boot_options; + u16 oem_parameters; +}; + +/* 4: ASF Address */ + +struct acpi_asf_address { + struct acpi_asf_header header; + u8 eprom_address; + u8 devices; +}; + +/******************************************************************************* + * + * BOOT - Simple Boot Flag Table + * + ******************************************************************************/ + +struct acpi_table_boot { + struct acpi_table_header header; /* Common ACPI table header */ + u8 cmos_index; /* Index in CMOS RAM for the boot register */ + u8 reserved[3]; +}; + +/******************************************************************************* + * + * DBGP - Debug Port table + * + ******************************************************************************/ + +struct acpi_table_dbgp { + struct acpi_table_header header; /* Common ACPI table header */ + u8 type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address debug_port; +}; + +/******************************************************************************* + * + * DMAR - DMA Remapping table + * From "Intel Virtualization Technology for Directed I/O", Sept. 
2007 + * + ******************************************************************************/ + +struct acpi_table_dmar { + struct acpi_table_header header; /* Common ACPI table header */ + u8 width; /* Host Address Width */ + u8 flags; + u8 reserved[10]; +}; + +/* Flags */ + +#define ACPI_DMAR_INTR_REMAP (1) + +/* DMAR subtable header */ + +struct acpi_dmar_header { + u16 type; + u16 length; +}; + +/* Values for subtable type in struct acpi_dmar_header */ + +enum acpi_dmar_type { + ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, + ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, + ACPI_DMAR_TYPE_ATSR = 2, + ACPI_DMAR_TYPE_RESERVED = 3 /* 3 and greater are reserved */ +}; + +struct acpi_dmar_device_scope { + u8 entry_type; + u8 length; + u16 reserved; + u8 enumeration_id; + u8 bus; +}; + +/* Values for entry_type in struct acpi_dmar_device_scope */ + +enum acpi_dmar_scope_type { + ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, + ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, + ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, + ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, + ACPI_DMAR_SCOPE_TYPE_HPET = 4, + ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +struct acpi_dmar_pci_path { + u8 dev; + u8 fn; +}; + +/* + * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header + */ + +/* 0: Hardware Unit Definition */ + +struct acpi_dmar_hardware_unit { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; + u64 address; /* Register Base Address */ +}; + +/* Flags */ + +#define ACPI_DMAR_INCLUDE_ALL (1) + +/* 1: Reserved Memory Defininition */ + +struct acpi_dmar_reserved_memory { + struct acpi_dmar_header header; + u16 reserved; + u16 segment; + u64 base_address; /* 4_k aligned base address */ + u64 end_address; /* 4_k aligned limit address */ +}; + +/* Flags */ + +#define ACPI_DMAR_ALLOW_ALL (1) + +/* 2: Root Port ATS Capability Reporting Structure */ + +struct acpi_dmar_atsr { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +/* Flags */ + +#define ACPI_DMAR_ALL_PORTS (1) + +/******************************************************************************* + * + * HPET - High Precision Event Timer table + * + ******************************************************************************/ + +struct acpi_table_hpet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 id; /* Hardware ID of event timer block */ + struct acpi_generic_address address; /* Address of event timer block */ + u8 sequence; /* HPET sequence number */ + u16 minimum_tick; /* Main counter min tick, periodic mode */ + u8 flags; +}; + +/*! Flags */ + +#define ACPI_HPET_PAGE_PROTECT (1) /* 00: No page protection */ +#define ACPI_HPET_PAGE_PROTECT_4 (1<<1) /* 01: 4KB page protected */ +#define ACPI_HPET_PAGE_PROTECT_64 (1<<2) /* 02: 64KB page protected */ + +/*! 
[End] no source code translation !*/ + +/******************************************************************************* + * + * IBFT - Boot Firmware Table + * + ******************************************************************************/ + +struct acpi_table_ibft { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[12]; +}; + +/* IBFT common subtable header */ + +struct acpi_ibft_header { + u8 type; + u8 version; + u16 length; + u8 index; + u8 flags; +}; + +/* Values for Type field above */ + +enum acpi_ibft_type { + ACPI_IBFT_TYPE_NOT_USED = 0, + ACPI_IBFT_TYPE_CONTROL = 1, + ACPI_IBFT_TYPE_INITIATOR = 2, + ACPI_IBFT_TYPE_NIC = 3, + ACPI_IBFT_TYPE_TARGET = 4, + ACPI_IBFT_TYPE_EXTENSIONS = 5, + ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* IBFT subtables */ + +struct acpi_ibft_control { + struct acpi_ibft_header header; + u16 extensions; + u16 initiator_offset; + u16 nic0_offset; + u16 target0_offset; + u16 nic1_offset; + u16 target1_offset; +}; + +struct acpi_ibft_initiator { + struct acpi_ibft_header header; + u8 sns_server[16]; + u8 slp_server[16]; + u8 primary_server[16]; + u8 secondary_server[16]; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_nic { + struct acpi_ibft_header header; + u8 ip_address[16]; + u8 subnet_mask_prefix; + u8 origin; + u8 gateway[16]; + u8 primary_dns[16]; + u8 secondary_dns[16]; + u8 dhcp[16]; + u16 vlan; + u8 mac_address[6]; + u16 pci_address; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_target { + struct acpi_ibft_header header; + u8 target_ip_address[16]; + u16 target_ip_socket; + u8 target_boot_lun[8]; + u8 chap_type; + u8 nic_association; + u16 target_name_length; + u16 target_name_offset; + u16 chap_name_length; + u16 chap_name_offset; + u16 chap_secret_length; + u16 chap_secret_offset; + u16 reverse_chap_name_length; + u16 reverse_chap_name_offset; + u16 reverse_chap_secret_length; + u16 reverse_chap_secret_offset; +}; + +/******************************************************************************* + * + * MCFG - PCI Memory Mapped Configuration table and sub-table + * + ******************************************************************************/ + +struct acpi_table_mcfg { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[8]; +}; + +/* Subtable */ + +struct acpi_mcfg_allocation { + u64 address; /* Base address, processor-relative */ + u16 pci_segment; /* PCI segment group number */ + u8 start_bus_number; /* Starting PCI Bus number */ + u8 end_bus_number; /* Final PCI Bus number */ + u32 reserved; +}; + +/******************************************************************************* + * + * SPCR - Serial Port Console Redirection table + * + ******************************************************************************/ + +struct acpi_table_spcr { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; + u32 reserved2; +}; + +/******************************************************************************* + * + * SPMI - Server Platform Management Interface table + * + 
******************************************************************************/ + +struct acpi_table_spmi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved; + u8 interface_type; + u16 spec_revision; /* Version of IPMI */ + u8 interrupt_type; + u8 gpe_number; /* GPE assigned */ + u8 reserved1; + u8 pci_device_flag; + u32 interrupt; + struct acpi_generic_address ipmi_register; + u8 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; +}; + +/******************************************************************************* + * + * TCPA - Trusted Computing Platform Alliance table + * + ******************************************************************************/ + +struct acpi_table_tcpa { + struct acpi_table_header header; /* Common ACPI table header */ + u16 reserved; + u32 max_log_length; /* Maximum length for the event log area */ + u64 log_address; /* Address of the event log area */ +}; + +/******************************************************************************* + * + * UEFI - UEFI Boot optimization Table + * + ******************************************************************************/ + +struct acpi_table_uefi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 identifier[16]; /* UUID identifier */ + u16 data_offset; /* Offset of remaining data in table */ + u8 data; +}; + +/******************************************************************************* + * + * WDAT - Watchdog Action Table + * + ******************************************************************************/ + +struct acpi_table_wdat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; /* Watchdog Header Length */ + u16 pci_segment; /* PCI Segment number */ + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 reserved[3]; + u32 timer_period; /* Period of one timer count (msec) */ + u32 max_count; /* Maximum counter value supported */ + u32 min_count; /* Minimum counter value */ + u8 flags; + u8 reserved2[3]; + u32 entries; /* Number of watchdog entries that follow */ +}; + +/* WDAT Instruction Entries (actions) */ + +struct acpi_wdat_entry { + struct acpi_whea_header whea_header; /* Common header for WHEA tables */ +}; + +/* Values for Action field above */ + +enum acpi_wdat_actions { + ACPI_WDAT_RESET = 1, + ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, + ACPI_WDAT_GET_COUNTDOWN = 5, + ACPI_WDAT_SET_COUNTDOWN = 6, + ACPI_WDAT_GET_RUNNING_STATE = 8, + ACPI_WDAT_SET_RUNNING_STATE = 9, + ACPI_WDAT_GET_STOPPED_STATE = 10, + ACPI_WDAT_SET_STOPPED_STATE = 11, + ACPI_WDAT_GET_REBOOT = 16, + ACPI_WDAT_SET_REBOOT = 17, + ACPI_WDAT_GET_SHUTDOWN = 18, + ACPI_WDAT_SET_SHUTDOWN = 19, + ACPI_WDAT_GET_STATUS = 32, + ACPI_WDAT_SET_STATUS = 33, + ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */ +}; + +/* Values for Instruction field above */ + +enum acpi_wdat_instructions { + ACPI_WDAT_READ_VALUE = 0, + ACPI_WDAT_READ_COUNTDOWN = 1, + ACPI_WDAT_WRITE_VALUE = 2, + ACPI_WDAT_WRITE_COUNTDOWN = 3, + ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ + ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ +}; + +/******************************************************************************* + * + * WDRT - Watchdog Resource Table + * + ******************************************************************************/ + +struct acpi_table_wdrt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; 
/* Watchdog Header Length */ + u8 pci_segment; /* PCI Segment number */ + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u32 timer_period; /* Period of one timer count (msec) */ + u32 max_count; /* Maximum counter value supported */ + u32 min_count; /* Minimum counter value */ + u8 flags; + u8 reserved[3]; + u32 entries; /* Number of watchdog entries that follow */ +}; + +/* Flags */ + +#define ACPI_WDRT_TIMER_ENABLED (1) /* 00: Timer enabled */ + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL2_H__ */ -- cgit v1.2.3-70-g09d2 From 6e2d5ebd0d36199920676fdceaff4f4bfe66297b Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Mon, 27 Jul 2009 10:53:00 +0800 Subject: ACPICA: ACPI 4: Update headers for new and changed ACPI tables. Add IVRS,MSCT,UEFI,WAET,WDAT. Updated several existing tables for ACPI 4.0-related changes. Added document references for all tables not defined in ACPI spec. Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/actbl.h | 30 +++-- include/acpi/actbl1.h | 339 +++++++++++++++++++++++++++++++++----------------- include/acpi/actbl2.h | 339 +++++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 557 insertions(+), 151 deletions(-) (limited to 'include') diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 55fcfc6725b..1b658795260 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -58,8 +58,9 @@ ******************************************************************************/ /* - * Values for description table header signatures. Useful because they make - * it more difficult to inadvertently type in the wrong signature. + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. 
*/ #define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */ #define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */ @@ -123,6 +124,7 @@ struct acpi_generic_address { /******************************************************************************* * * RSDP - Root System Description Pointer (Signature is "RSD PTR ") + * Version 2 * ******************************************************************************/ @@ -143,6 +145,7 @@ struct acpi_table_rsdp { /******************************************************************************* * * RSDT/XSDT - Root System Description Tables + * Version 1 (both) * ******************************************************************************/ @@ -176,23 +179,24 @@ struct acpi_table_facs { u8 reserved1[24]; /* Reserved, must be zero */ }; -/* global_lock flags */ +/* Masks for global_lock flag field above */ #define ACPI_GLOCK_PENDING (1) /* 00: Pending global lock ownership */ #define ACPI_GLOCK_OWNED (1<<1) /* 01: Global lock is owned */ -/* Flags */ +/* Masks for Flags field above */ #define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */ #define ACPI_FACS_64BIT_WAKE (1<<1) /* 01: 64-bit wake vector supported (ACPI 4.0) */ -/* ospm_flags */ +/* Masks for ospm_flags field above */ #define ACPI_FACS_64BIT_ENVIRONMENT (1) /* 00: 64-bit wake environment is required (ACPI 4.0) */ /******************************************************************************* * * FADT - Fixed ACPI Description Table (Signature "FACP") + * Version 4 * ******************************************************************************/ @@ -253,7 +257,7 @@ struct acpi_table_fadt { struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ }; -/* FADT Boot Architecture Flags (boot_flags) */ +/* Masks for FADT Boot Architecture Flags (boot_flags) */ #define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */ #define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */ @@ -263,7 +267,7 @@ struct acpi_table_fadt { #define FADT2_REVISION_ID 3 -/* FADT flags */ +/* Masks for FADT flags */ #define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */ #define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */ @@ -286,7 +290,7 @@ struct acpi_table_fadt { #define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */ #define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */ -/* FADT Prefered Power Management Profiles */ +/* Values for preferred_profile (Prefered Power Management Profiles) */ enum acpi_prefered_pm_profiles { PM_UNSPECIFIED = 0, @@ -304,14 +308,16 @@ enum acpi_prefered_pm_profiles { #define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f) +/* + * Internal table-related structures + */ union acpi_name_union { u32 integer; char ascii[4]; }; -/* - * Internal ACPI Table Descriptor. One per ACPI table - */ +/* Internal ACPI Table Descriptor. One per ACPI table. 
*/ + struct acpi_table_desc { acpi_physical_address address; struct acpi_table_header *pointer; @@ -321,7 +327,7 @@ struct acpi_table_desc { u8 flags; }; -/* Flags for above */ +/* Masks for Flags field above */ #define ACPI_TABLE_ORIGIN_UNKNOWN (0) #define ACPI_TABLE_ORIGIN_MAPPED (1) diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 582af1fcb8f..0417f2abc44 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -56,8 +56,9 @@ ******************************************************************************/ /* - * Values for description table header signatures. Useful because they make - * it more difficult to inadvertently type in the wrong signature. + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. */ #define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */ #define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ @@ -66,6 +67,7 @@ #define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */ #define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */ #define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ +#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ #define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ #define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ @@ -82,14 +84,20 @@ * portable, so do not use any other bitfield types. */ -/* Common Subtable header (used in MADT, SRAT, etc.) */ +/******************************************************************************* + * + * Common subtable headers + * + ******************************************************************************/ + +/* Generic subtable header (used in MADT, SRAT, etc.) 
*/ struct acpi_subtable_header { u8 type; u8 length; }; -/* Common Subtable header for WHEA tables (EINJ, ERST, WDAT) */ +/* Subtable header for WHEA tables (EINJ, ERST, WDAT) */ struct acpi_whea_header { u8 action; @@ -103,7 +111,8 @@ struct acpi_whea_header { /******************************************************************************* * - * BERT - Boot Error Record Table + * BERT - Boot Error Record Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -113,26 +122,43 @@ struct acpi_table_bert { u64 address; /* Physical addresss of the error region */ }; -/* Boot Error Region */ +/* Boot Error Region (not a subtable, pointed to by Address field above) */ struct acpi_bert_region { - u32 block_status; - u32 raw_data_offset; - u32 raw_data_length; - u32 data_length; - u32 error_severity; + u32 block_status; /* Type of error information */ + u32 raw_data_offset; /* Offset to raw error data */ + u32 raw_data_length; /* Length of raw error data */ + u32 data_length; /* Length of generic error data */ + u32 error_severity; /* Severity code */ }; -/* block_status Flags */ +/* Values for block_status flags above */ #define ACPI_BERT_UNCORRECTABLE (1) -#define ACPI_BERT_CORRECTABLE (2) -#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (4) -#define ACPI_BERT_MULTIPLE_CORRECTABLE (8) +#define ACPI_BERT_CORRECTABLE (1<<1) +#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_BERT_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_BERT_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ + +/* Values for error_severity above */ + +enum acpi_bert_error_severity { + ACPI_BERT_ERROR_CORRECTABLE = 0, + ACPI_BERT_ERROR_FATAL = 1, + ACPI_BERT_ERROR_CORRECTED = 2, + ACPI_BERT_ERROR_NONE = 3, + ACPI_BERT_ERROR_RESERVED = 4 /* 4 and greater are reserved */ +}; + +/* + * Note: The generic error data that follows the error_severity field above + * uses the struct acpi_hest_generic_data defined under the HEST table below + */ /******************************************************************************* * - * CPEP - Corrected Platform Error Polling table + * CPEP - Corrected Platform Error Polling table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -144,8 +170,7 @@ struct acpi_table_cpep { /* Subtable */ struct acpi_cpep_polling { - u8 type; - u8 length; + struct acpi_subtable_header header; u8 id; /* Processor ID */ u8 eid; /* Processor EID */ u32 interval; /* Polling interval (msec) */ @@ -154,6 +179,7 @@ struct acpi_cpep_polling { /******************************************************************************* * * ECDT - Embedded Controller Boot Resources Table + * Version 1 * ******************************************************************************/ @@ -168,14 +194,16 @@ struct acpi_table_ecdt { /******************************************************************************* * - * EINJ - Error Injection Table + * EINJ - Error Injection Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ struct acpi_table_einj { struct acpi_table_header header; /* Common ACPI table header */ u32 header_length; - u32 reserved; + u8 flags; + u8 reserved[3]; u32 entries; }; @@ -185,6 +213,10 @@ struct acpi_einj_entry { struct acpi_whea_header whea_header; /* Common header for WHEA tables */ }; +/* Masks for Flags field above */ + +#define ACPI_EINJ_PRESERVE (1) + /* Values for Action field above */ enum acpi_einj_actions { @@ -220,9 +252,34 
@@ struct acpi_einj_trigger { u32 entry_count; }; +/* Command status return values */ + +enum acpi_einj_command_status { + ACPI_EINJ_SUCCESS = 0, + ACPI_EINJ_FAILURE = 1, + ACPI_EINJ_INVALID_ACCESS = 2, + ACPI_EINJ_STATUS_RESERVED = 3 /* 3 and greater are reserved */ +}; + +/* Error types returned from ACPI_EINJ_GET_ERROR_TYPE (bitfield) */ + +#define ACPI_EINJ_PROCESSOR_CORRECTABLE (1) +#define ACPI_EINJ_PROCESSOR_UNCORRECTABLE (1<<1) +#define ACPI_EINJ_PROCESSOR_FATAL (1<<2) +#define ACPI_EINJ_MEMORY_CORRECTABLE (1<<3) +#define ACPI_EINJ_MEMORY_UNCORRECTABLE (1<<4) +#define ACPI_EINJ_MEMORY_FATAL (1<<5) +#define ACPI_EINJ_PCIX_CORRECTABLE (1<<6) +#define ACPI_EINJ_PCIX_UNCORRECTABLE (1<<7) +#define ACPI_EINJ_PCIX_FATAL (1<<8) +#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9) +#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10) +#define ACPI_EINJ_PLATFORM_FATAL (1<<11) + /******************************************************************************* * - * ERST - Error Record Serialization Table + * ERST - Error Record Serialization Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -239,19 +296,23 @@ struct acpi_erst_entry { struct acpi_whea_header whea_header; /* Common header for WHEA tables */ }; +/* Masks for Flags field above */ + +#define ACPI_ERST_PRESERVE (1) + /* Values for Action field above */ enum acpi_erst_actions { - ACPI_ERST_BEGIN_WRITE_OPERATION = 0, - ACPI_ERST_BEGIN_READ_OPERATION = 1, - ACPI_ERST_BETGIN_CLEAR_OPERATION = 2, - ACPI_ERST_END_OPERATION = 3, + ACPI_ERST_BEGIN_WRITE = 0, + ACPI_ERST_BEGIN_READ = 1, + ACPI_ERST_BEGIN_CLEAR = 2, + ACPI_ERST_END = 3, ACPI_ERST_SET_RECORD_OFFSET = 4, ACPI_ERST_EXECUTE_OPERATION = 5, ACPI_ERST_CHECK_BUSY_STATUS = 6, ACPI_ERST_GET_COMMAND_STATUS = 7, - ACPI_ERST_GET_RECORD_IDENTIFIER = 8, - ACPI_ERST_SET_RECORD_IDENTIFIER = 9, + ACPI_ERST_GET_RECORD_ID = 8, + ACPI_ERST_SET_RECORD_ID = 9, ACPI_ERST_GET_RECORD_COUNT = 10, ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, ACPI_ERST_NOT_USED = 12, @@ -286,9 +347,29 @@ enum acpi_erst_instructions { ACPI_ERST_INSTRUCTION_RESERVED = 19 /* 19 and greater are reserved */ }; +/* Command status return values */ + +enum acpi_erst_command_status { + ACPI_ERST_SUCESS = 0, + ACPI_ERST_NO_SPACE = 1, + ACPI_ERST_NOT_AVAILABLE = 2, + ACPI_ERST_FAILURE = 3, + ACPI_ERST_RECORD_EMPTY = 4, + ACPI_ERST_NOT_FOUND = 5, + ACPI_ERST_STATUS_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* Error Record Serialization Information */ + +struct acpi_erst_info { + u16 signature; /* Should be "ER" */ + u8 data[48]; +}; + /******************************************************************************* * - * HEST - Hardware Error Source Table + * HEST - Hardware Error Source Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -301,70 +382,49 @@ struct acpi_table_hest { struct acpi_hest_header { u16 type; + u16 source_id; }; /* Values for Type field above for subtables */ enum acpi_hest_types { - ACPI_HEST_TYPE_XPF_MACHINE_CHECK = 0, - ACPI_HEST_TYPE_XPF_CORRECTED_MACHINE_CHECK = 1, - ACPI_HEST_TYPE_XPF_UNUSED = 2, - ACPI_HEST_TYPE_XPF_NON_MASKABLE_INTERRUPT = 3, - ACPI_HEST_TYPE_IPF_CORRECTED_MACHINE_CHECK = 4, - ACPI_HEST_TYPE_IPF_CORRECTED_PLATFORM_ERROR = 5, + ACPI_HEST_TYPE_IA32_CHECK = 0, + ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, + ACPI_HEST_TYPE_IA32_NMI = 2, + ACPI_HEST_TYPE_NOT_USED3 = 3, + ACPI_HEST_TYPE_NOT_USED4 = 4, + ACPI_HEST_TYPE_NOT_USED5 = 5, ACPI_HEST_TYPE_AER_ROOT_PORT = 6, 
ACPI_HEST_TYPE_AER_ENDPOINT = 7, ACPI_HEST_TYPE_AER_BRIDGE = 8, - ACPI_HEST_TYPE_GENERIC_HARDWARE_ERROR_SOURCE = 9, + ACPI_HEST_TYPE_GENERIC_ERROR = 9, ACPI_HEST_TYPE_RESERVED = 10 /* 10 and greater are reserved */ }; /* - * HEST Sub-subtables + * HEST substructures contained in subtables */ -/* XPF Machine Check Error Bank */ - -struct acpi_hest_xpf_error_bank { +/* + * IA32 Error Bank(s) - Follows the struct acpi_hest_ia_machine_check and + * struct acpi_hest_ia_corrected structures. + */ +struct acpi_hest_ia_error_bank { u8 bank_number; u8 clear_status_on_init; u8 status_format; - u8 config_write_enable; + u8 reserved; u32 control_register; - u64 control_init_data; + u64 control_data; u32 status_register; u32 address_register; u32 misc_register; }; -/* Generic Error Status */ - -struct acpi_hest_generic_status { - u32 block_status; - u32 raw_data_offset; - u32 raw_data_length; - u32 data_length; - u32 error_severity; -}; - -/* Generic Error Data */ - -struct acpi_hest_generic_data { - u8 section_type[16]; - u32 error_severity; - u16 revision; - u8 validation_bits; - u8 flags; - u32 error_data_length; - u8 fru_id[16]; - u8 fru_text[20]; -}; - -/* Common HEST structure for PCI/AER types below (6,7,8) */ +/* Common HEST sub-structure for PCI/AER structures below (6,7,8) */ struct acpi_hest_aer_common { - u16 source_id; - u16 config_write_enable; + u16 reserved1; u8 flags; u8 enabled; u32 records_to_pre_allocate; @@ -373,13 +433,18 @@ struct acpi_hest_aer_common { u16 device; u16 function; u16 device_control; - u16 reserved; + u16 reserved2; u32 uncorrectable_error_mask; u32 uncorrectable_error_severity; u32 correctable_error_mask; u32 advanced_error_capabilities; }; +/* Masks for HEST Flags fields */ + +#define ACPI_HEST_FIRMWARE_FIRST (1) +#define ACPI_HEST_GLOBAL (1<<1) + /* Hardware Error Notification */ struct acpi_hest_notify { @@ -405,71 +470,59 @@ enum acpi_hest_notify_types { ACPI_HEST_NOTIFY_RESERVED = 5 /* 5 and greater are reserved */ }; +/* Values for config_write_enable bitfield above */ + +#define ACPI_HEST_TYPE (1) +#define ACPI_HEST_POLL_INTERVAL (1<<1) +#define ACPI_HEST_POLL_THRESHOLD_VALUE (1<<2) +#define ACPI_HEST_POLL_THRESHOLD_WINDOW (1<<3) +#define ACPI_HEST_ERR_THRESHOLD_VALUE (1<<4) +#define ACPI_HEST_ERR_THRESHOLD_WINDOW (1<<5) + /* * HEST subtables - * - * From WHEA Design Document, 16 May 2007. - * Note: There is no subtable type 2 in this version of the document, - * and there are two different subtable type 3s. 
*/ - /* 0: XPF Machine Check Exception */ +/* 0: IA32 Machine Check Exception */ -struct acpi_hest_xpf_machine_check { +struct acpi_hest_ia_machine_check { struct acpi_hest_header header; - u16 source_id; - u16 config_write_enable; + u16 reserved1; u8 flags; - u8 reserved1; + u8 enabled; u32 records_to_pre_allocate; u32 max_sections_per_record; u64 global_capability_data; u64 global_control_data; u8 num_hardware_banks; - u8 reserved2[7]; + u8 reserved3[7]; }; -/* 1: XPF Corrected Machine Check */ +/* 1: IA32 Corrected Machine Check */ -struct acpi_table_hest_xpf_corrected { +struct acpi_table_hest_ia_corrected { struct acpi_hest_header header; - u16 source_id; - u16 config_write_enable; + u16 reserved1; u8 flags; u8 enabled; u32 records_to_pre_allocate; u32 max_sections_per_record; struct acpi_hest_notify notify; u8 num_hardware_banks; - u8 reserved[3]; + u8 reserved2[3]; }; -/* 3: XPF Non-Maskable Interrupt */ +/* 2: IA32 Non-Maskable Interrupt */ -struct acpi_hest_xpf_nmi { +struct acpi_hest_ia_nmi { struct acpi_hest_header header; - u16 source_id; u32 reserved; u32 records_to_pre_allocate; u32 max_sections_per_record; u32 max_raw_data_length; }; -/* 4: IPF Corrected Machine Check */ - -struct acpi_hest_ipf_corrected { - struct acpi_hest_header header; - u8 enabled; - u8 reserved; -}; - -/* 5: IPF Corrected Platform Error */ - -struct acpi_hest_ipf_corrected_platform { - struct acpi_hest_header header; - u8 enabled; - u8 reserved; -}; +/* 3,4,5: Not used */ /* 6: PCI Express Root Port AER */ @@ -491,30 +544,61 @@ struct acpi_hest_aer { struct acpi_hest_aer_bridge { struct acpi_hest_header header; struct acpi_hest_aer_common aer; - u32 secondary_uncorrectable_error_mask; - u32 secondary_uncorrectable_error_severity; - u32 secondary_advanced_capabilities; + u32 second_uncorrectable_error_mask; + u32 second_uncorrectable_error_severity; + u32 second_advanced_capabilities; }; /* 9: Generic Hardware Error Source */ struct acpi_hest_generic { struct acpi_hest_header header; - u16 source_id; u16 related_source_id; - u8 config_write_enable; + u8 reserved; u8 enabled; u32 records_to_pre_allocate; u32 max_sections_per_record; u32 max_raw_data_length; struct acpi_generic_address error_status_address; struct acpi_hest_notify notify; - u32 error_status_block_length; + u32 error_block_length; +}; + +/* Generic Error Status block */ + +struct acpi_hest_generic_status { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +/* Values for block_status flags above */ + +#define ACPI_HEST_UNCORRECTABLE (1) +#define ACPI_HEST_CORRECTABLE (1<<1) +#define ACPI_HEST_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_HEST_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_HEST_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ + +/* Generic Error Data entry */ + +struct acpi_hest_generic_data { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; }; /******************************************************************************* * * MADT - Multiple APIC Description Table + * Version 3 * ******************************************************************************/ @@ -524,16 +608,16 @@ struct acpi_table_madt { u32 flags; }; -/* Flags */ +/* Masks for Flags field above */ -#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ +#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ /* Values for PCATCompat flag */ #define 
ACPI_MADT_DUAL_PIC 0 #define ACPI_MADT_MULTIPLE_APIC 1 -/* Values for subtable type in struct acpi_subtable_header */ +/* Values for MADT subtable type in struct acpi_subtable_header */ enum acpi_madt_type { ACPI_MADT_TYPE_LOCAL_APIC = 0, @@ -644,7 +728,7 @@ struct acpi_madt_interrupt_source { u32 flags; /* Interrupt Source Flags */ }; -/* Flags field above */ +/* Masks for Flags field above */ #define ACPI_MADT_CPEI_OVERRIDE (1) @@ -693,9 +777,36 @@ struct acpi_madt_local_x2apic_nmi { #define ACPI_MADT_TRIGGER_RESERVED (2<<2) #define ACPI_MADT_TRIGGER_LEVEL (3<<2) +/******************************************************************************* + * + * MSCT - Maximum System Characteristics Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_msct { + struct acpi_table_header header; /* Common ACPI table header */ + u32 proximity_offset; /* Location of proximity info struct(s) */ + u32 max_proximity_domains; /* Max number of proximity domains */ + u32 max_clock_domains; /* Max number of clock domains */ + u64 max_address; /* Max physical address in system */ +}; + +/* Subtable - Maximum Proximity Domain Information. Version 1 */ + +struct acpi_msct_proximity { + u8 revision; + u8 length; + u32 range_start; /* Start of domain range */ + u32 range_end; /* End of domain range */ + u32 processor_capacity; + u64 memory_capacity; /* In bytes */ +}; + /******************************************************************************* * * SBST - Smart Battery Specification Table + * Version 1 * ******************************************************************************/ @@ -709,6 +820,7 @@ struct acpi_table_sbst { /******************************************************************************* * * SLIT - System Locality Distance Information Table + * Version 1 * ******************************************************************************/ @@ -721,6 +833,7 @@ struct acpi_table_slit { /******************************************************************************* * * SRAT - System Resource Affinity Table + * Version 3 * ******************************************************************************/ @@ -755,6 +868,10 @@ struct acpi_srat_cpu_affinity { u32 reserved; /* Reserved, must be zero */ }; +/* Flags */ + +#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */ + /* 1: Memory Affinity */ struct acpi_srat_mem_affinity { diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index b271aba0e52..6f3dce9991e 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -14,8 +14,9 @@ ******************************************************************************/ /* - * Values for description table header signatures. Useful because they make - * it more difficult to inadvertently type in the wrong signature. + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. */ #define ACPI_SIG_ASF "ASF!" 
/* Alert Standard Format table */ #define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ @@ -23,12 +24,14 @@ #define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ #define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ #define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */ +#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ #define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ #define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ #define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ #define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ #define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ #define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ +#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ #define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ #define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ @@ -47,6 +50,7 @@ /******************************************************************************* * * ASF - Alert Standard Format table (Signature "ASF!") + * Revision 0x10 * * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 * @@ -91,6 +95,10 @@ struct acpi_asf_info { u8 reserved2[3]; }; +/* Masks for Flags field above */ + +#define ACPI_ASF_SMBUS_PROTOCOLS (1) + /* 1: ASF Alerts */ struct acpi_asf_alert { @@ -156,6 +164,9 @@ struct acpi_asf_address { /******************************************************************************* * * BOOT - Simple Boot Flag Table + * Version 1 + * + * Conforms to the "Simple Boot Flag Specification", Version 2.1 * ******************************************************************************/ @@ -168,6 +179,9 @@ struct acpi_table_boot { /******************************************************************************* * * DBGP - Debug Port table + * Version 1 + * + * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000 * ******************************************************************************/ @@ -181,7 +195,10 @@ struct acpi_table_dbgp { /******************************************************************************* * * DMAR - DMA Remapping table - * From "Intel Virtualization Technology for Directed I/O", Sept. 2007 + * Version 1 + * + * Conforms to "Intel Virtualization Technology for Directed I/O", + * Version 1.2, Sept. 
2008 * ******************************************************************************/ @@ -192,7 +209,7 @@ struct acpi_table_dmar { u8 reserved[10]; }; -/* Flags */ +/* Masks for Flags field above */ #define ACPI_DMAR_INTR_REMAP (1) @@ -209,9 +226,12 @@ enum acpi_dmar_type { ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, ACPI_DMAR_TYPE_ATSR = 2, - ACPI_DMAR_TYPE_RESERVED = 3 /* 3 and greater are reserved */ + ACPI_DMAR_HARDWARE_AFFINITY = 3, + ACPI_DMAR_TYPE_RESERVED = 4 /* 4 and greater are reserved */ }; +/* DMAR Device Scope structure */ + struct acpi_dmar_device_scope { u8 entry_type; u8 length; @@ -250,7 +270,7 @@ struct acpi_dmar_hardware_unit { u64 address; /* Register Base Address */ }; -/* Flags */ +/* Masks for Flags field above */ #define ACPI_DMAR_INCLUDE_ALL (1) @@ -264,7 +284,7 @@ struct acpi_dmar_reserved_memory { u64 end_address; /* 4_k aligned limit address */ }; -/* Flags */ +/* Masks for Flags field above */ #define ACPI_DMAR_ALLOW_ALL (1) @@ -277,13 +297,26 @@ struct acpi_dmar_atsr { u16 segment; }; -/* Flags */ +/* Masks for Flags field above */ #define ACPI_DMAR_ALL_PORTS (1) +/* 3: Remapping Hardware Static Affinity Structure */ + +struct acpi_dmar_rhsa { + struct acpi_dmar_header header; + u32 reserved; + u64 base_address; + u32 proximity_domain; +}; + /******************************************************************************* * * HPET - High Precision Event Timer table + * Version 1 + * + * Conforms to "IA-PC HPET (High Precision Event Timers) Specification", + * Version 1.0a, October 2004 * ******************************************************************************/ @@ -296,17 +329,28 @@ struct acpi_table_hpet { u8 flags; }; -/*! Flags */ +/* Masks for Flags field above */ -#define ACPI_HPET_PAGE_PROTECT (1) /* 00: No page protection */ -#define ACPI_HPET_PAGE_PROTECT_4 (1<<1) /* 01: 4KB page protected */ -#define ACPI_HPET_PAGE_PROTECT_64 (1<<2) /* 02: 64KB page protected */ +#define ACPI_HPET_PAGE_PROTECT_MASK (3) -/*! [End] no source code translation !*/ +/* Values for Page Protect flags */ + +enum acpi_hpet_page_protect { + ACPI_HPET_NO_PAGE_PROTECT = 0, + ACPI_HPET_PAGE_PROTECT4 = 1, + ACPI_HPET_PAGE_PROTECT64 = 2 +}; /******************************************************************************* * * IBFT - Boot Firmware Table + * Version 1 + * + * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b + * Specification", Version 1.01, March 1, 2007 + * + * Note: It appears that this table is not intended to appear in the RSDT/XSDT. + * Therefore, it is not currently supported by the disassembler. * ******************************************************************************/ @@ -394,9 +438,184 @@ struct acpi_ibft_target { u16 reverse_chap_secret_offset; }; +/******************************************************************************* + * + * IVRS - I/O Virtualization Reporting Structure + * Version 1 + * + * Conforms to "AMD I/O Virtualization Technology (IOMMU) Specification", + * Revision 1.26, February 2009. 
+ * + ******************************************************************************/ + +struct acpi_table_ivrs { + struct acpi_table_header header; /* Common ACPI table header */ + u32 info; /* Common virtualization info */ + u64 reserved; +}; + +/* Values for Info field above */ + +#define ACPI_IVRS_PHYSICAL_SIZE 0x00007F00 /* 7 bits, physical address size */ +#define ACPI_IVRS_VIRTUAL_SIZE 0x003F8000 /* 7 bits, virtual address size */ +#define ACPI_IVRS_ATS_RESERVED 0x00400000 /* ATS address translation range reserved */ + +/* IVRS subtable header */ + +struct acpi_ivrs_header { + u8 type; /* Subtable type */ + u8 flags; + u16 length; /* Subtable length */ + u16 device_id; /* ID of IOMMU */ +}; + +/* Values for subtable Type above */ + +enum acpi_ivrs_type { + ACPI_IVRS_TYPE_HARDWARE = 0x10, + ACPI_IVRS_TYPE_MEMORY1 = 0x20, + ACPI_IVRS_TYPE_MEMORY2 = 0x21, + ACPI_IVRS_TYPE_MEMORY3 = 0x22 +}; + +/* Masks for Flags field above for IVHD subtable */ + +#define ACPI_IVHD_TT_ENABLE (1) +#define ACPI_IVHD_PASS_PW (1<<1) +#define ACPI_IVHD_RES_PASS_PW (1<<2) +#define ACPI_IVHD_ISOC (1<<3) +#define ACPI_IVHD_IOTLB (1<<4) + +/* Masks for Flags field above for IVMD subtable */ + +#define ACPI_IVMD_UNITY (1) +#define ACPI_IVMD_READ (1<<1) +#define ACPI_IVMD_WRITE (1<<2) +#define ACPI_IVMD_EXCLUSION_RANGE (1<<3) + +/* + * IVRS subtables, correspond to Type in struct acpi_ivrs_header + */ + +/* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */ + +struct acpi_ivrs_hardware { + struct acpi_ivrs_header header; + u16 capability_offset; /* Offset for IOMMU control fields */ + u64 base_address; /* IOMMU control registers */ + u16 pci_segment_group; + u16 info; /* MSI number and unit ID */ + u32 reserved; +}; + +/* Masks for Info field above */ + +#define ACPI_IVHD_MSI_NUMBER_MASK 0x001F /* 5 bits, MSI message number */ +#define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, unit_iD */ + +/* + * Device Entries for IVHD subtable, appear after struct acpi_ivrs_hardware structure. + * Upper two bits of the Type field are the (encoded) length of the structure. + * Currently, only 4 and 8 byte entries are defined. 16 and 32 byte entries + * are reserved for future use but not defined. 
+ */ +struct acpi_ivrs_de_header { + u8 type; + u16 id; + u8 data_setting; +}; + +/* Length of device entry is in the top two bits of Type field above */ + +#define ACPI_IVHD_ENTRY_LENGTH 0xC0 + +/* Values for device entry Type field above */ + +enum acpi_ivrs_device_entry_type { + /* 4-byte device entries, all use struct acpi_ivrs_device4 */ + + ACPI_IVRS_TYPE_PAD4 = 0, + ACPI_IVRS_TYPE_ALL = 1, + ACPI_IVRS_TYPE_SELECT = 2, + ACPI_IVRS_TYPE_START = 3, + ACPI_IVRS_TYPE_END = 4, + + /* 8-byte device entries */ + + ACPI_IVRS_TYPE_PAD8 = 64, + ACPI_IVRS_TYPE_NOT_USED = 65, + ACPI_IVRS_TYPE_ALIAS_SELECT = 66, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */ +}; + +/* Values for Data field above */ + +#define ACPI_IVHD_INIT_PASS (1) +#define ACPI_IVHD_EINT_PASS (1<<1) +#define ACPI_IVHD_NMI_PASS (1<<2) +#define ACPI_IVHD_SYSTEM_MGMT (3<<4) +#define ACPI_IVHD_LINT0_PASS (1<<6) +#define ACPI_IVHD_LINT1_PASS (1<<7) + +/* Types 0-4: 4-byte device entry */ + +struct acpi_ivrs_device4 { + struct acpi_ivrs_de_header header; +}; + +/* Types 66-67: 8-byte device entry */ + +struct acpi_ivrs_device8a { + struct acpi_ivrs_de_header header; + u8 reserved1; + u16 used_id; + u8 reserved2; +}; + +/* Types 70-71: 8-byte device entry */ + +struct acpi_ivrs_device8b { + struct acpi_ivrs_de_header header; + u32 extended_data; +}; + +/* Values for extended_data above */ + +#define ACPI_IVHD_ATS_DISABLED (1<<31) + +/* Type 72: 8-byte device entry */ + +struct acpi_ivrs_device8c { + struct acpi_ivrs_de_header header; + u8 handle; + u16 used_id; + u8 variety; +}; + +/* Values for Variety field above */ + +#define ACPI_IVHD_IOAPIC 1 +#define ACPI_IVHD_HPET 2 + +/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ + +struct acpi_ivrs_memory { + struct acpi_ivrs_header header; + u16 aux_data; + u64 reserved; + u64 start_address; + u64 memory_length; +}; + /******************************************************************************* * * MCFG - PCI Memory Mapped Configuration table and sub-table + * Version 1 + * + * Conforms to "PCI Firmware Specification", Revision 3.0, June 20, 2005 * ******************************************************************************/ @@ -418,6 +637,10 @@ struct acpi_mcfg_allocation { /******************************************************************************* * * SPCR - Serial Port Console Redirection table + * Version 1 + * + * Conforms to "Serial Port Console Redirection Table", + * Version 1.00, January 11, 2002 * ******************************************************************************/ @@ -445,16 +668,25 @@ struct acpi_table_spcr { u32 reserved2; }; +/* Masks for pci_flags field above */ + +#define ACPI_SPCR_DO_NOT_DISABLE (1) + /******************************************************************************* * * SPMI - Server Platform Management Interface table + * Version 5 + * + * Conforms to "Intelligent Platform Management Interface Specification + * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with + * June 12, 2009 markup. 
* ******************************************************************************/ struct acpi_table_spmi { struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved; u8 interface_type; + u8 reserved; /* Must be 1 */ u16 spec_revision; /* Version of IPMI */ u8 interrupt_type; u8 gpe_number; /* GPE assigned */ @@ -466,11 +698,27 @@ struct acpi_table_spmi { u8 pci_bus; u8 pci_device; u8 pci_function; + u8 reserved2; +}; + +/* Values for interface_type above */ + +enum acpi_spmi_interface_types { + ACPI_SPMI_NOT_USED = 0, + ACPI_SPMI_KEYBOARD = 1, + ACPI_SPMI_SMI = 2, + ACPI_SPMI_BLOCK_TRANSFER = 3, + ACPI_SPMI_SMBUS = 4, + ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */ }; /******************************************************************************* * * TCPA - Trusted Computing Platform Alliance table + * Version 1 + * + * Conforms to "TCG PC Specific Implementation Specification", + * Version 1.1, August 18, 2003 * ******************************************************************************/ @@ -484,6 +732,10 @@ struct acpi_table_tcpa { /******************************************************************************* * * UEFI - UEFI Boot optimization Table + * Version 1 + * + * Conforms to "Unified Extensible Firmware Interface Specification", + * Version 2.3, May 8, 2009 * ******************************************************************************/ @@ -491,12 +743,34 @@ struct acpi_table_uefi { struct acpi_table_header header; /* Common ACPI table header */ u8 identifier[16]; /* UUID identifier */ u16 data_offset; /* Offset of remaining data in table */ - u8 data; }; +/******************************************************************************* + * + * WAET - Windows ACPI Emulated devices Table + * Version 1 + * + * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009 + * + ******************************************************************************/ + +struct acpi_table_waet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */ +#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */ + /******************************************************************************* * * WDAT - Watchdog Action Table + * Version 1 + * + * Conforms to "Hardware Watchdog Timers Design Specification", + * Copyright 2006 Microsoft Corporation. 
* ******************************************************************************/ @@ -516,10 +790,20 @@ struct acpi_table_wdat { u32 entries; /* Number of watchdog entries that follow */ }; +/* Masks for Flags field above */ + +#define ACPI_WDAT_ENABLED (1) +#define ACPI_WDAT_STOPPED 0x80 + /* WDAT Instruction Entries (actions) */ struct acpi_wdat_entry { - struct acpi_whea_header whea_header; /* Common header for WHEA tables */ + u8 action; + u8 instruction; + u16 reserved; + struct acpi_generic_address register_region; + u32 value; /* Value used with Read/Write register */ + u32 mask; /* Bitmask required for this register instruction */ }; /* Values for Action field above */ @@ -556,28 +840,27 @@ enum acpi_wdat_instructions { /******************************************************************************* * * WDRT - Watchdog Resource Table + * Version 1 + * + * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003", + * Version 1.01, August 28, 2006 * ******************************************************************************/ struct acpi_table_wdrt { struct acpi_table_header header; /* Common ACPI table header */ - u32 header_length; /* Watchdog Header Length */ - u8 pci_segment; /* PCI Segment number */ + struct acpi_generic_address control_register; + struct acpi_generic_address count_register; + u16 pci_device_id; + u16 pci_vendor_id; u8 pci_bus; /* PCI Bus number */ u8 pci_device; /* PCI Device number */ u8 pci_function; /* PCI Function number */ - u32 timer_period; /* Period of one timer count (msec) */ - u32 max_count; /* Maximum counter value supported */ - u32 min_count; /* Minimum counter value */ - u8 flags; - u8 reserved[3]; - u32 entries; /* Number of watchdog entries that follow */ + u8 pci_segment; /* PCI Segment number */ + u16 max_count; /* Maximum counter value supported */ + u8 units; }; -/* Flags */ - -#define ACPI_WDRT_TIMER_ENABLED (1) /* 00: Timer enabled */ - /* Reset to default packing */ #pragma pack() -- cgit v1.2.3-70-g09d2 From c276e3884163355464a76e60ed9e272b52b4acc2 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Mon, 27 Jul 2009 14:55:02 +0800 Subject: ACPICA: Update definitions for HEST table Eliminate duplicated code in disassembler. Shorten identifiers that were too long. 
Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/actbl1.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 0417f2abc44..34b10c06bcf 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -427,17 +427,17 @@ struct acpi_hest_aer_common { u16 reserved1; u8 flags; u8 enabled; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; u32 bus; u16 device; u16 function; u16 device_control; u16 reserved2; - u32 uncorrectable_error_mask; - u32 uncorrectable_error_severity; - u32 correctable_error_mask; - u32 advanced_error_capabilities; + u32 uncorrectable_mask; + u32 uncorrectable_severity; + u32 correctable_mask; + u32 advanced_capabilities; }; /* Masks for HEST Flags fields */ @@ -490,7 +490,7 @@ struct acpi_hest_ia_machine_check { u16 reserved1; u8 flags; u8 enabled; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; u64 global_capability_data; u64 global_control_data; @@ -505,7 +505,7 @@ struct acpi_table_hest_ia_corrected { u16 reserved1; u8 flags; u8 enabled; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; struct acpi_hest_notify notify; u8 num_hardware_banks; @@ -517,7 +517,7 @@ struct acpi_table_hest_ia_corrected { struct acpi_hest_ia_nmi { struct acpi_hest_header header; u32 reserved; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; u32 max_raw_data_length; }; @@ -544,9 +544,9 @@ struct acpi_hest_aer { struct acpi_hest_aer_bridge { struct acpi_hest_header header; struct acpi_hest_aer_common aer; - u32 second_uncorrectable_error_mask; - u32 second_uncorrectable_error_severity; - u32 second_advanced_capabilities; + u32 uncorrectable_mask2; + u32 uncorrectable_severity2; + u32 advanced_capabilities2; }; /* 9: Generic Hardware Error Source */ @@ -556,7 +556,7 @@ struct acpi_hest_generic { u16 related_source_id; u8 reserved; u8 enabled; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; u32 max_raw_data_length; struct acpi_generic_address error_status_address; -- cgit v1.2.3-70-g09d2 From 1872bbc94b2d092ece22a8fbf1c3e81f0fba0052 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Thu, 13 Aug 2009 13:31:00 +0800 Subject: ACPICA: Fix typo for HEST ACPI table Problem with the name of one of the subtables. Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/actbl1.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 34b10c06bcf..0b9b430b092 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -500,7 +500,7 @@ struct acpi_hest_ia_machine_check { /* 1: IA32 Corrected Machine Check */ -struct acpi_table_hest_ia_corrected { +struct acpi_hest_ia_corrected { struct acpi_hest_header header; u16 reserved1; u8 flags; -- cgit v1.2.3-70-g09d2 From 49ae80c9944401222e47108883c486b5a5a24006 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Thu, 13 Aug 2009 13:43:12 +0800 Subject: ACPICA: Update version to 20090730 Version 20090730. 
Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/acpixf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 063e577e791..f3b358b7432 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -47,7 +47,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20090625 +#define ACPI_CA_VERSION 0x20090730 #include "actypes.h" #include "actbl.h" -- cgit v1.2.3-70-g09d2 From a192a9580bcc41692be1f36b77c3b681827f566a Mon Sep 17 00:00:00 2001 From: Len Brown Date: Tue, 28 Jul 2009 16:45:54 -0400 Subject: ACPI: Move definition of PREFIX from acpi_bus.h to internal..h Linux/ACPI core files using internal.h all PREFIX "ACPI: ", however, not all ACPI drivers use/want it -- and they should not have to #undef PREFIX to define their own. Add GPL commment to internal.h while we are there. This does not change any actual console output, asside from a whitespace fix. Signed-off-by: Len Brown --- arch/x86/pci/mmconfig-shared.c | 2 ++ drivers/acpi/ac.c | 2 ++ drivers/acpi/battery.c | 2 ++ drivers/acpi/blacklist.c | 2 ++ drivers/acpi/button.c | 2 ++ drivers/acpi/cm_sbs.c | 2 ++ drivers/acpi/container.c | 2 ++ drivers/acpi/dock.c | 2 ++ drivers/acpi/ec.c | 1 - drivers/acpi/event.c | 2 ++ drivers/acpi/fan.c | 2 ++ drivers/acpi/glue.c | 2 ++ drivers/acpi/internal.h | 22 +++++++++++++++++++++- drivers/acpi/numa.c | 2 ++ drivers/acpi/pci_irq.c | 2 ++ drivers/acpi/pci_link.c | 2 ++ drivers/acpi/pci_root.c | 2 ++ drivers/acpi/power.c | 2 ++ drivers/acpi/processor_core.c | 2 ++ drivers/acpi/processor_idle.c | 2 ++ drivers/acpi/processor_perflib.c | 2 ++ drivers/acpi/processor_thermal.c | 2 ++ drivers/acpi/processor_throttling.c | 2 ++ drivers/acpi/sbs.c | 2 ++ drivers/acpi/sbshc.c | 2 ++ drivers/acpi/system.c | 2 ++ drivers/acpi/thermal.c | 2 ++ drivers/acpi/utils.c | 2 ++ drivers/acpi/video.c | 2 ++ drivers/acpi/video_detect.c | 2 ++ drivers/pci/dmar.c | 3 +-- drivers/platform/x86/fujitsu-laptop.c | 4 ++-- drivers/platform/x86/wmi.c | 1 - include/acpi/acpi_bus.h | 2 -- 34 files changed, 80 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 712443ec6d4..81d3466765c 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -18,6 +18,8 @@ #include #include +#define PREFIX "ACPI: " + /* aperture is up to 256MB but BIOS may reserve less */ #define MMCONFIG_APER_MIN (2 * 1024*1024) #define MMCONFIG_APER_MAX (256 * 1024*1024) diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 0df8fcb687d..98b9690b015 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -37,6 +37,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_AC_CLASS "ac_adapter" #define ACPI_AC_DEVICE_NAME "AC Adapter" #define ACPI_AC_FILE_STATE "state" diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 58b4517ce71..f8c3d1bb696 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -45,6 +45,8 @@ #include #endif +#define PREFIX "ACPI: " + #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF #define ACPI_BATTERY_CLASS "battery" diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index f6baa77deef..19152ea2b10 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -34,6 +34,8 @@ #include #include +#include "internal.h" + enum acpi_blacklist_predicates { all_versions, less_than_or_equal, diff --git 
a/drivers/acpi/button.c b/drivers/acpi/button.c index 9195deba9d9..d295bdccc09 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -33,6 +33,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_BUTTON_CLASS "button" #define ACPI_BUTTON_FILE_INFO "info" #define ACPI_BUTTON_FILE_STATE "state" diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c index 332fe4b2170..6c9ee68e46f 100644 --- a/drivers/acpi/cm_sbs.c +++ b/drivers/acpi/cm_sbs.c @@ -28,6 +28,8 @@ #include #include +#define PREFIX "ACPI: " + ACPI_MODULE_NAME("cm_sbs"); #define ACPI_AC_CLASS "ac_adapter" #define ACPI_BATTERY_CLASS "battery" diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index fe0cdf83641..5f2c3c00a31 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -35,6 +35,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_CONTAINER_DEVICE_NAME "ACPI container device" #define ACPI_CONTAINER_CLASS "container" diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index efb959d6c8a..9a855669ff1 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -33,6 +33,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver" ACPI_MODULE_NAME("dock"); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 391f331674c..5180f0f1dd0 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -47,7 +47,6 @@ #define ACPI_EC_DEVICE_NAME "Embedded Controller" #define ACPI_EC_FILE_INFO "info" -#undef PREFIX #define PREFIX "ACPI: EC: " /* EC status register */ diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index aeb7e5fb4a0..c511071bfd7 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -14,6 +14,8 @@ #include #include +#include "internal.h" + #define _COMPONENT ACPI_SYSTEM_COMPONENT ACPI_MODULE_NAME("event"); diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 53698ea0837..f419849a0d3 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -34,6 +34,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_FAN_CLASS "fan" #define ACPI_FAN_FILE_STATE "state" diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index a8a5c29958c..dc36a448de4 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -12,6 +12,8 @@ #include #include +#include "internal.h" + #define ACPI_GLUE_DEBUG 0 #if ACPI_GLUE_DEBUG #define DBG(x...) printk(PREFIX x) diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 11a69b53004..074cf8682d5 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -1,4 +1,24 @@ -/* For use by Linux/ACPI infrastructure, not drivers */ +/* + * acpi/internal.h + * For use by Linux/ACPI infrastructure, not drivers + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#define PREFIX "ACPI: " int init_acpi_device_notify(void); int acpi_scan_init(void); diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index d440ccd27d9..202dd0c976a 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -30,6 +30,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_NUMA 0x80000000 #define _COMPONENT ACPI_NUMA ACPI_MODULE_NAME("numa"); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index b794eb88ab9..843699ed93f 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -40,6 +40,8 @@ #include #include +#define PREFIX "ACPI: " + #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_irq"); diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 16e0f9d3d17..394ae89409c 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -43,6 +43,8 @@ #include #include +#define PREFIX "ACPI: " + #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_link"); #define ACPI_PCI_LINK_CLASS "pci_irq_routing" diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 55b5b90c2a4..dee916707a7 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -36,6 +36,8 @@ #include #include +#define PREFIX "ACPI: " + #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_root"); #define ACPI_PCI_ROOT_CLASS "pci_bridge" diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index d74365d4a6e..e86603f37de 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -44,6 +44,8 @@ #include #include +#define PREFIX "ACPI: " + #define _COMPONENT ACPI_POWER_COMPONENT ACPI_MODULE_NAME("power"); #define ACPI_POWER_CLASS "power_resource" diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 2cc4b303387..b4a1ab297e7 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -59,6 +59,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_PROCESSOR_CLASS "processor" #define ACPI_PROCESSOR_DEVICE_NAME "Processor" #define ACPI_PROCESSOR_FILE_INFO "info" diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 66393d5c4c7..22aab1fc9b4 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -60,6 +60,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_PROCESSOR_CLASS "processor" #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_idle"); diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 60e543d3234..11088cf1031 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -39,6 +39,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_PROCESSOR_CLASS "processor" #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" #define _COMPONENT ACPI_PROCESSOR_COMPONENT diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 31adda1099e..3e3181c0efc 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -40,6 +40,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_PROCESSOR_CLASS "processor" #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_thermal"); diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index ae39797aab5..b366b9c13d4 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -41,6 +41,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_PROCESSOR_CLASS "processor" #define _COMPONENT ACPI_PROCESSOR_COMPONENT 
ACPI_MODULE_NAME("processor_throttling"); diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 4b214b74eba..52b9db8afc2 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -46,6 +46,8 @@ #include "sbshc.h" +#define PREFIX "ACPI: " + #define ACPI_SBS_CLASS "sbs" #define ACPI_AC_CLASS "ac_adapter" #define ACPI_BATTERY_CLASS "battery" diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 0619734895b..d9339806df4 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -15,6 +15,8 @@ #include #include "sbshc.h" +#define PREFIX "ACPI: " + #define ACPI_SMB_HC_CLASS "smbus_host_controller" #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC" diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index 9c61ab2177c..d11282975f3 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c @@ -31,6 +31,8 @@ #include +#define PREFIX "ACPI: " + #define _COMPONENT ACPI_SYSTEM_COMPONENT ACPI_MODULE_NAME("system"); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 564ea142428..65f67815902 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -47,6 +47,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_THERMAL_CLASS "thermal_zone" #define ACPI_THERMAL_DEVICE_NAME "Thermal Zone" #define ACPI_THERMAL_FILE_STATE "state" diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index f844941089b..811fec10462 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -30,6 +30,8 @@ #include #include +#include "internal.h" + #define _COMPONENT ACPI_BUS_COMPONENT ACPI_MODULE_NAME("utils"); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 8851315ce85..a0fa3946b50 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -44,6 +44,8 @@ #include #include +#define PREFIX "ACPI: " + #define ACPI_VIDEO_CLASS "video" #define ACPI_VIDEO_BUS_NAME "Video Bus" #define ACPI_VIDEO_DEVICE_NAME "Video Device" diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 7cd2b63435e..7032f25da9b 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -38,6 +38,8 @@ #include #include +#define PREFIX "ACPI: " + ACPI_MODULE_NAME("video"); #define _COMPONENT ACPI_VIDEO_COMPONENT diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 7b287cb38b7..998f02d2ba4 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -34,8 +34,7 @@ #include #include -#undef PREFIX -#define PREFIX "DMAR:" +#define PREFIX "DMAR: " /* No locks are needed as DMA remapping hardware unit * list is constructed at boot time and hotplug of diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 218b9a16ac3..eabddc9c192 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -700,7 +700,7 @@ static int acpi_fujitsu_add(struct acpi_device *device) goto end; } - printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + printk(KERN_INFO "ACPI: %s [%s] (%s)\n", acpi_device_name(device), acpi_device_bid(device), !device->power.state ? "on" : "off"); @@ -874,7 +874,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) goto end; } - printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + printk(KERN_INFO "ACPI: %s [%s] (%s)\n", acpi_device_name(device), acpi_device_bid(device), !device->power.state ? 
"on" : "off"); diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index f215a591919..177f8d767df 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -42,7 +42,6 @@ MODULE_LICENSE("GPL"); #define ACPI_WMI_CLASS "wmi" -#undef PREFIX #define PREFIX "ACPI: WMI: " static DEFINE_MUTEX(wmi_data_lock); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c65e4ce6c3a..f485107ddc4 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -30,8 +30,6 @@ #include -#define PREFIX "ACPI: " - /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 struct acpi_handle_list { -- cgit v1.2.3-70-g09d2 From e55a5999ffcf72dc4d43d73618957964cb87065a Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Tue, 28 Jul 2009 17:41:53 +0800 Subject: ACPI: Handle CONFIG_ACPI=n better from linux/acpi.h linux/acpi.h is the top level header for interfacing with the ACPI sub-system, so acpi_disabled should be up there instead of down in asm/acpi.h -- particularly since asm/acpi.h doesn't exist for all architectures. Same story for acpi_table_parse(), which is a top-level API to Linux/ACPI. This is necessary for building some code that used to always depend on CONFIG_ACPI=y, but will soon also need to build with CONFIG_ACPI=n. Signed-off-by: Feng Tang Signed-off-by: Len Brown --- arch/x86/include/asm/acpi.h | 1 - include/linux/acpi.h | 11 ++++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 20d1465a2ab..4518dc50090 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -144,7 +144,6 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) #else /* !CONFIG_ACPI */ -#define acpi_disabled 1 #define acpi_lapic 0 #define acpi_ioapic 0 static inline void acpi_noirq_set(void) { } diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 34321cfffea..3fce811bf9a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -292,7 +292,10 @@ void __init acpi_s4_no_nvs(void); extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags); extern void acpi_early_init(void); -#else /* CONFIG_ACPI */ +#else /* !CONFIG_ACPI */ + +#define acpi_disabled 1 + static inline void acpi_early_init(void) { } static inline int early_acpi_boot_init(void) @@ -331,5 +334,11 @@ static inline int acpi_check_mem_region(resource_size_t start, return 0; } +struct acpi_table_header; +static inline int acpi_table_parse(char *id, + int (*handler)(struct acpi_table_header *)) +{ + return -1; +} #endif /* !CONFIG_ACPI */ #endif /*_LINUX_ACPI_H*/ -- cgit v1.2.3-70-g09d2 From 117a9ac777f8034d4675b821172d2ff71f6ec47a Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Fri, 14 Aug 2009 15:10:24 -0400 Subject: SFI: create linux/sfi.h include/linux/include/sfi.h defines everything that customers of SFI need to know in order to use the SFI suport in the kernel. The primary API is sfi_table_parse(), where a driver or another part of the kernel can supply a handler to parse the named table. sfi.h also includes the currently defined table signatures and table formats. 
Signed-off-by: Feng Tang Signed-off-by: Len Brown --- include/linux/sfi.h | 206 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) create mode 100644 include/linux/sfi.h (limited to 'include') diff --git a/include/linux/sfi.h b/include/linux/sfi.h new file mode 100644 index 00000000000..9a6f7607174 --- /dev/null +++ b/include/linux/sfi.h @@ -0,0 +1,206 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef _LINUX_SFI_H +#define _LINUX_SFI_H + +/* Table signatures reserved by the SFI specification */ +#define SFI_SIG_SYST "SYST" +#define SFI_SIG_FREQ "FREQ" +#define SFI_SIG_IDLE "IDLE" +#define SFI_SIG_CPUS "CPUS" +#define SFI_SIG_MTMR "MTMR" +#define SFI_SIG_MRTC "MRTC" +#define SFI_SIG_MMAP "MMAP" +#define SFI_SIG_APIC "APIC" +#define SFI_SIG_XSDT "XSDT" +#define SFI_SIG_WAKE "WAKE" +#define SFI_SIG_SPIB "SPIB" +#define SFI_SIG_I2CB "I2CB" +#define SFI_SIG_GPEM "GPEM" + +#define SFI_SIGNATURE_SIZE 4 +#define SFI_OEM_ID_SIZE 6 +#define SFI_OEM_TABLE_ID_SIZE 8 + +#define SFI_SYST_SEARCH_BEGIN 0x000E0000 +#define SFI_SYST_SEARCH_END 0x000FFFFF + +#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \ + ((ptable->header.len - sizeof(struct sfi_table_header)) / \ + (sizeof(entry_type))) +/* + * Table structures must be byte-packed to match the SFI specification, + * as they are provided by the BIOS. + */ +struct sfi_table_header { + char sig[SFI_SIGNATURE_SIZE]; + u32 len; + u8 rev; + u8 csum; + char oem_id[SFI_OEM_ID_SIZE]; + char oem_table_id[SFI_OEM_TABLE_ID_SIZE]; +} __packed; + +struct sfi_table_simple { + struct sfi_table_header header; + u64 pentry[1]; +} __packed; + +/* Comply with UEFI spec 2.1 */ +struct sfi_mem_entry { + u32 type; + u64 phys_start; + u64 virt_start; + u64 pages; + u64 attrib; +} __packed; + +struct sfi_cpu_table_entry { + u32 apic_id; +} __packed; + +struct sfi_cstate_table_entry { + u32 hint; /* MWAIT hint */ + u32 latency; /* latency in ms */ +} __packed; + +struct sfi_apic_table_entry { + u64 phys_addr; /* phy base addr for APIC reg */ +} __packed; + +struct sfi_freq_table_entry { + u32 freq_mhz; /* in MHZ */ + u32 latency; /* transition latency in ms */ + u32 ctrl_val; /* value to write to PERF_CTL */ +} __packed; + +struct sfi_wake_table_entry { + u64 phys_addr; /* pointer to where the wake vector locates */ +} __packed; + +struct sfi_timer_table_entry { + u64 phys_addr; /* phy base addr for the timer */ + u32 freq_hz; /* in HZ */ + u32 irq; +} __packed; + +struct sfi_rtc_table_entry { + u64 phys_addr; /* phy base addr for the RTC */ + u32 irq; +} __packed; + +struct sfi_spi_table_entry { + u16 host_num; /* attached to host 0, 1...*/ + u16 cs; /* chip select */ + u16 irq_info; + char name[16]; + u8 dev_info[10]; +} __packed; + +struct sfi_i2c_table_entry { + u16 host_num; + u16 addr; /* slave addr */ + u16 irq_info; + char name[16]; + u8 dev_info[10]; +} __packed; + +struct sfi_gpe_table_entry { + u16 logical_id; /* logical id */ + u16 phys_id; /* physical GPE id */ +} __packed; + + +typedef int (*sfi_table_handler) (struct sfi_table_header *table); + +#ifdef CONFIG_SFI +extern void __init sfi_init(void); +extern int __init sfi_platform_init(void); +extern void __init sfi_init_late(void); +extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id, + sfi_table_handler handler); + +extern int sfi_disabled; +static inline void disable_sfi(void) +{ + sfi_disabled = 1; +} + +#else /* !CONFIG_SFI */ + +static inline void sfi_init(void) +{ +} + +static inline void sfi_init_late(void) +{ +} + +#define sfi_disabled 0 + +static inline int sfi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + sfi_table_handler handler) +{ + return -1; +} + +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_H*/ -- cgit v1.2.3-70-g09d2 From 13e82d023c4c3f13ab1e665cbb917a7ebba8935c Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Fri, 14 Aug 2009 15:17:53 -0400 Subject: SFI: add capability to parse ACPI tables Extend SFI to access standard ACPI 
tables. (eg. the PCI MCFG) using sfi_acpi_table_parse(). Note that this is _not_ a hybrid ACPI + SFI mode. The platform boots in either ACPI mode or SFI mode. SFI runs only with acpi_disabled=1, which can be set at build-time via CONFIG_ACPI=n, or at boot time by the failure to find ACPI platform support. So this extension simply allows SFI-platforms to re-use existing standard table formats that happen to be defined to live in ACPI envelopes. Signed-off-by: Feng Tang Signed-off-by: Len Brown --- drivers/sfi/sfi_acpi.c | 175 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/sfi_acpi.h | 93 +++++++++++++++++++++++++ 2 files changed, 268 insertions(+) create mode 100644 drivers/sfi/sfi_acpi.c create mode 100644 include/linux/sfi_acpi.h (limited to 'include') diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c new file mode 100644 index 00000000000..34aba30eb84 --- /dev/null +++ b/drivers/sfi/sfi_acpi.c @@ -0,0 +1,175 @@ +/* sfi_acpi.c Simple Firmware Interface - ACPI extensions */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#define KMSG_COMPONENT "SFI" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include + +#include +#include "sfi_core.h" + +/* + * SFI can access ACPI-defined tables via an optional ACPI XSDT. + * + * This allows re-use, and avoids re-definition, of standard tables. + * For example, the "MCFG" table is defined by PCI, reserved by ACPI, + * and is expected to be present many SFI-only systems. + */ + +static struct acpi_table_xsdt *xsdt_va __read_mostly; + +#define XSDT_GET_NUM_ENTRIES(ptable, entry_type) \ + ((ptable->header.length - sizeof(struct acpi_table_header)) / \ + (sizeof(entry_type))) + +static inline struct sfi_table_header *acpi_to_sfi_th( + struct acpi_table_header *th) +{ + return (struct sfi_table_header *)th; +} + +static inline struct acpi_table_header *sfi_to_acpi_th( + struct sfi_table_header *th) +{ + return (struct acpi_table_header *)th; +} + +/* + * sfi_acpi_parse_xsdt() + * + * Parse the ACPI XSDT for later access by sfi_acpi_table_parse(). + */ +static int __init sfi_acpi_parse_xsdt(struct sfi_table_header *th) +{ + struct sfi_table_key key = SFI_ANY_KEY; + int tbl_cnt, i; + void *ret; + + xsdt_va = (struct acpi_table_xsdt *)th; + tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64); + for (i = 0; i < tbl_cnt; i++) { + ret = sfi_check_table(xsdt_va->table_offset_entry[i], &key); + if (IS_ERR(ret)) { + disable_sfi(); + return -1; + } + } + + return 0; +} + +int __init sfi_acpi_init(void) +{ + struct sfi_table_key xsdt_key = { .sig = SFI_SIG_XSDT }; + + sfi_table_parse(SFI_SIG_XSDT, NULL, NULL, sfi_acpi_parse_xsdt); + + /* Only call the get_table to keep the table mapped */ + xsdt_va = (struct acpi_table_xsdt *)sfi_get_table(&xsdt_key); + return 0; +} + +static struct acpi_table_header *sfi_acpi_get_table(struct sfi_table_key *key) +{ + u32 tbl_cnt, i; + void *ret; + + tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64); + for (i = 0; i < tbl_cnt; i++) { + ret = sfi_check_table(xsdt_va->table_offset_entry[i], key); + if (!IS_ERR(ret) && ret) + return sfi_to_acpi_th(ret); + } + + return NULL; +} + +static void sfi_acpi_put_table(struct acpi_table_header *table) +{ + sfi_put_table(acpi_to_sfi_th(table)); +} + +/* + * sfi_acpi_table_parse() + * + * Find specified table in XSDT, run handler on it and return its return value + */ +int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id, + int(*handler)(struct acpi_table_header *)) +{ + struct acpi_table_header *table = NULL; + struct sfi_table_key key; + int ret = 0; + + if (sfi_disabled) + return -1; + + key.sig = signature; + key.oem_id = oem_id; + key.oem_table_id = oem_table_id; + + table = sfi_acpi_get_table(&key); + if (!table) + return -EINVAL; + + ret = handler(table); + sfi_acpi_put_table(table); + return ret; +} diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h new file mode 100644 index 00000000000..c4a5a8cd446 --- /dev/null +++ b/include/linux/sfi_acpi.h @@ -0,0 +1,93 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_ACPI_H +#define _LINUX_SFI_ACPI_H + +#ifdef CONFIG_SFI +#include /* struct acpi_table_header */ + +extern int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)); + +static inline int acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + if (!acpi_table_parse(signature, handler)) + return 0; + + return sfi_acpi_table_parse(signature, NULL, NULL, handler); +} +#else /* !CONFIG_SFI */ + +static inline int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)) +{ + return -1; +} + +static inline int acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + return acpi_table_parse(signature, handler); +} +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_ACPI_H*/ -- cgit v1.2.3-70-g09d2 From 2b022e3d4bf9885f781221c59d86283a2cdfc2ed Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Mon, 10 Aug 2009 10:48:59 +0800 Subject: timers: Add tracepoints for timer_list timers Add tracepoints which cover the timer life cycle. The tracepoints are integrated with the already existing debug_object debug points as far as possible. 
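As a hedged sketch of how these tracepoints can be consumed in-kernel (it assumes the register_trace_timer_start()/unregister_trace_timer_start() helpers generated for the timer_start tracepoint; the probe body and module boilerplate are purely illustrative and not part of this patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/tracepoint.h>
#include <trace/events/timer.h>

/* Probe signature mirrors TP_PROTO(struct timer_list *timer, unsigned long expires) */
static void probe_timer_start(struct timer_list *timer, unsigned long expires)
{
	pr_info("timer %p armed, func %pf, expires %lu\n",
		timer, timer->function, expires);
}

static int __init timer_probe_init(void)
{
	return register_trace_timer_start(probe_timer_start);
}

static void __exit timer_probe_exit(void)
{
	unregister_trace_timer_start(probe_timer_start);
	tracepoint_synchronize_unregister();
}

module_init(timer_probe_init);
module_exit(timer_probe_exit);
MODULE_LICENSE("GPL");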
Based on patches from Mathieu: http://marc.info/?l=linux-kernel&m=123791201816247&w=2 and Anton: http://marc.info/?l=linux-kernel&m=124331396919301&w=2 [ tglx: Fixed timeout value in timer_start tracepoint, massaged comments and made the printk's more readable ] Signed-off-by: Xiao Guangrong Cc: Anton Blanchard Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Mathieu Desnoyers Cc: Peter Zijlstra Cc: KOSAKI Motohiro Cc: Zhaolei LKML-Reference: <4A7F8A9B.3040201@cn.fujitsu.com> Signed-off-by: Thomas Gleixner --- include/trace/events/timer.h | 137 +++++++++++++++++++++++++++++++++++++++++++ kernel/timer.c | 32 ++++++++-- 2 files changed, 165 insertions(+), 4 deletions(-) create mode 100644 include/trace/events/timer.h (limited to 'include') diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h new file mode 100644 index 00000000000..725892a93b4 --- /dev/null +++ b/include/trace/events/timer.h @@ -0,0 +1,137 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM timer + +#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TIMER_H + +#include +#include + +/** + * timer_init - called when the timer is initialized + * @timer: pointer to struct timer_list + */ +TRACE_EVENT(timer_init, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer %p", __entry->timer) +); + +/** + * timer_start - called when the timer is started + * @timer: pointer to struct timer_list + * @expires: the timers expiry time + */ +TRACE_EVENT(timer_start, + + TP_PROTO(struct timer_list *timer, unsigned long expires), + + TP_ARGS(timer, expires), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( void *, function ) + __field( unsigned long, expires ) + __field( unsigned long, now ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->function = timer->function; + __entry->expires = expires; + __entry->now = jiffies; + ), + + TP_printk("timer %p: func %pf, expires %lu, timeout %ld", + __entry->timer, __entry->function, __entry->expires, + (long)__entry->expires - __entry->now) +); + +/** + * timer_expire_entry - called immediately before the timer callback + * @timer: pointer to struct timer_list + * + * Allows to determine the timer latency. + */ +TRACE_EVENT(timer_expire_entry, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( unsigned long, now ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->now = jiffies; + ), + + TP_printk("timer %p: now %lu", __entry->timer, __entry->now) +); + +/** + * timer_expire_exit - called immediately after the timer callback returns + * @timer: pointer to struct timer_list + * + * When used in combination with the timer_expire_entry tracepoint we can + * determine the runtime of the timer callback function. + * + * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might + * be invalid. We solely track the pointer. 
+ */ +TRACE_EVENT(timer_expire_exit, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field(void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer %p", __entry->timer) +); + +/** + * timer_cancel - called when the timer is canceled + * @timer: pointer to struct timer_list + */ +TRACE_EVENT(timer_cancel, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer %p", __entry->timer) +); + +#endif /* _TRACE_TIMER_H */ + +/* This part must be outside protection */ +#include diff --git a/kernel/timer.c b/kernel/timer.c index 8e92be654da..a7352b00703 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -46,6 +46,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include + u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; EXPORT_SYMBOL(jiffies_64); @@ -521,6 +524,25 @@ static inline void debug_timer_activate(struct timer_list *timer) { } static inline void debug_timer_deactivate(struct timer_list *timer) { } #endif +static inline void debug_init(struct timer_list *timer) +{ + debug_timer_init(timer); + trace_timer_init(timer); +} + +static inline void +debug_activate(struct timer_list *timer, unsigned long expires) +{ + debug_timer_activate(timer); + trace_timer_start(timer, expires); +} + +static inline void debug_deactivate(struct timer_list *timer) +{ + debug_timer_deactivate(timer); + trace_timer_cancel(timer); +} + static void __init_timer(struct timer_list *timer, const char *name, struct lock_class_key *key) @@ -549,7 +571,7 @@ void init_timer_key(struct timer_list *timer, const char *name, struct lock_class_key *key) { - debug_timer_init(timer); + debug_init(timer); __init_timer(timer, name, key); } EXPORT_SYMBOL(init_timer_key); @@ -568,7 +590,7 @@ static inline void detach_timer(struct timer_list *timer, { struct list_head *entry = &timer->entry; - debug_timer_deactivate(timer); + debug_deactivate(timer); __list_del(entry->prev, entry->next); if (clear_pending) @@ -632,7 +654,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, goto out_unlock; } - debug_timer_activate(timer); + debug_activate(timer, expires); new_base = __get_cpu_var(tvec_bases); @@ -787,7 +809,7 @@ void add_timer_on(struct timer_list *timer, int cpu) BUG_ON(timer_pending(timer) || !timer->function); spin_lock_irqsave(&base->lock, flags); timer_set_base(timer, base); - debug_timer_activate(timer); + debug_activate(timer, timer->expires); if (time_before(timer->expires, base->next_timer) && !tbase_get_deferrable(timer->base)) base->next_timer = timer->expires; @@ -1000,7 +1022,9 @@ static inline void __run_timers(struct tvec_base *base) */ lock_map_acquire(&lockdep_map); + trace_timer_expire_entry(timer); fn(data); + trace_timer_expire_exit(timer); lock_map_release(&lockdep_map); -- cgit v1.2.3-70-g09d2 From c6a2a1770245f654f35f60e1458d4356680f9519 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Mon, 10 Aug 2009 10:51:23 +0800 Subject: hrtimer: Add tracepoint for hrtimers Add tracepoints which cover the life cycle of a hrtimer. The tracepoints are integrated with the already existing debug_object debug points as far as possible. [ tglx: Fixed comments, made output conistent, easier to read and parse. Fixed output for 32bit archs which do not use the scalar representation of ktime_t. Hand current time to trace_hrtimer_expiry_entry instead of calling get_time() inside of the trace assignment. 
] Signed-off-by: Xiao Guangrong Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Mathieu Desnoyers Cc: Anton Blanchard Cc: Peter Zijlstra Cc: KOSAKI Motohiro Cc: Zhaolei LKML-Reference: <4A7F8B2B.5020908@cn.fujitsu.com> Signed-off-by: Thomas Gleixner --- include/trace/events/timer.h | 139 +++++++++++++++++++++++++++++++++++++++++++ kernel/hrtimer.c | 40 ++++++++++--- 2 files changed, 171 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 725892a93b4..df3c07fa0cb 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -5,6 +5,7 @@ #define _TRACE_TIMER_H #include +#include #include /** @@ -131,6 +132,144 @@ TRACE_EVENT(timer_cancel, TP_printk("timer %p", __entry->timer) ); +/** + * hrtimer_init - called when the hrtimer is initialized + * @timer: pointer to struct hrtimer + * @clockid: the hrtimers clock + * @mode: the hrtimers mode + */ +TRACE_EVENT(hrtimer_init, + + TP_PROTO(struct hrtimer *timer, clockid_t clockid, + enum hrtimer_mode mode), + + TP_ARGS(timer, clockid, mode), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( clockid_t, clockid ) + __field( enum hrtimer_mode, mode ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->clockid = clockid; + __entry->mode = mode; + ), + + TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, + __entry->clockid == CLOCK_REALTIME ? + "CLOCK_REALTIME" : "CLOCK_MONOTONIC", + __entry->mode == HRTIMER_MODE_ABS ? + "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL") +); + +/** + * hrtimer_start - called when the hrtimer is started + * @timer: pointer to struct hrtimer + */ +TRACE_EVENT(hrtimer_start, + + TP_PROTO(struct hrtimer *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( void *, function ) + __field( s64, expires ) + __field( s64, softexpires ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->function = timer->function; + __entry->expires = hrtimer_get_expires(timer).tv64; + __entry->softexpires = hrtimer_get_softexpires(timer).tv64; + ), + + TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", + __entry->timer, __entry->function, + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->expires }), + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->softexpires })) +); + +/** + * htimmer_expire_entry - called immediately before the hrtimer callback + * @timer: pointer to struct hrtimer + * @now: pointer to variable which contains current time of the + * timers base. + * + * Allows to determine the timer latency. + */ +TRACE_EVENT(hrtimer_expire_entry, + + TP_PROTO(struct hrtimer *timer, ktime_t *now), + + TP_ARGS(timer, now), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( s64, now ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->now = now->tv64; + ), + + TP_printk("hrtimer %p, now %llu", __entry->timer, + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->now })) + ); + +/** + * hrtimer_expire_exit - called immediately after the hrtimer callback returns + * @timer: pointer to struct hrtimer + * + * When used in combination with the hrtimer_expire_entry tracepoint we can + * determine the runtime of the callback function. 
+ */ +TRACE_EVENT(hrtimer_expire_exit, + + TP_PROTO(struct hrtimer *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("hrtimer %p", __entry->timer) +); + +/** + * hrtimer_cancel - called when the hrtimer is canceled + * @timer: pointer to struct hrtimer + */ +TRACE_EVENT(hrtimer_cancel, + + TP_PROTO(struct hrtimer *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("hrtimer %p", __entry->timer) +); + #endif /* _TRACE_TIMER_H */ /* This part must be outside protection */ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index e2f91ecc01a..b44d1b07377 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -48,6 +48,8 @@ #include +#include + /* * The timer bases: * @@ -441,6 +443,26 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { } static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } #endif +static inline void +debug_init(struct hrtimer *timer, clockid_t clockid, + enum hrtimer_mode mode) +{ + debug_hrtimer_init(timer); + trace_hrtimer_init(timer, clockid, mode); +} + +static inline void debug_activate(struct hrtimer *timer) +{ + debug_hrtimer_activate(timer); + trace_hrtimer_start(timer); +} + +static inline void debug_deactivate(struct hrtimer *timer) +{ + debug_hrtimer_deactivate(timer); + trace_hrtimer_cancel(timer); +} + /* High resolution timer related functions */ #ifdef CONFIG_HIGH_RES_TIMERS @@ -797,7 +819,7 @@ static int enqueue_hrtimer(struct hrtimer *timer, struct hrtimer *entry; int leftmost = 1; - debug_hrtimer_activate(timer); + debug_activate(timer); /* * Find the right place in the rbtree: @@ -883,7 +905,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) * reprogramming happens in the interrupt handler. This is a * rare case and less expensive than a smp call. */ - debug_hrtimer_deactivate(timer); + debug_deactivate(timer); timer_stats_hrtimer_clear_start_info(timer); reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, @@ -1116,7 +1138,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, enum hrtimer_mode mode) { - debug_hrtimer_init(timer); + debug_init(timer, clock_id, mode); __hrtimer_init(timer, clock_id, mode); } EXPORT_SYMBOL_GPL(hrtimer_init); @@ -1140,7 +1162,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) } EXPORT_SYMBOL_GPL(hrtimer_get_res); -static void __run_hrtimer(struct hrtimer *timer) +static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) { struct hrtimer_clock_base *base = timer->base; struct hrtimer_cpu_base *cpu_base = base->cpu_base; @@ -1149,7 +1171,7 @@ static void __run_hrtimer(struct hrtimer *timer) WARN_ON(!irqs_disabled()); - debug_hrtimer_deactivate(timer); + debug_deactivate(timer); __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); timer_stats_account_hrtimer(timer); fn = timer->function; @@ -1160,7 +1182,9 @@ static void __run_hrtimer(struct hrtimer *timer) * the timer base. 
*/ spin_unlock(&cpu_base->lock); + trace_hrtimer_expire_entry(timer, now); restart = fn(timer); + trace_hrtimer_expire_exit(timer); spin_lock(&cpu_base->lock); /* @@ -1271,7 +1295,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) break; } - __run_hrtimer(timer); + __run_hrtimer(timer, &basenow); } base++; } @@ -1393,7 +1417,7 @@ void hrtimer_run_queues(void) hrtimer_get_expires_tv64(timer)) break; - __run_hrtimer(timer); + __run_hrtimer(timer, &base->softirq_time); } spin_unlock(&cpu_base->lock); } @@ -1569,7 +1593,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, while ((node = rb_first(&old_base->active))) { timer = rb_entry(node, struct hrtimer, node); BUG_ON(hrtimer_callback_running(timer)); - debug_hrtimer_deactivate(timer); + debug_deactivate(timer); /* * Mark it as STATE_MIGRATE not INACTIVE otherwise the -- cgit v1.2.3-70-g09d2 From 3f0a525ebf4b8ef041a332bbe4a73aee94bb064b Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Mon, 10 Aug 2009 10:52:30 +0800 Subject: itimers: Add tracepoints for itimer Add tracepoints for all itimer variants: ITIMER_REAL, ITIMER_VIRTUAL and ITIMER_PROF. [ tglx: Fixed comments and made the output more readable, parseable and consistent. Replaced pid_vnr by pid_nr because the hrtimer callback can happen in any namespace ] Signed-off-by: Xiao Guangrong Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Mathieu Desnoyers Cc: Anton Blanchard Cc: Peter Zijlstra Cc: KOSAKI Motohiro Cc: Zhaolei LKML-Reference: <4A7F8B6E.2010109@cn.fujitsu.com> Signed-off-by: Thomas Gleixner --- include/trace/events/timer.h | 66 ++++++++++++++++++++++++++++++++++++++++++++ kernel/itimer.c | 5 ++++ kernel/posix-cpu-timers.c | 7 ++++- 3 files changed, 77 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index df3c07fa0cb..1844c48d640 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -270,6 +270,72 @@ TRACE_EVENT(hrtimer_cancel, TP_printk("hrtimer %p", __entry->timer) ); +/** + * itimer_state - called when itimer is started or canceled + * @which: name of the interval timer + * @value: the itimers value, itimer is canceled if value->it_value is + * zero, otherwise it is started + * @expires: the itimers expiry time + */ +TRACE_EVENT(itimer_state, + + TP_PROTO(int which, const struct itimerval *const value, + cputime_t expires), + + TP_ARGS(which, value, expires), + + TP_STRUCT__entry( + __field( int, which ) + __field( cputime_t, expires ) + __field( long, value_sec ) + __field( long, value_usec ) + __field( long, interval_sec ) + __field( long, interval_usec ) + ), + + TP_fast_assign( + __entry->which = which; + __entry->expires = expires; + __entry->value_sec = value->it_value.tv_sec; + __entry->value_usec = value->it_value.tv_usec; + __entry->interval_sec = value->it_interval.tv_sec; + __entry->interval_usec = value->it_interval.tv_usec; + ), + + TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", + __entry->which, __entry->expires, + __entry->value_sec, __entry->value_usec, + __entry->interval_sec, __entry->interval_usec) +); + +/** + * itimer_expire - called when itimer expires + * @which: type of the interval timer + * @pid: pid of the process which owns the timer + * @now: current time, used to calculate the latency of itimer + */ +TRACE_EVENT(itimer_expire, + + TP_PROTO(int which, struct pid *pid, cputime_t now), + + TP_ARGS(which, pid, now), + + TP_STRUCT__entry( + __field( int , which ) + __field( pid_t, pid ) + 
__field( cputime_t, now ) + ), + + TP_fast_assign( + __entry->which = which; + __entry->now = now; + __entry->pid = pid_nr(pid); + ), + + TP_printk("which %d, pid %d, now %lu", __entry->which, + (int) __entry->pid, __entry->now) +); + #endif /* _TRACE_TIMER_H */ /* This part must be outside protection */ diff --git a/kernel/itimer.c b/kernel/itimer.c index 8078a32d3b1..b03451ede52 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -122,6 +123,7 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer) struct signal_struct *sig = container_of(timer, struct signal_struct, real_timer); + trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0); kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid); return HRTIMER_NORESTART; @@ -166,6 +168,8 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, } it->expires = nval; it->incr = ninterval; + trace_itimer_state(clock_id == CPUCLOCK_VIRT ? + ITIMER_VIRTUAL : ITIMER_PROF, value, nval); spin_unlock_irq(&tsk->sighand->siglock); @@ -217,6 +221,7 @@ again: } else tsk->signal->it_real_incr.tv64 = 0; + trace_itimer_state(ITIMER_REAL, value, 0); spin_unlock_irq(&tsk->sighand->siglock); break; case ITIMER_VIRTUAL: diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 12161f74744..5c9dc228747 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -8,6 +8,7 @@ #include #include #include +#include /* * Called after updating RLIMIT_CPU to set timer expiration if necessary. @@ -1090,9 +1091,13 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, cputime_one_jiffy); it->error -= onecputick; } - } else + } else { it->expires = cputime_zero; + } + trace_itimer_expire(signo == SIGPROF ? + ITIMER_PROF : ITIMER_VIRTUAL, + tsk->signal->leader_pid, cur_time); __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); } -- cgit v1.2.3-70-g09d2 From 138d15692bf76841f252d4b836a535cf5f9154e9 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 28 Aug 2009 23:29:38 +0400 Subject: ACPICA: Don't switch task then not allowed Signed-off-by: Alexey Starikovskiy Signed-off-by: Len Brown --- include/acpi/platform/aclinux.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index fcb8e4b159b..9d7febde10a 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -149,10 +149,10 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) #define ACPI_FREE(a) kfree(a) /* Used within ACPICA to show where it is safe to preempt execution */ - +#include #define ACPI_PREEMPTION_POINT() \ do { \ - if (!irqs_disabled()) \ + if (!in_atomic_preempt_off()) \ cond_resched(); \ } while (0) -- cgit v1.2.3-70-g09d2 From ad283ea4a3ce82cda2efe33163748a397b31b1eb Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:09:26 -0700 Subject: async_tx: add sum check flags Replace the flat zero_sum_result with a collection of flags to contain the P (xor) zero-sum result, and the soon to be utilized Q (raid6 reed solomon syndrome) zero-sum result. Use the SUM_CHECK_ namespace instead of DMA_ since these flags will be used on non-dma-zero-sum enabled platforms. 
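A hedged sketch of how a caller might interpret the new flags after an xor-validate: only async_xor_val(), async_tx_quiesce() and the SUM_CHECK_* symbols come from this series, while the wrapper, its name and the error code are illustrative, and 'submit' is assumed to be an already-initialized async_submit_ctl:

#include <linux/async_tx.h>
#include <linux/errno.h>

static int check_xor_parity(struct page *p_dest, struct page **srcs,
			    int src_cnt, size_t len,
			    struct async_submit_ctl *submit)
{
	enum sum_check_flags result = 0;
	struct dma_async_tx_descriptor *tx;

	/* generate parity from srcs and check that it validates against p_dest */
	tx = async_xor_val(p_dest, srcs, 0, src_cnt, len, &result, submit);
	async_tx_quiesce(&tx);		/* wait for the validate to finish */

	/* SUM_CHECK_P_RESULT is set when the xor (P) parity does not validate */
	return (result & SUM_CHECK_P_RESULT) ? -EILSEQ : 0;
}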
Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop3xx-adma.h | 5 +++-- arch/arm/mach-iop13xx/include/mach/adma.h | 12 +++++++----- crypto/async_tx/async_xor.c | 4 ++-- drivers/md/raid5.c | 2 +- drivers/md/raid5.h | 5 +++-- include/linux/async_tx.h | 2 +- include/linux/dmaengine.h | 21 ++++++++++++++++++++- 7 files changed, 37 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 83e6ba338e2..26eefea0231 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -756,13 +756,14 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, hw_desc->src[0] = val; } -static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) +static inline enum sum_check_flags +iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) { struct iop3xx_desc_aau *hw_desc = desc->hw_desc; struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); - return desc_ctrl.zero_result_err; + return desc_ctrl.zero_result_err << SUM_CHECK_P; } static inline void iop_chan_append(struct iop_adma_chan *chan) diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h index 5722e86f217..1cd31df8924 100644 --- a/arch/arm/mach-iop13xx/include/mach/adma.h +++ b/arch/arm/mach-iop13xx/include/mach/adma.h @@ -428,18 +428,20 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, hw_desc->block_fill_data = val; } -static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) +static inline enum sum_check_flags +iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) { struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; struct iop13xx_adma_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; struct iop13xx_adma_byte_count byte_count = hw_desc->byte_count_field; + enum sum_check_flags flags; BUG_ON(!(byte_count.tx_complete && desc_ctrl.zero_result)); - if (desc_ctrl.pq_xfer_en) - return byte_count.zero_result_err_q; - else - return byte_count.zero_result_err; + flags = byte_count.zero_result_err_q << SUM_CHECK_Q; + flags |= byte_count.zero_result_err << SUM_CHECK_P; + + return flags; } static inline void iop_chan_append(struct iop_adma_chan *chan) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 1e96c4df706..78fb7780272 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -246,7 +246,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) */ struct dma_async_tx_descriptor * async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, u32 *result, + int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL, @@ -304,7 +304,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, async_tx_quiesce(&tx); - *result = page_is_zero(dest, offset, len) ? 
0 : 1; + *result = !page_is_zero(dest, offset, len) << SUM_CHECK_P; async_tx_sync_epilog(submit); submit->flags = flags_orig; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7727954cf72..1f2a266f3cf 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2590,7 +2590,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, * we are done. Otherwise update the mismatch count and repair * parity if !MD_RECOVERY_CHECK */ - if (sh->ops.zero_sum_result == 0) + if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) /* parity is correct (on disc, * not in buffer any more) */ diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index e7baabffee8..75f2c6c4cf9 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -2,6 +2,7 @@ #define _RAID5_H #include +#include /* * @@ -215,8 +216,8 @@ struct stripe_head { * @target - STRIPE_OP_COMPUTE_BLK target */ struct stripe_operations { - int target; - u32 zero_sum_result; + int target; + enum sum_check_flags zero_sum_result; } ops; struct r5dev { struct bio req; diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 00cfb637ddf..3d21a251751 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -148,7 +148,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, struct dma_async_tx_descriptor * async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, u32 *result, + int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 6768727d00d..02447afceba 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -86,6 +86,25 @@ enum dma_ctrl_flags { DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), }; +/** + * enum sum_check_bits - bit position of pq_check_flags + */ +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +/** + * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations + * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise + * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise + */ +enum sum_check_flags { + SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), + SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), +}; + + /** * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. * See linux/cpumask.h @@ -245,7 +264,7 @@ struct dma_device { unsigned int src_cnt, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, - size_t len, u32 *result, unsigned long flags); + size_t len, enum sum_check_flags *result, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags); -- cgit v1.2.3-70-g09d2 From 95475e57113c66aac7583925736ed2e2d58c990d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 12:19:02 -0700 Subject: async_tx: remove walk of tx->parent chain in dma_wait_for_async_tx We currently walk the parent chain when waiting for a given tx to complete however this walk may race with the driver cleanup routine. The routines in async_raid6_recov.c may fall back to the synchronous path at any point so we need to be prepared to call async_tx_quiesce() (which calls dma_wait_for_async_tx). 
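As a preview of where the patch lands (the full hunk is below), the reworked dma_wait_for_async_tx() reduces to a bounded spin on the descriptor's own cookie, which stays at -EBUSY until the dependency chain submits it, followed by dma_sync_wait(). A rough user-space model of that bounded-poll-then-wait pattern follows; every toy_* name is an invented stand-in for the dmaengine types, and only the cookie/-EBUSY convention and the 5 second budget come from the patch.

#include <stdio.h>
#include <time.h>

#define TOY_EBUSY (-16)                 /* same role as cookie == -EBUSY */

struct toy_tx {
        volatile int cookie;            /* set by the "driver" once submitted */
};

enum toy_status { TOY_SUCCESS, TOY_ERROR };

/* stand-in for dma_sync_wait(): block until hardware retires the cookie */
static enum toy_status toy_sync_wait(struct toy_tx *tx)
{
        (void)tx;
        return TOY_SUCCESS;
}

static enum toy_status toy_wait_for_async_tx(struct toy_tx *tx)
{
        /* 5 second budget, mirroring msecs_to_jiffies(5000) in the patch */
        time_t deadline = time(NULL) + 5;

        if (!tx)
                return TOY_SUCCESS;

        /* spin until the dependency chain has actually submitted this tx */
        while (tx->cookie == TOY_EBUSY) {
                if (time(NULL) >= deadline) {
                        fprintf(stderr, "timeout waiting for submission\n");
                        return TOY_ERROR;
                }
                /* cpu_relax() would sit here in the kernel version */
        }
        return toy_sync_wait(tx);
}

int main(void)
{
        struct toy_tx tx = { .cookie = 1 };     /* pretend it was submitted */

        return toy_wait_for_async_tx(&tx) == TOY_SUCCESS ? 0 : 1;
}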
To remove the ->parent walk we guarantee that every time a dependency is attached ->issue_pending() is invoked, then we can simply poll the initial descriptor until completion. This also allows for a lighter weight 'issue pending' implementation as there is no longer a requirement to iterate through all the channels' ->issue_pending() routines as long as operations have been submitted in an ordered chain. async_tx_issue_pending() is added for this case. Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 13 +++++++------ drivers/dma/dmaengine.c | 45 ++++++++++----------------------------------- include/linux/async_tx.h | 23 +++++++++++++++++++++++ 3 files changed, 40 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 6e37ad3f441..60615fedcf5 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -77,8 +77,8 @@ static void async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_async_tx_descriptor *tx) { - struct dma_chan *chan; - struct dma_device *device; + struct dma_chan *chan = depend_tx->chan; + struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; /* first check to see if we can still append to depend_tx */ @@ -90,11 +90,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, } spin_unlock_bh(&depend_tx->lock); - if (!intr_tx) + /* attached dependency, flush the parent channel */ + if (!intr_tx) { + device->device_issue_pending(chan); return; - - chan = depend_tx->chan; - device = chan->device; + } /* see if we can schedule an interrupt * otherwise poll for completion @@ -128,6 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, intr_tx->tx_submit(intr_tx); async_tx_ack(intr_tx); } + device->device_issue_pending(chan); } else { if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) panic("%s: DMA_ERROR waiting for depend_tx\n", diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 6781e8f3c06..e002e0e0d05 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -934,49 +934,24 @@ EXPORT_SYMBOL(dma_async_tx_descriptor_init); /* dma_wait_for_async_tx - spin wait for a transaction to complete * @tx: in-flight transaction to wait on - * - * This routine assumes that tx was obtained from a call to async_memcpy, - * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped - * and submitted). Walking the parent chain is only meant to cover for DMA - * drivers that do not implement the DMA_INTERRUPT capability and may race with - * the driver's descriptor cleanup routine. 
*/ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { - enum dma_status status; - struct dma_async_tx_descriptor *iter; - struct dma_async_tx_descriptor *parent; + unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); if (!tx) return DMA_SUCCESS; - WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for" - " %s\n", __func__, dma_chan_name(tx->chan)); - - /* poll through the dependency chain, return when tx is complete */ - do { - iter = tx; - - /* find the root of the unsubmitted dependency chain */ - do { - parent = iter->parent; - if (!parent) - break; - else - iter = parent; - } while (parent); - - /* there is a small window for ->parent == NULL and - * ->cookie == -EBUSY - */ - while (iter->cookie == -EBUSY) - cpu_relax(); - - status = dma_sync_wait(iter->chan, iter->cookie); - } while (status == DMA_IN_PROGRESS || (iter != tx)); - - return status; + while (tx->cookie == -EBUSY) { + if (time_after_eq(jiffies, dma_sync_wait_timeout)) { + pr_err("%s timeout waiting for descriptor submission\n", + __func__); + return DMA_ERROR; + } + cpu_relax(); + } + return dma_sync_wait(tx->chan, tx->cookie); } EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 3d21a251751..12a2efcbd56 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -83,6 +83,24 @@ struct async_submit_ctl { #ifdef CONFIG_DMA_ENGINE #define async_tx_issue_pending_all dma_issue_pending_all + +/** + * async_tx_issue_pending - send pending descriptor to the hardware channel + * @tx: descriptor handle to retrieve hardware context + * + * Note: any dependent operations will have already been issued by + * async_tx_channel_switch, or (in the case of no channel switch) will + * be already pending on this channel. + */ +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + if (likely(tx)) { + struct dma_chan *chan = tx->chan; + struct dma_device *dma = chan->device; + + dma->device_issue_pending(chan); + } +} #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL #include #else @@ -98,6 +116,11 @@ static inline void async_tx_issue_pending_all(void) do { } while (0); } +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + do { } while (0); +} + static inline struct dma_chan * async_tx_find_channel(struct async_submit_ctl *submit, enum dma_transaction_type tx_type, struct page **dst, -- cgit v1.2.3-70-g09d2 From b2f46fd8ef3dff2ab30f31126833f78b7480283a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 12:20:36 -0700 Subject: async_tx: add support for asynchronous GF multiplication [ Based on an original patch by Yuri Tikhonov ] This adds support for doing asynchronous GF multiplication by adding two additional functions to the async_tx API: async_gen_syndrome() does simultaneous XOR and Galois field multiplication of sources. async_syndrome_val() validates the given source buffers against known P and Q values. When a request is made to run async_pq against more than the hardware maximum number of supported sources we need to reuse the previous generated P and Q values as sources into the next operation. Care must be taken to remove Q from P' and P from Q'. 
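Before the worked 5-source continuation example that follows, it helps to see what a single PQ pass computes. The kernel-doc later in this patch fixes the field as GF(2^8) with primitive polynomial 0x11d and generator {02}; below is a standalone, deliberately unoptimized user-space sketch of that definition (P is the plain XOR of the sources, Q is the XOR of g^z times each source). The in-kernel raid6_call.gen_syndrome uses precomputed tables and a Horner-style loop instead, so treat this as the math, not the implementation.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* multiply in GF(2^8) with primitive polynomial 0x11d */
static uint8_t gf_mul(uint8_t a, uint8_t b)
{
        uint8_t p = 0;

        while (b) {
                if (b & 1)
                        p ^= a;
                a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
                b >>= 1;
        }
        return p;
}

/* {02}^z, i.e. the role raid6_gfexp[z] plays in the kernel */
static uint8_t gf_exp2(int z)
{
        uint8_t r = 1;

        while (z--)
                r = gf_mul(r, 2);
        return r;
}

/* ptrs[0..disks-3] are data, ptrs[disks-2] is P, ptrs[disks-1] is Q */
static void gen_syndrome(int disks, size_t len, uint8_t **ptrs)
{
        for (size_t i = 0; i < len; i++) {
                uint8_t p = 0, q = 0;

                for (int z = 0; z < disks - 2; z++) {
                        p ^= ptrs[z][i];
                        q ^= gf_mul(gf_exp2(z), ptrs[z][i]);
                }
                ptrs[disks - 2][i] = p;
                ptrs[disks - 1][i] = q;
        }
}

int main(void)
{
        uint8_t d0[4] = { 1, 2, 3, 4 }, d1[4] = { 5, 6, 7, 8 };
        uint8_t d2[4] = { 9, 10, 11, 12 }, p[4], q[4];
        uint8_t *ptrs[5] = { d0, d1, d2, p, q }; /* disks = 5: 3 data + P + Q */

        gen_syndrome(5, sizeof(d0), ptrs);
        printf("P[0]=%02x Q[0]=%02x\n", p[0], q[0]);
        return 0;
}

Read this way, the COEF({01}, {02}, {04}, {08}) and COEF({00}, {01}, {00}, {10}) lists in the continuation example below are just the per-source multipliers handed to each pass.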
For example to perform a 5 source pq op with hardware that only supports 4 sources at a time the following approach is taken: p, q = PQ(src0, src1, src2, src3, COEF({01}, {02}, {04}, {08})) p', q' = PQ(p, q, q, src4, COEF({00}, {01}, {00}, {10})) p' = p + q + q + src4 = p + src4 q' = {00}*p + {01}*q + {00}*q + {10}*src4 = q + {10}*src4 Note: 4 is the minimum acceptable maxpq otherwise we punt to synchronous-software path. The DMA_PREP_CONTINUE flag indicates to the driver to reuse p and q as sources (in the above manner) and fill the remaining slots up to maxpq with the new sources/coefficients. Note1: Some devices have native support for P+Q continuation and can skip this extra work. Devices with this capability can advertise it with dma_set_maxpq. It is up to each driver how to handle the DMA_PREP_CONTINUE flag. Note2: The api supports disabling the generation of P when generating Q, this is ignored by the synchronous path but is implemented by some dma devices to save unnecessary writes. In this case the continuation algorithm is simplified to only reuse Q as a source. Cc: H. Peter Anvin Cc: David Woodhouse Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- Documentation/crypto/async-tx-api.txt | 3 + arch/arm/mach-iop13xx/setup.c | 2 +- crypto/async_tx/Kconfig | 4 + crypto/async_tx/Makefile | 1 + crypto/async_tx/async_pq.c | 388 ++++++++++++++++++++++++++++++++++ crypto/async_tx/async_xor.c | 2 +- drivers/dma/dmaengine.c | 4 + drivers/dma/iop-adma.c | 2 +- include/linux/async_tx.h | 9 + include/linux/dmaengine.h | 87 +++++++- 10 files changed, 493 insertions(+), 9 deletions(-) create mode 100644 crypto/async_tx/async_pq.c (limited to 'include') diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt index 6b15e488c0e..0e48e054d69 100644 --- a/Documentation/crypto/async-tx-api.txt +++ b/Documentation/crypto/async-tx-api.txt @@ -64,6 +64,9 @@ xor - xor a series of source buffers and write the result to a xor_val - xor a series of source buffers and set a flag if the result is zero. 
The implementation attempts to prevent writes to memory +pq - generate the p+q (raid6 syndrome) from a series of source buffers +pq_val - validate that a p and or q buffer are in sync with a given series of + sources 3.3 Descriptor management: The return value is non-NULL and points to a 'descriptor' when the operation diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 9800228b71d..2e7ca0d75f8 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -506,7 +506,7 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); - dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask); + dma_cap_set(DMA_PQ, plat_data->cap_mask); dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); break; diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index d8fb3914598..cb6d7314f19 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig @@ -14,3 +14,7 @@ config ASYNC_MEMSET tristate select ASYNC_CORE +config ASYNC_PQ + tristate + select ASYNC_CORE + diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index 27baa7d52fb..1b992658825 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o obj-$(CONFIG_ASYNC_XOR) += async_xor.o +obj-$(CONFIG_ASYNC_PQ) += async_pq.o diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c new file mode 100644 index 00000000000..108b21efb49 --- /dev/null +++ b/crypto/async_tx/async_pq.c @@ -0,0 +1,388 @@ +/* + * Copyright(c) 2007 Yuri Tikhonov + * Copyright(c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. 
+ */ +#include +#include +#include +#include +#include + +/** + * scribble - space to hold throwaway P buffer for synchronous gen_syndrome + */ +static struct page *scribble; + +static bool is_raid6_zero_block(struct page *p) +{ + return p == (void *) raid6_empty_zero_page; +} + +/* the struct page *blocks[] parameter passed to async_gen_syndrome() + * and async_syndrome_val() contains the 'P' destination address at + * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] + * + * note: these are macros as they are used as lvalues + */ +#define P(b, d) (b[d-2]) +#define Q(b, d) (b[d-1]) + +/** + * do_async_gen_syndrome - asynchronously calculate P and/or Q + */ +static __async_inline struct dma_async_tx_descriptor * +do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, + const unsigned char *scfs, unsigned int offset, int disks, + size_t len, dma_addr_t *dma_src, + struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct dma_device *dma = chan->device; + enum dma_ctrl_flags dma_flags = 0; + enum async_tx_flags flags_orig = submit->flags; + dma_async_tx_callback cb_fn_orig = submit->cb_fn; + dma_async_tx_callback cb_param_orig = submit->cb_param; + int src_cnt = disks - 2; + unsigned char coefs[src_cnt]; + unsigned short pq_src_cnt; + dma_addr_t dma_dest[2]; + int src_off = 0; + int idx; + int i; + + /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ + if (P(blocks, disks)) + dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, + len, DMA_BIDIRECTIONAL); + else + dma_flags |= DMA_PREP_PQ_DISABLE_P; + if (Q(blocks, disks)) + dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, + len, DMA_BIDIRECTIONAL); + else + dma_flags |= DMA_PREP_PQ_DISABLE_Q; + + /* convert source addresses being careful to collapse 'empty' + * sources and update the coefficients accordingly + */ + for (i = 0, idx = 0; i < src_cnt; i++) { + if (is_raid6_zero_block(blocks[i])) + continue; + dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, + DMA_TO_DEVICE); + coefs[idx] = scfs[i]; + idx++; + } + src_cnt = idx; + + while (src_cnt > 0) { + submit->flags = flags_orig; + pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); + /* if we are submitting additional pqs, leave the chain open, + * clear the callback parameters, and leave the destination + * buffers mapped + */ + if (src_cnt > pq_src_cnt) { + submit->flags &= ~ASYNC_TX_ACK; + dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; + submit->cb_fn = NULL; + submit->cb_param = NULL; + } else { + dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; + submit->cb_fn = cb_fn_orig; + submit->cb_param = cb_param_orig; + if (cb_fn_orig) + dma_flags |= DMA_PREP_INTERRUPT; + } + + /* Since we have clobbered the src_list we are committed + * to doing this asynchronously. 
Drivers force forward + * progress in case they can not provide a descriptor + */ + for (;;) { + tx = dma->device_prep_dma_pq(chan, dma_dest, + &dma_src[src_off], + pq_src_cnt, + &coefs[src_off], len, + dma_flags); + if (likely(tx)) + break; + async_tx_quiesce(&submit->depend_tx); + dma_async_issue_pending(chan); + } + + async_tx_submit(chan, tx, submit); + submit->depend_tx = tx; + + /* drop completed sources */ + src_cnt -= pq_src_cnt; + src_off += pq_src_cnt; + + dma_flags |= DMA_PREP_CONTINUE; + } + + return tx; +} + +/** + * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome + */ +static void +do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, + size_t len, struct async_submit_ctl *submit) +{ + void **srcs; + int i; + + if (submit->scribble) + srcs = submit->scribble; + else + srcs = (void **) blocks; + + for (i = 0; i < disks; i++) { + if (is_raid6_zero_block(blocks[i])) { + BUG_ON(i > disks - 3); /* P or Q can't be zero */ + srcs[i] = blocks[i]; + } else + srcs[i] = page_address(blocks[i]) + offset; + } + raid6_call.gen_syndrome(disks, len, srcs); + async_tx_sync_epilog(submit); +} + +/** + * async_gen_syndrome - asynchronously calculate a raid6 syndrome + * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 + * @offset: common offset into each block (src and dest) to start transaction + * @disks: number of blocks (including missing P or Q, see below) + * @len: length of operation in bytes + * @submit: submission/completion modifiers + * + * General note: This routine assumes a field of GF(2^8) with a + * primitive polynomial of 0x11d and a generator of {02}. + * + * 'disks' note: callers can optionally omit either P or Q (but not + * both) from the calculation by setting blocks[disks-2] or + * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= + * PAGE_SIZE as a temporary buffer of this size is used in the + * synchronous path. 'disks' always accounts for both destination + * buffers. + * + * 'blocks' note: if submit->scribble is NULL then the contents of + * 'blocks' may be overridden + */ +struct dma_async_tx_descriptor * +async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, + size_t len, struct async_submit_ctl *submit) +{ + int src_cnt = disks - 2; + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, + &P(blocks, disks), 2, + blocks, src_cnt, len); + struct dma_device *device = chan ? 
chan->device : NULL; + dma_addr_t *dma_src = NULL; + + BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); + + if (submit->scribble) + dma_src = submit->scribble; + else if (sizeof(dma_addr_t) <= sizeof(struct page *)) + dma_src = (dma_addr_t *) blocks; + + if (dma_src && device && + (src_cnt <= dma_maxpq(device, 0) || + dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) { + /* run the p+q asynchronously */ + pr_debug("%s: (async) disks: %d len: %zu\n", + __func__, disks, len); + return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, + disks, len, dma_src, submit); + } + + /* run the pq synchronously */ + pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); + + /* wait for any prerequisite operations */ + async_tx_quiesce(&submit->depend_tx); + + if (!P(blocks, disks)) { + P(blocks, disks) = scribble; + BUG_ON(len + offset > PAGE_SIZE); + } + if (!Q(blocks, disks)) { + Q(blocks, disks) = scribble; + BUG_ON(len + offset > PAGE_SIZE); + } + do_sync_gen_syndrome(blocks, offset, disks, len, submit); + + return NULL; +} +EXPORT_SYMBOL_GPL(async_gen_syndrome); + +/** + * async_syndrome_val - asynchronously validate a raid6 syndrome + * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 + * @offset: common offset into each block (src and dest) to start transaction + * @disks: number of blocks (including missing P or Q, see below) + * @len: length of operation in bytes + * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set + * @spare: temporary result buffer for the synchronous case + * @submit: submission / completion modifiers + * + * The same notes from async_gen_syndrome apply to the 'blocks', + * and 'disks' parameters of this routine. The synchronous path + * requires a temporary result buffer and submit->scribble to be + * specified. + */ +struct dma_async_tx_descriptor * +async_syndrome_val(struct page **blocks, unsigned int offset, int disks, + size_t len, enum sum_check_flags *pqres, struct page *spare, + struct async_submit_ctl *submit) +{ + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL, + NULL, 0, blocks, disks, + len); + struct dma_device *device = chan ? chan->device : NULL; + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; + dma_addr_t *dma_src = NULL; + + BUG_ON(disks < 4); + + if (submit->scribble) + dma_src = submit->scribble; + else if (sizeof(dma_addr_t) <= sizeof(struct page *)) + dma_src = (dma_addr_t *) blocks; + + if (dma_src && device && disks <= dma_maxpq(device, 0)) { + struct device *dev = device->dev; + dma_addr_t *pq = &dma_src[disks-2]; + int i; + + pr_debug("%s: (async) disks: %d len: %zu\n", + __func__, disks, len); + if (!P(blocks, disks)) + dma_flags |= DMA_PREP_PQ_DISABLE_P; + if (!Q(blocks, disks)) + dma_flags |= DMA_PREP_PQ_DISABLE_Q; + for (i = 0; i < disks; i++) + if (likely(blocks[i])) { + BUG_ON(is_raid6_zero_block(blocks[i])); + dma_src[i] = dma_map_page(dev, blocks[i], + offset, len, + DMA_TO_DEVICE); + } + + for (;;) { + tx = device->device_prep_dma_pq_val(chan, pq, dma_src, + disks - 2, + raid6_gfexp, + len, pqres, + dma_flags); + if (likely(tx)) + break; + async_tx_quiesce(&submit->depend_tx); + dma_async_issue_pending(chan); + } + async_tx_submit(chan, tx, submit); + + return tx; + } else { + struct page *p_src = P(blocks, disks); + struct page *q_src = Q(blocks, disks); + enum async_tx_flags flags_orig = submit->flags; + dma_async_tx_callback cb_fn_orig = submit->cb_fn; + void *scribble = submit->scribble; + void *cb_param_orig = submit->cb_param; + void *p, *q, *s; + + pr_debug("%s: (sync) disks: %d len: %zu\n", + __func__, disks, len); + + /* caller must provide a temporary result buffer and + * allow the input parameters to be preserved + */ + BUG_ON(!spare || !scribble); + + /* wait for any prerequisite operations */ + async_tx_quiesce(&submit->depend_tx); + + /* recompute p and/or q into the temporary buffer and then + * check to see the result matches the current value + */ + tx = NULL; + *pqres = 0; + if (p_src) { + init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL, + NULL, NULL, scribble); + tx = async_xor(spare, blocks, offset, disks-2, len, submit); + async_tx_quiesce(&tx); + p = page_address(p_src) + offset; + s = page_address(spare) + offset; + *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P; + } + + if (q_src) { + P(blocks, disks) = NULL; + Q(blocks, disks) = spare; + init_async_submit(submit, 0, NULL, NULL, NULL, scribble); + tx = async_gen_syndrome(blocks, offset, disks, len, submit); + async_tx_quiesce(&tx); + q = page_address(q_src) + offset; + s = page_address(spare) + offset; + *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; + } + + /* restore P, Q and submit */ + P(blocks, disks) = p_src; + Q(blocks, disks) = q_src; + + submit->cb_fn = cb_fn_orig; + submit->cb_param = cb_param_orig; + submit->flags = flags_orig; + async_tx_sync_epilog(submit); + + return NULL; + } +} +EXPORT_SYMBOL_GPL(async_syndrome_val); + +static int __init async_pq_init(void) +{ + scribble = alloc_page(GFP_KERNEL); + + if (scribble) + return 0; + + pr_err("%s: failed to allocate required spare page\n", __func__); + + return -ENOMEM; +} + +static void __exit async_pq_exit(void) +{ + put_page(scribble); +} + +module_init(async_pq_init); +module_exit(async_pq_exit); + +MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation"); +MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 78fb7780272..56b5f98da46 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -62,7 +62,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, while (src_cnt) { submit->flags = flags_orig; dma_flags = 0; - xor_src_cnt = min(src_cnt, dma->max_xor); + xor_src_cnt = min(src_cnt, 
(int)dma->max_xor); /* if we are submitting additional xors, leave the chain open, * clear the callback parameters, and leave the destination * buffer mapped diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index e002e0e0d05..cd5673d3043 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -646,6 +646,10 @@ int dma_async_device_register(struct dma_device *device) !device->device_prep_dma_xor); BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val); + BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && + !device->device_prep_dma_pq); + BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && + !device->device_prep_dma_pq_val); BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset); BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 6ff79a67269..4496bc60666 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1257,7 +1257,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " "( %s%s%s%s%s%s%s%s%s%s)\n", - dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "", + dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 12a2efcbd56..e6ce5f004f9 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -185,5 +185,14 @@ async_memset(struct page *dest, int val, unsigned int offset, struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); +struct dma_async_tx_descriptor * +async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, enum sum_check_flags *pqres, struct page *spare, + struct async_submit_ctl *submit); + void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 02447afceba..ce010cd991d 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -52,7 +52,7 @@ enum dma_status { enum dma_transaction_type { DMA_MEMCPY, DMA_XOR, - DMA_PQ_XOR, + DMA_PQ, DMA_DUAL_XOR, DMA_PQ_UPDATE, DMA_XOR_VAL, @@ -70,20 +70,28 @@ enum dma_transaction_type { /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, - * control completion, and communicate status. + * control completion, and communicate status. * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of - * this transaction + * this transaction * @DMA_CTRL_ACK - the descriptor cannot be reused until the client - * acknowledges receipt, i.e. has has a chance to establish any - * dependency chains + * acknowledges receipt, i.e. 
has has a chance to establish any dependency + * chains * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) + * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q + * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P + * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as + * sources that were the result of a previous operation, in the case of a PQ + * operation it continues the calculation with new sources */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), + DMA_PREP_PQ_DISABLE_P = (1 << 4), + DMA_PREP_PQ_DISABLE_Q = (1 << 5), + DMA_PREP_CONTINUE = (1 << 6), }; /** @@ -226,6 +234,7 @@ struct dma_async_tx_descriptor { * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 if no capability + * @max_pq: maximum number of PQ sources and PQ-continue capability * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the @@ -234,6 +243,8 @@ struct dma_async_tx_descriptor { * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation * @device_prep_dma_xor_val: prepares a xor validation operation + * @device_prep_dma_pq: prepares a pq operation + * @device_prep_dma_pq_val: prepares a pqzero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_slave_sg: prepares a slave dma operation @@ -248,7 +259,9 @@ struct dma_device { struct list_head channels; struct list_head global_node; dma_cap_mask_t cap_mask; - int max_xor; + unsigned short max_xor; + unsigned short max_pq; + #define DMA_HAS_PQ_CONTINUE (1 << 15) int dev_id; struct device *dev; @@ -265,6 +278,14 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq)( + struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( + struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags); @@ -283,6 +304,60 @@ struct dma_device { void (*device_issue_pending)(struct dma_chan *chan); }; +static inline void +dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) +{ + dma->max_pq = maxpq; + if (has_pq_continue) + dma->max_pq |= DMA_HAS_PQ_CONTINUE; +} + +static inline bool dmaf_continue(enum dma_ctrl_flags flags) +{ + return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; +} + +static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) +{ + enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; + + return (flags & mask) == mask; +} + +static inline bool dma_dev_has_pq_continue(struct dma_device *dma) +{ + return (dma->max_pq & 
DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; +} + +static unsigned short dma_dev_to_maxpq(struct dma_device *dma) +{ + return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; +} + +/* dma_maxpq - reduce maxpq in the face of continued operations + * @dma - dma device with PQ capability + * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set + * + * When an engine does not support native continuation we need 3 extra + * source slots to reuse P and Q with the following coefficients: + * 1/ {00} * P : remove P from Q', but use it as a source for P' + * 2/ {01} * Q : use Q to continue Q' calculation + * 3/ {00} * Q : subtract Q from P' to cancel (2) + * + * In the case where P is disabled we only need 1 extra source: + * 1/ {01} * Q : use Q to continue Q' calculation + */ +static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) +{ + if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) + return dma_dev_to_maxpq(dma); + else if (dmaf_p_disabled_continue(flags)) + return dma_dev_to_maxpq(dma) - 1; + else if (dmaf_continue(flags)) + return dma_dev_to_maxpq(dma) - 3; + BUG(); +} + /* --- public DMA engine API --- */ #ifdef CONFIG_DMA_ENGINE -- cgit v1.2.3-70-g09d2 From 0a82a6239beecc95db6e05fe43ee62d16b381d38 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 12:20:37 -0700 Subject: async_tx: add support for asynchronous RAID6 recovery operations async_raid6_2data_recov() recovers two data disk failures async_raid6_datap_recov() recovers a data disk and the P disk These routines are a port of the synchronous versions found in drivers/md/raid6recov.c. The primary difference is breaking out the xor operations into separate calls to async_xor. Two helper routines are introduced to perform scalar multiplication where needed. async_sum_product() multiplies two sources by scalar coefficients and then sums (xor) the result. async_mult() simply multiplies a single source by a scalar. This implemention also includes, in contrast to the original synchronous-only code, special case handling for the 4-disk and 5-disk array cases. In these situations the default N-disk algorithm will present 0-source or 1-source operations to dma devices. To cover for dma devices where the minimum source count is 2 we implement 4-disk and 5-disk handling in the recovery code. [ Impact: asynchronous raid6 recovery routines for 2data and datap cases ] Cc: Yuri Tikhonov Cc: Ilya Yanok Cc: H. 
Peter Anvin Cc: David Woodhouse Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- Documentation/crypto/async-tx-api.txt | 4 + crypto/async_tx/Kconfig | 5 + crypto/async_tx/Makefile | 1 + crypto/async_tx/async_raid6_recov.c | 448 ++++++++++++++++++++++++++++++++++ include/linux/async_tx.h | 8 + 5 files changed, 466 insertions(+) create mode 100644 crypto/async_tx/async_raid6_recov.c (limited to 'include') diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt index 0e48e054d69..ba046b8fa92 100644 --- a/Documentation/crypto/async-tx-api.txt +++ b/Documentation/crypto/async-tx-api.txt @@ -67,6 +67,10 @@ xor_val - xor a series of source buffers and set a flag if the pq - generate the p+q (raid6 syndrome) from a series of source buffers pq_val - validate that a p and or q buffer are in sync with a given series of sources +datap - (raid6_datap_recov) recover a raid6 data block and the p block + from the given sources +2data - (raid6_2data_recov) recover 2 raid6 data blocks from the given + sources 3.3 Descriptor management: The return value is non-NULL and points to a 'descriptor' when the operation diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index cb6d7314f19..e5aeb2b79e6 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig @@ -18,3 +18,8 @@ config ASYNC_PQ tristate select ASYNC_CORE +config ASYNC_RAID6_RECOV + tristate + select ASYNC_CORE + select ASYNC_PQ + diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index 1b992658825..9a1a76811b8 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o obj-$(CONFIG_ASYNC_XOR) += async_xor.o obj-$(CONFIG_ASYNC_PQ) += async_pq.o +obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c new file mode 100644 index 00000000000..0c14d48c989 --- /dev/null +++ b/crypto/async_tx/async_raid6_recov.c @@ -0,0 +1,448 @@ +/* + * Asynchronous RAID-6 recovery calculations ASYNC_TX API. + * Copyright(c) 2009 Intel Corporation + * + * based on raid6recov.c: + * Copyright 2002 H. Peter Anvin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#include +#include +#include +#include +#include + +static struct dma_async_tx_descriptor * +async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, + size_t len, struct async_submit_ctl *submit) +{ + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, + &dest, 1, srcs, 2, len); + struct dma_device *dma = chan ? 
chan->device : NULL; + const u8 *amul, *bmul; + u8 ax, bx; + u8 *a, *b, *c; + + if (dma) { + dma_addr_t dma_dest[2]; + dma_addr_t dma_src[2]; + struct device *dev = dma->dev; + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + + dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); + dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); + tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, + len, dma_flags); + if (tx) { + async_tx_submit(chan, tx, submit); + return tx; + } + } + + /* run the operation synchronously */ + async_tx_quiesce(&submit->depend_tx); + amul = raid6_gfmul[coef[0]]; + bmul = raid6_gfmul[coef[1]]; + a = page_address(srcs[0]); + b = page_address(srcs[1]); + c = page_address(dest); + + while (len--) { + ax = amul[*a++]; + bx = bmul[*b++]; + *c++ = ax ^ bx; + } + + return NULL; +} + +static struct dma_async_tx_descriptor * +async_mult(struct page *dest, struct page *src, u8 coef, size_t len, + struct async_submit_ctl *submit) +{ + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, + &dest, 1, &src, 1, len); + struct dma_device *dma = chan ? chan->device : NULL; + const u8 *qmul; /* Q multiplier table */ + u8 *d, *s; + + if (dma) { + dma_addr_t dma_dest[2]; + dma_addr_t dma_src[1]; + struct device *dev = dma->dev; + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + + dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); + tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, + len, dma_flags); + if (tx) { + async_tx_submit(chan, tx, submit); + return tx; + } + } + + /* no channel available, or failed to allocate a descriptor, so + * perform the operation synchronously + */ + async_tx_quiesce(&submit->depend_tx); + qmul = raid6_gfmul[coef]; + d = page_address(dest); + s = page_address(src); + + while (len--) + *d++ = qmul[*s++]; + + return NULL; +} + +static struct dma_async_tx_descriptor * +__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, + struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *a, *b; + struct page *srcs[2]; + unsigned char coef[2]; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void *cb_param = submit->cb_param; + void *scribble = submit->scribble; + + p = blocks[4-2]; + q = blocks[4-1]; + + a = blocks[faila]; + b = blocks[failb]; + + /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ + /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ + srcs[0] = p; + srcs[1] = q; + coef[0] = raid6_gfexi[failb-faila]; + coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_sum_product(b, srcs, coef, bytes, submit); + + /* Dy = P+Pxy+Dx */ + srcs[0] = p; + srcs[1] = b; + init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(a, srcs, 0, 2, bytes, submit); + + return tx; + +} + +static struct dma_async_tx_descriptor * +__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, + struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *g, *dp, *dq; + struct page *srcs[2]; + unsigned char coef[2]; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void 
*cb_param = submit->cb_param; + void *scribble = submit->scribble; + int uninitialized_var(good); + int i; + + for (i = 0; i < 3; i++) { + if (i == faila || i == failb) + continue; + else { + good = i; + break; + } + } + BUG_ON(i >= 3); + + p = blocks[5-2]; + q = blocks[5-1]; + g = blocks[good]; + + /* Compute syndrome with zero for the missing data pages + * Use the dead data pages as temporary storage for delta p and + * delta q + */ + dp = blocks[faila]; + dq = blocks[failb]; + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_memcpy(dp, g, 0, 0, bytes, submit); + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); + + /* compute P + Pxy */ + srcs[0] = dp; + srcs[1] = p; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + /* compute Q + Qxy */ + srcs[0] = dq; + srcs[1] = q; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dq, srcs, 0, 2, bytes, submit); + + /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ + srcs[0] = dp; + srcs[1] = dq; + coef[0] = raid6_gfexi[failb-faila]; + coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_sum_product(dq, srcs, coef, bytes, submit); + + /* Dy = P+Pxy+Dx */ + srcs[0] = dp; + srcs[1] = dq; + init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + return tx; +} + +static struct dma_async_tx_descriptor * +__2data_recov_n(int disks, size_t bytes, int faila, int failb, + struct page **blocks, struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *dp, *dq; + struct page *srcs[2]; + unsigned char coef[2]; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void *cb_param = submit->cb_param; + void *scribble = submit->scribble; + + p = blocks[disks-2]; + q = blocks[disks-1]; + + /* Compute syndrome with zero for the missing data pages + * Use the dead data pages as temporary storage for + * delta p and delta q + */ + dp = blocks[faila]; + blocks[faila] = (void *)raid6_empty_zero_page; + blocks[disks-2] = dp; + dq = blocks[failb]; + blocks[failb] = (void *)raid6_empty_zero_page; + blocks[disks-1] = dq; + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); + + /* Restore pointer table */ + blocks[faila] = dp; + blocks[failb] = dq; + blocks[disks-2] = p; + blocks[disks-1] = q; + + /* compute P + Pxy */ + srcs[0] = dp; + srcs[1] = p; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + /* compute Q + Qxy */ + srcs[0] = dq; + srcs[1] = q; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dq, srcs, 0, 2, bytes, submit); + + /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ + srcs[0] = dp; + srcs[1] = dq; + coef[0] = raid6_gfexi[failb-faila]; + coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_sum_product(dq, srcs, coef, bytes, submit); + + /* Dy = P+Pxy+Dx */ + srcs[0] = dp; + srcs[1] = dq; + init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + return tx; +} + +/** + * 
async_raid6_2data_recov - asynchronously calculate two missing data blocks + * @disks: number of disks in the RAID-6 array + * @bytes: block size + * @faila: first failed drive index + * @failb: second failed drive index + * @blocks: array of source pointers where the last two entries are p and q + * @submit: submission/completion modifiers + */ +struct dma_async_tx_descriptor * +async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, + struct page **blocks, struct async_submit_ctl *submit) +{ + BUG_ON(faila == failb); + if (failb < faila) + swap(faila, failb); + + pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); + + /* we need to preserve the contents of 'blocks' for the async + * case, so punt to synchronous if a scribble buffer is not available + */ + if (!submit->scribble) { + void **ptrs = (void **) blocks; + int i; + + async_tx_quiesce(&submit->depend_tx); + for (i = 0; i < disks; i++) + ptrs[i] = page_address(blocks[i]); + + raid6_2data_recov(disks, bytes, faila, failb, ptrs); + + async_tx_sync_epilog(submit); + + return NULL; + } + + switch (disks) { + case 4: + /* dma devices do not uniformly understand a zero source pq + * operation (in contrast to the synchronous case), so + * explicitly handle the 4 disk special case + */ + return __2data_recov_4(bytes, faila, failb, blocks, submit); + case 5: + /* dma devices do not uniformly understand a single + * source pq operation (in contrast to the synchronous + * case), so explicitly handle the 5 disk special case + */ + return __2data_recov_5(bytes, faila, failb, blocks, submit); + default: + return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); + } +} +EXPORT_SYMBOL_GPL(async_raid6_2data_recov); + +/** + * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block + * @disks: number of disks in the RAID-6 array + * @bytes: block size + * @faila: failed drive index + * @blocks: array of source pointers where the last two entries are p and q + * @submit: submission/completion modifiers + */ +struct dma_async_tx_descriptor * +async_raid6_datap_recov(int disks, size_t bytes, int faila, + struct page **blocks, struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *dq; + u8 coef; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void *cb_param = submit->cb_param; + void *scribble = submit->scribble; + struct page *srcs[2]; + + pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); + + /* we need to preserve the contents of 'blocks' for the async + * case, so punt to synchronous if a scribble buffer is not available + */ + if (!scribble) { + void **ptrs = (void **) blocks; + int i; + + async_tx_quiesce(&submit->depend_tx); + for (i = 0; i < disks; i++) + ptrs[i] = page_address(blocks[i]); + + raid6_datap_recov(disks, bytes, faila, ptrs); + + async_tx_sync_epilog(submit); + + return NULL; + } + + p = blocks[disks-2]; + q = blocks[disks-1]; + + /* Compute syndrome with zero for the missing data page + * Use the dead data page as temporary storage for delta q + */ + dq = blocks[faila]; + blocks[faila] = (void *)raid6_empty_zero_page; + blocks[disks-1] = dq; + + /* in the 4 disk case we only need to perform a single source + * multiplication + */ + if (disks == 4) { + int good = faila == 0 ? 
1 : 0; + struct page *g = blocks[good]; + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_memcpy(p, g, 0, 0, bytes, submit); + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); + } else { + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); + } + + /* Restore pointer table */ + blocks[faila] = dq; + blocks[disks-1] = q; + + /* calculate g^{-faila} */ + coef = raid6_gfinv[raid6_gfexp[faila]]; + + srcs[0] = dq; + srcs[1] = q; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dq, srcs, 0, 2, bytes, submit); + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_mult(dq, dq, coef, bytes, submit); + + srcs[0] = p; + srcs[1] = dq; + init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(p, srcs, 0, 2, bytes, submit); + + return tx; +} +EXPORT_SYMBOL_GPL(async_raid6_datap_recov); + +MODULE_AUTHOR("Dan Williams "); +MODULE_DESCRIPTION("asynchronous RAID-6 recovery api"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index e6ce5f004f9..866e61c4e2e 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -194,5 +194,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *pqres, struct page *spare, struct async_submit_ctl *submit); +struct dma_async_tx_descriptor * +async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, + struct page **ptrs, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_raid6_datap_recov(int src_num, size_t bytes, int faila, + struct page **ptrs, struct async_submit_ctl *submit); + void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ -- cgit v1.2.3-70-g09d2 From e500011ffa191d662ac64d4ada6a5187b3180e16 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 30 Aug 2009 13:19:12 -0700 Subject: timers: Drop a function prototype Drop prototype for non-existent next_timer_interrupt() function. Signed-off-by: Randy Dunlap Cc: akpm LKML-Reference: <4A9ADEC0.70306@oracle.com> Signed-off-by: Thomas Gleixner --- include/linux/timer.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include') diff --git a/include/linux/timer.h b/include/linux/timer.h index be62ec2ebea..a2d1eb6cb3f 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -173,11 +173,6 @@ extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); */ #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) -/* - * Return when the next timer-wheel timeout occurs (in absolute jiffies), - * locks the timer base: - */ -extern unsigned long next_timer_interrupt(void); /* * Return when the next timer-wheel timeout occurs (in absolute jiffies), * locks the timer base and does the comparison against the given -- cgit v1.2.3-70-g09d2 From f380ef86916904e4b79f7bec599deb51057b2d0c Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Wed, 19 Aug 2009 00:56:44 +0200 Subject: drm/crtc_helper: place drm_helper_encoder_in_use() in the header file - The symbol was already exported. 
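The hunk below only adds the prototype (extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);) so that code outside drm_crtc_helper.c can call the already-exported symbol. A hedged, hypothetical example of such a caller is sketched here; the function name and the choice to DPMS-off idle encoders are inventions for illustration, while the types, the encoder_list walk and the dpms callback follow the patterns used elsewhere in this series.

#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"

/* hypothetical driver helper, not part of this patch series */
static void example_power_down_idle_encoders(struct drm_device *dev)
{
        struct drm_encoder *encoder;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct drm_encoder_helper_funcs *encoder_funcs =
                        encoder->helper_private;

                /* skip encoders still driving a live configuration */
                if (drm_helper_encoder_in_use(encoder))
                        continue;

                if (encoder_funcs && encoder_funcs->dpms)
                        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
        }
}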
Signed-off-by: Maarten Maathuis Signed-off-by: Dave Airlie --- include/drm/drm_crtc_helper.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 6769ff6c1bc..e44a4f87303 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -98,6 +98,7 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); +extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); -- cgit v1.2.3-70-g09d2 From 785b93ef8c309730c2de84ce9c229e40e2d01480 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 28 Aug 2009 15:46:53 +1000 Subject: drm/kms: move driver specific fb common code to helper functions (v2) Initially I always meant this code to be shared, but things ran away from me before I got to it. This refactors the i915 and radeon kms fbdev interaction layers out into generic helpers + driver specific pieces. It moves all the panic/sysrq enhancements to the core file, and stores a linked list of kernel fbs. This could possibly be improved to only store the fb which has fbcon on it for panics etc. radeon retains some specific codes used for a big endian workaround. changes: fix oops in v1 fix freeing path for crtc_info Reviewed-by: Jesse Barnes Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 3 +- drivers/gpu/drm/drm_fb_helper.c | 697 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_dma.c | 3 +- drivers/gpu/drm/i915/intel_display.c | 12 - drivers/gpu/drm/i915/intel_drv.h | 3 - drivers/gpu/drm/i915/intel_fb.c | 737 ++------------------------------ drivers/gpu/drm/radeon/radeon_display.c | 5 +- drivers/gpu/drm/radeon/radeon_fb.c | 670 ++++------------------------- drivers/gpu/drm/radeon/radeon_mode.h | 2 - include/drm/drm_crtc.h | 2 + include/drm/drm_fb_helper.h | 82 ++++ 11 files changed, 907 insertions(+), 1309 deletions(-) create mode 100644 drivers/gpu/drm/drm_fb_helper.c create mode 100644 include/drm/drm_fb_helper.h (limited to 'include') diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 5f0aec4f082..99071684de2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -11,7 +11,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \ - drm_info.o drm_debugfs.o drm_encoder_slave.o + drm_info.o drm_debugfs.o drm_encoder_slave.o \ + drm_fb_helper.o drm-$(CONFIG_COMPAT) += drm_ioc32.o diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c new file mode 100644 index 00000000000..d6ffea74a50 --- /dev/null +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -0,0 +1,697 @@ +/* + * Copyright (c) 2006-2009 Red Hat Inc. 
+ * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie + * + * DRM framebuffer helper functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + * + * Authors: + * Dave Airlie + * Jesse Barnes + */ +#include +#include +#include "drmP.h" +#include "drm_crtc.h" +#include "drm_fb_helper.h" +#include "drm_crtc_helper.h" + +static LIST_HEAD(kernel_fb_helper_list); + +bool drm_fb_helper_force_kernel_mode(void) +{ + int i = 0; + bool ret, error = false; + struct drm_fb_helper *helper; + + if (list_empty(&kernel_fb_helper_list)) + return false; + + list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { + for (i = 0; i < helper->crtc_count; i++) { + struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; + ret = drm_crtc_helper_set_config(mode_set); + if (ret) + error = true; + } + } + return error; +} + +int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, + void *panic_str) +{ + DRM_ERROR("panic occurred, switching back to text console\n"); + return drm_fb_helper_force_kernel_mode(); + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_panic); + +static struct notifier_block paniced = { + .notifier_call = drm_fb_helper_panic, +}; + +/** + * drm_fb_helper_restore - restore the framebuffer console (kernel) config + * + * Restore's the kernel's fbcon mode, used for lastclose & panic paths. + */ +void drm_fb_helper_restore(void) +{ + bool ret; + ret = drm_fb_helper_force_kernel_mode(); + if (ret == true) + DRM_ERROR("Failed to restore crtc configuration\n"); +} +EXPORT_SYMBOL(drm_fb_helper_restore); + +static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) +{ + drm_fb_helper_restore(); +} +static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); + +static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3) +{ + schedule_work(&drm_fb_helper_restore_work); +} + +static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { + .handler = drm_fb_helper_sysrq, + .help_msg = "force-fb(V)", + .action_msg = "Restore framebuffer console", +}; + +static void drm_fb_helper_on(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + int i; + + /* + * For each CRTC in this fb, turn the crtc on then, + * find all associated encoders and turn them on. 
+ */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + for (i = 0; i < fb_helper->crtc_count; i++) { + if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) + break; + } + + mutex_lock(&dev->mode_config.mutex); + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); + mutex_unlock(&dev->mode_config.mutex); + + /* Found a CRTC on this fb, now find encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + struct drm_encoder_helper_funcs *encoder_funcs; + + encoder_funcs = encoder->helper_private; + mutex_lock(&dev->mode_config.mutex); + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); + mutex_unlock(&dev->mode_config.mutex); + } + } + } +} + +static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + int i; + + /* + * For each CRTC in this fb, find all associated encoders + * and turn them off, then turn off the CRTC. + */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + for (i = 0; i < fb_helper->crtc_count; i++) { + if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) + break; + } + + /* Found a CRTC on this fb, now find encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + struct drm_encoder_helper_funcs *encoder_funcs; + + encoder_funcs = encoder->helper_private; + mutex_lock(&dev->mode_config.mutex); + encoder_funcs->dpms(encoder, dpms_mode); + mutex_unlock(&dev->mode_config.mutex); + } + } + if (dpms_mode == DRM_MODE_DPMS_OFF) { + mutex_lock(&dev->mode_config.mutex); + crtc_funcs->dpms(crtc, dpms_mode); + mutex_unlock(&dev->mode_config.mutex); + } + } +} + +int drm_fb_helper_blank(int blank, struct fb_info *info) +{ + switch (blank) { + case FB_BLANK_UNBLANK: + drm_fb_helper_on(info); + break; + case FB_BLANK_NORMAL: + drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); + break; + case FB_BLANK_HSYNC_SUSPEND: + drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); + break; + case FB_BLANK_VSYNC_SUSPEND: + drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); + break; + case FB_BLANK_POWERDOWN: + drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); + break; + } + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_blank); + +static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) +{ + int i; + + for (i = 0; i < helper->crtc_count; i++) + kfree(helper->crtc_info[i].mode_set.connectors); + kfree(helper->crtc_info); +} + +int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count) +{ + struct drm_device *dev = helper->dev; + struct drm_crtc *crtc; + int ret = 0; + int i; + + helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); + if (!helper->crtc_info) + return -ENOMEM; + + helper->crtc_count = crtc_count; + + for (i = 0; i < crtc_count; i++) { + helper->crtc_info[i].mode_set.connectors = + kcalloc(max_conn_count, + sizeof(struct drm_connector *), + GFP_KERNEL); + + if (!helper->crtc_info[i].mode_set.connectors) { + ret = -ENOMEM; + goto out_free; + } + helper->crtc_info[i].mode_set.num_connectors = 0; + } + + i = 0; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + helper->crtc_info[i].crtc_id = crtc->base.id; + helper->crtc_info[i].mode_set.crtc = crtc; + i++; + } + helper->conn_limit = 
max_conn_count; + return 0; +out_free: + drm_fb_helper_crtc_free(helper); + return -ENOMEM; +} +EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); + +int drm_fb_helper_setcolreg(unsigned regno, + unsigned red, + unsigned green, + unsigned blue, + unsigned transp, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_crtc *crtc; + int i; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_framebuffer *fb = fb_helper->fb; + + for (i = 0; i < fb_helper->crtc_count; i++) { + if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) + break; + } + if (i == fb_helper->crtc_count) + continue; + + if (regno > 255) + return 1; + + if (fb->depth == 8) { + fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); + return 0; + } + + if (regno < 16) { + switch (fb->depth) { + case 15: + fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | + ((green & 0xf800) >> 6) | + ((blue & 0xf800) >> 11); + break; + case 16: + fb->pseudo_palette[regno] = (red & 0xf800) | + ((green & 0xfc00) >> 5) | + ((blue & 0xf800) >> 11); + break; + case 24: + case 32: + fb->pseudo_palette[regno] = + (((red >> 8) & 0xff) << info->var.red.offset) | + (((green >> 8) & 0xff) << info->var.green.offset) | + (((blue >> 8) & 0xff) << info->var.blue.offset); + break; + } + } + } + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_setcolreg); + +int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_framebuffer *fb = fb_helper->fb; + int depth; + + if (var->pixclock == -1 || !var->pixclock) + return -EINVAL; + + /* Need to resize the fb object !!! */ + if (var->xres > fb->width || var->yres > fb->height) { + DRM_ERROR("Requested width/height is greater than current fb " + "object %dx%d > %dx%d\n", var->xres, var->yres, + fb->width, fb->height); + DRM_ERROR("Need resizing code.\n"); + return -EINVAL; + } + + switch (var->bits_per_pixel) { + case 16: + depth = (var->green.length == 6) ? 16 : 15; + break; + case 32: + depth = (var->transp.length > 0) ? 
32 : 24; + break; + default: + depth = var->bits_per_pixel; + break; + } + + switch (depth) { + case 8: + var->red.offset = 0; + var->green.offset = 0; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 15: + var->red.offset = 10; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 5; + var->blue.length = 5; + var->transp.length = 1; + var->transp.offset = 15; + break; + case 16: + var->red.offset = 11; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 6; + var->blue.length = 5; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 24: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 32: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 8; + var->transp.offset = 24; + break; + default: + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_check_var); + +/* this will let fbcon do the mode init */ +int drm_fb_helper_set_par(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct fb_var_screeninfo *var = &info->var; + struct drm_crtc *crtc; + int ret; + int i; + + if (var->pixclock != -1) { + DRM_ERROR("PIXEL CLCOK SET\n"); + return -EINVAL; + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + + for (i = 0; i < fb_helper->crtc_count; i++) { + if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) + break; + } + if (i == fb_helper->crtc_count) + continue; + + if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) { + mutex_lock(&dev->mode_config.mutex); + ret = crtc->funcs->set_config(&fb_helper->crtc_info->mode_set); + mutex_unlock(&dev->mode_config.mutex); + if (ret) + return ret; + } + } + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_set_par); + +int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_mode_set *modeset; + struct drm_crtc *crtc; + int ret = 0; + int i; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + for (i = 0; i < fb_helper->crtc_count; i++) { + if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) + break; + } + + if (i == fb_helper->crtc_count) + continue; + + modeset = &fb_helper->crtc_info[i].mode_set; + + modeset->x = var->xoffset; + modeset->y = var->yoffset; + + if (modeset->num_connectors) { + mutex_lock(&dev->mode_config.mutex); + ret = crtc->funcs->set_config(modeset); + mutex_unlock(&dev->mode_config.mutex); + if (!ret) { + info->var.xoffset = var->xoffset; + info->var.yoffset = var->yoffset; + } + } + } + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_pan_display); + +int drm_fb_helper_single_fb_probe(struct drm_device *dev, + int (*fb_create)(struct drm_device *dev, + uint32_t fb_width, + uint32_t fb_height, + uint32_t surface_width, + uint32_t surface_height, + struct drm_framebuffer **fb_ptr)) +{ + struct drm_crtc *crtc; + struct drm_connector *connector; + unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; + unsigned int surface_width = 0, surface_height = 0; + int new_fb = 0; + int crtc_count = 0; + int ret, i, 
conn_count = 0; + struct fb_info *info; + struct drm_framebuffer *fb; + struct drm_mode_set *modeset = NULL; + struct drm_fb_helper *fb_helper; + + /* first up get a count of crtcs now in use and new min/maxes width/heights */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (drm_helper_crtc_in_use(crtc)) { + if (crtc->desired_mode) { + if (crtc->desired_mode->hdisplay < fb_width) + fb_width = crtc->desired_mode->hdisplay; + + if (crtc->desired_mode->vdisplay < fb_height) + fb_height = crtc->desired_mode->vdisplay; + + if (crtc->desired_mode->hdisplay > surface_width) + surface_width = crtc->desired_mode->hdisplay; + + if (crtc->desired_mode->vdisplay > surface_height) + surface_height = crtc->desired_mode->vdisplay; + } + crtc_count++; + } + } + + if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { + /* hmm everyone went away - assume VGA cable just fell out + and will come back later. */ + return 0; + } + + /* do we have an fb already? */ + if (list_empty(&dev->mode_config.fb_kernel_list)) { + ret = (*fb_create)(dev, fb_width, fb_height, surface_width, + surface_height, &fb); + if (ret) + return -EINVAL; + new_fb = 1; + } else { + fb = list_first_entry(&dev->mode_config.fb_kernel_list, + struct drm_framebuffer, filp_head); + + /* if someone hotplugs something bigger than we have already allocated, we are pwned. + As really we can't resize an fbdev that is in the wild currently due to fbdev + not really being designed for the lower layers moving stuff around under it. + - so in the grand style of things - punt. */ + if ((fb->width < surface_width) || + (fb->height < surface_height)) { + DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); + return -EINVAL; + } + } + + info = fb->fbdev; + fb_helper = info->par; + + crtc_count = 0; + /* okay we need to setup new connector sets in the crtcs */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + modeset = &fb_helper->crtc_info[crtc_count].mode_set; + modeset->fb = fb; + conn_count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder) + if (connector->encoder->crtc == modeset->crtc) { + modeset->connectors[conn_count] = connector; + conn_count++; + if (conn_count > fb_helper->conn_limit) + BUG(); + } + } + + for (i = conn_count; i < fb_helper->conn_limit; i++) + modeset->connectors[i] = NULL; + + modeset->crtc = crtc; + crtc_count++; + + modeset->num_connectors = conn_count; + if (modeset->crtc->desired_mode) { + if (modeset->mode) + drm_mode_destroy(dev, modeset->mode); + modeset->mode = drm_mode_duplicate(dev, + modeset->crtc->desired_mode); + } + } + fb_helper->crtc_count = crtc_count; + fb_helper->fb = fb; + + if (new_fb) { + info->var.pixclock = -1; + if (register_framebuffer(info) < 0) + return -EINVAL; + } else { + drm_fb_helper_set_par(info); + } + printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + info->fix.id); + + /* Switch back to kernel console on panic */ + /* multi card linked list maybe */ + if (list_empty(&kernel_fb_helper_list)) { + printk(KERN_INFO "registered panic notifier\n"); + atomic_notifier_chain_register(&panic_notifier_list, + &paniced); + register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); + } + list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); + +void drm_fb_helper_free(struct drm_fb_helper *helper) +{ + list_del(&helper->kernel_fb_list); + if (list_empty(&kernel_fb_helper_list)) { + printk(KERN_INFO 
"unregistered panic notifier\n"); + atomic_notifier_chain_unregister(&panic_notifier_list, + &paniced); + unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); + } + drm_fb_helper_crtc_free(helper); +} +EXPORT_SYMBOL(drm_fb_helper_free); + +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) +{ + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + info->fix.type_aux = 0; + info->fix.xpanstep = 1; /* doing it in hw */ + info->fix.ypanstep = 1; /* doing it in hw */ + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_I830; + info->fix.type_aux = 0; + + info->fix.line_length = pitch; + return; +} +EXPORT_SYMBOL(drm_fb_helper_fill_fix); + +void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, + uint32_t fb_width, uint32_t fb_height) +{ + info->pseudo_palette = fb->pseudo_palette; + info->var.xres_virtual = fb->width; + info->var.yres_virtual = fb->height; + info->var.bits_per_pixel = fb->bits_per_pixel; + info->var.xoffset = 0; + info->var.yoffset = 0; + info->var.activate = FB_ACTIVATE_NOW; + info->var.height = -1; + info->var.width = -1; + + switch (fb->depth) { + case 8: + info->var.red.offset = 0; + info->var.green.offset = 0; + info->var.blue.offset = 0; + info->var.red.length = 8; /* 8bit DAC */ + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 0; + info->var.transp.length = 0; + break; + case 15: + info->var.red.offset = 10; + info->var.green.offset = 5; + info->var.blue.offset = 0; + info->var.red.length = 5; + info->var.green.length = 5; + info->var.blue.length = 5; + info->var.transp.offset = 15; + info->var.transp.length = 1; + break; + case 16: + info->var.red.offset = 11; + info->var.green.offset = 5; + info->var.blue.offset = 0; + info->var.red.length = 5; + info->var.green.length = 6; + info->var.blue.length = 5; + info->var.transp.offset = 0; + break; + case 24: + info->var.red.offset = 16; + info->var.green.offset = 8; + info->var.blue.offset = 0; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 0; + info->var.transp.length = 0; + break; + case 32: + info->var.red.offset = 16; + info->var.green.offset = 8; + info->var.blue.offset = 0; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 24; + info->var.transp.length = 8; + break; + default: + break; + } + + info->var.xres = fb_width; + info->var.yres = fb_height; +} +EXPORT_SYMBOL(drm_fb_helper_fill_var); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 544d889b9b1..c628c367139 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -29,6 +29,7 @@ #include "drmP.h" #include "drm.h" #include "drm_crtc_helper.h" +#include "drm_fb_helper.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" @@ -1347,7 +1348,7 @@ void i915_driver_lastclose(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { - intelfb_restore(); + drm_fb_helper_restore(); return; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d6fce213341..5fb7a4f4a42 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3060,8 +3060,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->mode_set.mode) - 
drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode); drm_crtc_cleanup(crtc); kfree(intel_crtc); } @@ -3107,16 +3105,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) intel_crtc->cursor_addr = 0; intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); - - intel_crtc->mode_set.crtc = &intel_crtc->base; - intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1); - intel_crtc->mode_set.num_connectors = 0; - - if (i915_fbpercrtc) { - - - - } } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d6f92ea1b55..38910f8f30e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -96,9 +96,6 @@ struct intel_crtc { uint32_t cursor_addr; u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; - struct intel_framebuffer *fbdev_fb; - /* a mode_set for fbdev users on this crtc */ - struct drm_mode_set mode_set; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 1d30802e773..3041530c367 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -39,339 +39,34 @@ #include "drmP.h" #include "drm.h" #include "drm_crtc.h" +#include "drm_fb_helper.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" struct intelfb_par { - struct drm_device *dev; - struct drm_display_mode *our_mode; + struct drm_fb_helper helper; struct intel_framebuffer *intel_fb; - int crtc_count; - /* crtc currently bound to this */ - uint32_t crtc_ids[2]; + struct drm_display_mode *our_mode; }; -static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green, - unsigned blue, unsigned transp, - struct fb_info *info) -{ - struct intelfb_par *par = info->par; - struct drm_device *dev = par->dev; - struct drm_crtc *crtc; - int i; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_mode_set *modeset = &intel_crtc->mode_set; - struct drm_framebuffer *fb = modeset->fb; - - for (i = 0; i < par->crtc_count; i++) - if (crtc->base.id == par->crtc_ids[i]) - break; - - if (i == par->crtc_count) - continue; - - - if (regno > 255) - return 1; - - if (fb->depth == 8) { - intel_crtc_fb_gamma_set(crtc, red, green, blue, regno); - return 0; - } - - if (regno < 16) { - switch (fb->depth) { - case 15: - fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | - ((green & 0xf800) >> 6) | - ((blue & 0xf800) >> 11); - break; - case 16: - fb->pseudo_palette[regno] = (red & 0xf800) | - ((green & 0xfc00) >> 5) | - ((blue & 0xf800) >> 11); - break; - case 24: - case 32: - fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | - (green & 0xff00) | - ((blue & 0xff00) >> 8); - break; - } - } - } - return 0; -} - -static int intelfb_check_var(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - struct intelfb_par *par = info->par; - struct intel_framebuffer *intel_fb = par->intel_fb; - struct drm_framebuffer *fb = &intel_fb->base; - int depth; - - if (var->pixclock == -1 || !var->pixclock) - return -EINVAL; - - /* Need to resize the fb object !!! 
*/ - if (var->xres > fb->width || var->yres > fb->height) { - DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height); - DRM_ERROR("Need resizing code.\n"); - return -EINVAL; - } - - switch (var->bits_per_pixel) { - case 16: - depth = (var->green.length == 6) ? 16 : 15; - break; - case 32: - depth = (var->transp.length > 0) ? 32 : 24; - break; - default: - depth = var->bits_per_pixel; - break; - } - - switch (depth) { - case 8: - var->red.offset = 0; - var->green.offset = 0; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 15: - var->red.offset = 10; - var->green.offset = 5; - var->blue.offset = 0; - var->red.length = 5; - var->green.length = 5; - var->blue.length = 5; - var->transp.length = 1; - var->transp.offset = 15; - break; - case 16: - var->red.offset = 11; - var->green.offset = 5; - var->blue.offset = 0; - var->red.length = 5; - var->green.length = 6; - var->blue.length = 5; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 24: - var->red.offset = 16; - var->green.offset = 8; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 32: - var->red.offset = 16; - var->green.offset = 8; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 8; - var->transp.offset = 24; - break; - default: - return -EINVAL; - } - - return 0; -} - -/* this will let fbcon do the mode init */ -/* FIXME: take mode config lock? */ -static int intelfb_set_par(struct fb_info *info) -{ - struct intelfb_par *par = info->par; - struct drm_device *dev = par->dev; - struct fb_var_screeninfo *var = &info->var; - int i; - - DRM_DEBUG("%d %d\n", var->xres, var->pixclock); - - if (var->pixclock != -1) { - - DRM_ERROR("PIXEL CLOCK SET\n"); - return -EINVAL; - } else { - struct drm_crtc *crtc; - int ret; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - for (i = 0; i < par->crtc_count; i++) - if (crtc->base.id == par->crtc_ids[i]) - break; - - if (i == par->crtc_count) - continue; - - if (crtc->fb == intel_crtc->mode_set.fb) { - mutex_lock(&dev->mode_config.mutex); - ret = crtc->funcs->set_config(&intel_crtc->mode_set); - mutex_unlock(&dev->mode_config.mutex); - if (ret) - return ret; - } - } - return 0; - } -} - -static int intelfb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - struct intelfb_par *par = info->par; - struct drm_device *dev = par->dev; - struct drm_mode_set *modeset; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; - int ret = 0; - int i; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - for (i = 0; i < par->crtc_count; i++) - if (crtc->base.id == par->crtc_ids[i]) - break; - - if (i == par->crtc_count) - continue; - - intel_crtc = to_intel_crtc(crtc); - modeset = &intel_crtc->mode_set; - - modeset->x = var->xoffset; - modeset->y = var->yoffset; - - if (modeset->num_connectors) { - mutex_lock(&dev->mode_config.mutex); - ret = crtc->funcs->set_config(modeset); - mutex_unlock(&dev->mode_config.mutex); - if (!ret) { - info->var.xoffset = var->xoffset; - info->var.yoffset = var->yoffset; - } - } - } - - return ret; -} - -static void intelfb_on(struct fb_info *info) -{ - struct intelfb_par *par = info->par; - 
struct drm_device *dev = par->dev; - struct drm_crtc *crtc; - struct drm_encoder *encoder; - int i; - - /* - * For each CRTC in this fb, find all associated encoders - * and turn them off, then turn off the CRTC. - */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - for (i = 0; i < par->crtc_count; i++) - if (crtc->base.id == par->crtc_ids[i]) - break; - - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); - - /* Found a CRTC on this fb, now find encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - struct drm_encoder_helper_funcs *encoder_funcs; - encoder_funcs = encoder->helper_private; - encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); - } - } - } -} - -static void intelfb_off(struct fb_info *info, int dpms_mode) -{ - struct intelfb_par *par = info->par; - struct drm_device *dev = par->dev; - struct drm_crtc *crtc; - struct drm_encoder *encoder; - int i; - - /* - * For each CRTC in this fb, find all associated encoders - * and turn them off, then turn off the CRTC. - */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - for (i = 0; i < par->crtc_count; i++) - if (crtc->base.id == par->crtc_ids[i]) - break; - - /* Found a CRTC on this fb, now find encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - struct drm_encoder_helper_funcs *encoder_funcs; - encoder_funcs = encoder->helper_private; - encoder_funcs->dpms(encoder, dpms_mode); - } - } - if (dpms_mode == DRM_MODE_DPMS_OFF) - crtc_funcs->dpms(crtc, dpms_mode); - } -} - -static int intelfb_blank(int blank, struct fb_info *info) -{ - switch (blank) { - case FB_BLANK_UNBLANK: - intelfb_on(info); - break; - case FB_BLANK_NORMAL: - intelfb_off(info, DRM_MODE_DPMS_STANDBY); - break; - case FB_BLANK_HSYNC_SUSPEND: - intelfb_off(info, DRM_MODE_DPMS_STANDBY); - break; - case FB_BLANK_VSYNC_SUSPEND: - intelfb_off(info, DRM_MODE_DPMS_SUSPEND); - break; - case FB_BLANK_POWERDOWN: - intelfb_off(info, DRM_MODE_DPMS_OFF); - break; - } - return 0; -} - static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, - .fb_check_var = intelfb_check_var, - .fb_set_par = intelfb_set_par, - .fb_setcolreg = intelfb_setcolreg, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, - .fb_pan_display = intelfb_pan_display, - .fb_blank = intelfb_blank, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, }; +static struct drm_fb_helper_funcs intel_fb_helper_funcs = { + .gamma_set = intel_crtc_fb_gamma_set, +}; + + /** * Curretly it is assumed that the old framebuffer is reused. 
* @@ -412,25 +107,10 @@ int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc) } EXPORT_SYMBOL(intelfb_resize); -static struct drm_mode_set kernelfb_mode; - -static int intelfb_panic(struct notifier_block *n, unsigned long ununsed, - void *panic_str) -{ - DRM_ERROR("panic occurred, switching back to text console\n"); - - intelfb_restore(); - return 0; -} - -static struct notifier_block paniced = { - .notifier_call = intelfb_panic, -}; - static int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height, uint32_t surface_width, uint32_t surface_height, - struct intel_framebuffer **intel_fb_p) + struct drm_framebuffer **fb_p) { struct fb_info *info; struct intelfb_par *par; @@ -479,7 +159,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); intel_fb = to_intel_framebuffer(fb); - *intel_fb_p = intel_fb; + *fb_p = fb; info = framebuffer_alloc(sizeof(struct intelfb_par), device); if (!info) { @@ -489,21 +169,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, par = info->par; + par->helper.funcs = &intel_fb_helper_funcs; + par->helper.dev = dev; + ret = drm_fb_helper_init_crtc_count(&par->helper, 2, + INTELFB_CONN_LIMIT); + if (ret) + goto out_unref; + strcpy(info->fix.id, "inteldrmfb"); - info->fix.type = FB_TYPE_PACKED_PIXELS; - info->fix.visual = FB_VISUAL_TRUECOLOR; - info->fix.type_aux = 0; - info->fix.xpanstep = 1; /* doing it in hw */ - info->fix.ypanstep = 1; /* doing it in hw */ - info->fix.ywrapstep = 0; - info->fix.accel = FB_ACCEL_I830; - info->fix.type_aux = 0; info->flags = FBINFO_DEFAULT; info->fbops = &intelfb_ops; - info->fix.line_length = fb->pitch; /* setup aperture base/size for vesafb takeover */ info->aperture_base = dev->mode_config.fb_base; @@ -527,18 +205,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, // memset(info->screen_base, 0, size); - info->pseudo_palette = fb->pseudo_palette; - info->var.xres_virtual = fb->width; - info->var.yres_virtual = fb->height; - info->var.bits_per_pixel = fb->bits_per_pixel; - info->var.xoffset = 0; - info->var.yoffset = 0; - info->var.activate = FB_ACTIVATE_NOW; - info->var.height = -1; - info->var.width = -1; - - info->var.xres = fb_width; - info->var.yres = fb_height; + drm_fb_helper_fill_fix(info, fb->depth); + drm_fb_helper_fill_var(info, fb, fb_width, fb_height); /* FIXME: we really shouldn't expose mmio space at all */ info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); @@ -550,64 +218,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; - switch(fb->depth) { - case 8: - info->var.red.offset = 0; - info->var.green.offset = 0; - info->var.blue.offset = 0; - info->var.red.length = 8; /* 8bit DAC */ - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 15: - info->var.red.offset = 10; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 5; - info->var.blue.length = 5; - info->var.transp.offset = 15; - info->var.transp.length = 1; - break; - case 16: - info->var.red.offset = 11; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 6; - info->var.blue.length = 5; - info->var.transp.offset = 0; - break; - case 24: - info->var.red.offset = 16; - info->var.green.offset = 8; - 
info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 32: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 24; - info->var.transp.length = 8; - break; - default: - break; - } - fb->fbdev = info; par->intel_fb = intel_fb; - par->dev = dev; /* To allow resizeing without swapping buffers */ DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, @@ -625,307 +238,12 @@ out: return ret; } -static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - struct drm_framebuffer *fb; - struct drm_connector *connector; - struct fb_info *info; - struct intelfb_par *par; - struct drm_mode_set *modeset; - unsigned int width, height; - int new_fb = 0; - int ret, i, conn_count; - - if (!drm_helper_crtc_in_use(crtc)) - return 0; - - if (!crtc->desired_mode) - return 0; - - width = crtc->desired_mode->hdisplay; - height = crtc->desired_mode->vdisplay; - - /* is there an fb bound to this crtc already */ - if (!intel_crtc->mode_set.fb) { - ret = intelfb_create(dev, width, height, width, height, &intel_fb); - if (ret) - return -EINVAL; - new_fb = 1; - } else { - fb = intel_crtc->mode_set.fb; - intel_fb = to_intel_framebuffer(fb); - if ((intel_fb->base.width < width) || (intel_fb->base.height < height)) - return -EINVAL; - } - - info = intel_fb->base.fbdev; - par = info->par; - - modeset = &intel_crtc->mode_set; - modeset->fb = &intel_fb->base; - conn_count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder) - if (connector->encoder->crtc == modeset->crtc) { - modeset->connectors[conn_count] = connector; - conn_count++; - if (conn_count > INTELFB_CONN_LIMIT) - BUG(); - } - } - - for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) - modeset->connectors[i] = NULL; - - par->crtc_ids[0] = crtc->base.id; - - modeset->num_connectors = conn_count; - if (modeset->crtc->desired_mode) { - if (modeset->mode) - drm_mode_destroy(dev, modeset->mode); - modeset->mode = drm_mode_duplicate(dev, - modeset->crtc->desired_mode); - } - - par->crtc_count = 1; - - if (new_fb) { - info->var.pixclock = -1; - if (register_framebuffer(info) < 0) - return -EINVAL; - } else - intelfb_set_par(info); - - DRM_INFO("fb%d: %s frame buffer device\n", info->node, - info->fix.id); - - /* Switch back to kernel console on panic */ - kernelfb_mode = *modeset; - atomic_notifier_chain_register(&panic_notifier_list, &paniced); - DRM_DEBUG("registered panic notifier\n"); - - return 0; -} - -static int intelfb_multi_fb_probe(struct drm_device *dev) -{ - - struct drm_crtc *crtc; - int ret = 0; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - ret = intelfb_multi_fb_probe_crtc(dev, crtc); - if (ret) - return ret; - } - return ret; -} - -static int intelfb_single_fb_probe(struct drm_device *dev) -{ - struct drm_crtc *crtc; - struct drm_connector *connector; - unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; - unsigned int surface_width = 0, surface_height = 0; - int new_fb = 0; - int crtc_count = 0; - int ret, i, conn_count = 0; - struct intel_framebuffer *intel_fb; - struct fb_info *info; - struct intelfb_par *par; - struct drm_mode_set *modeset = NULL; 
- - DRM_DEBUG("\n"); - - /* Get a count of crtcs now in use and new min/maxes width/heights */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (!drm_helper_crtc_in_use(crtc)) - continue; - - crtc_count++; - if (!crtc->desired_mode) - continue; - - /* Smallest mode determines console size... */ - if (crtc->desired_mode->hdisplay < fb_width) - fb_width = crtc->desired_mode->hdisplay; - - if (crtc->desired_mode->vdisplay < fb_height) - fb_height = crtc->desired_mode->vdisplay; - - /* ... but largest for memory allocation dimensions */ - if (crtc->desired_mode->hdisplay > surface_width) - surface_width = crtc->desired_mode->hdisplay; - - if (crtc->desired_mode->vdisplay > surface_height) - surface_height = crtc->desired_mode->vdisplay; - } - - if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { - /* hmm everyone went away - assume VGA cable just fell out - and will come back later. */ - DRM_DEBUG("no CRTCs available?\n"); - return 0; - } - -//fail - /* Find the fb for our new config */ - if (list_empty(&dev->mode_config.fb_kernel_list)) { - DRM_DEBUG("creating new fb (console size %dx%d, " - "buffer size %dx%d)\n", fb_width, fb_height, - surface_width, surface_height); - ret = intelfb_create(dev, fb_width, fb_height, surface_width, - surface_height, &intel_fb); - if (ret) - return -EINVAL; - new_fb = 1; - } else { - struct drm_framebuffer *fb; - - fb = list_first_entry(&dev->mode_config.fb_kernel_list, - struct drm_framebuffer, filp_head); - intel_fb = to_intel_framebuffer(fb); - - /* if someone hotplugs something bigger than we have already - * allocated, we are pwned. As really we can't resize an - * fbdev that is in the wild currently due to fbdev not really - * being designed for the lower layers moving stuff around - * under it. - * - so in the grand style of things - punt. - */ - if ((fb->width < surface_width) || - (fb->height < surface_height)) { - DRM_ERROR("fb not large enough for console\n"); - return -EINVAL; - } - } -// fail - - info = intel_fb->base.fbdev; - par = info->par; - - crtc_count = 0; - /* - * For each CRTC, set up the connector list for the CRTC's mode - * set configuration. 
- */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - modeset = &intel_crtc->mode_set; - modeset->fb = &intel_fb->base; - conn_count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, - head) { - if (!connector->encoder) - continue; - - if(connector->encoder->crtc == modeset->crtc) { - modeset->connectors[conn_count++] = connector; - if (conn_count > INTELFB_CONN_LIMIT) - BUG(); - } - } - - /* Zero out remaining connector pointers */ - for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) - modeset->connectors[i] = NULL; - - par->crtc_ids[crtc_count++] = crtc->base.id; - - modeset->num_connectors = conn_count; - if (modeset->crtc->desired_mode) { - if (modeset->mode) - drm_mode_destroy(dev, modeset->mode); - modeset->mode = drm_mode_duplicate(dev, - modeset->crtc->desired_mode); - } - } - par->crtc_count = crtc_count; - - if (new_fb) { - info->var.pixclock = -1; - if (register_framebuffer(info) < 0) - return -EINVAL; - } else - intelfb_set_par(info); - - DRM_INFO("fb%d: %s frame buffer device\n", info->node, - info->fix.id); - - /* Switch back to kernel console on panic */ - kernelfb_mode = *modeset; - atomic_notifier_chain_register(&panic_notifier_list, &paniced); - DRM_DEBUG("registered panic notifier\n"); - - return 0; -} - -/** - * intelfb_restore - restore the framebuffer console (kernel) config - * - * Restore's the kernel's fbcon mode, used for lastclose & panic paths. - */ -void intelfb_restore(void) -{ - int ret; - if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) { - DRM_ERROR("Failed to restore crtc configuration: %d\n", - ret); - } -} - -static void intelfb_restore_work_fn(struct work_struct *ignored) -{ - intelfb_restore(); -} -static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn); - -static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) -{ - schedule_work(&intelfb_restore_work); -} - -static struct sysrq_key_op sysrq_intelfb_restore_op = { - .handler = intelfb_sysrq, - .help_msg = "force-fb(V)", - .action_msg = "Restore framebuffer console", -}; - int intelfb_probe(struct drm_device *dev) { int ret; DRM_DEBUG("\n"); - - /* something has changed in the lower levels of hell - deal with it - here */ - - /* two modes : a) 1 fb to rule all crtcs. - b) one fb per crtc. - two actions 1) new connected device - 2) device removed. - case a/1 : if the fb surface isn't big enough - resize the surface fb. - if the fb size isn't big enough - resize fb into surface. - if everything big enough configure the new crtc/etc. - case a/2 : undo the configuration - possibly resize down the fb to fit the new configuration. - case b/1 : see if it is on a new crtc - setup a new fb and add it. - case b/2 : teardown the new fb. 
- */ - - /* mode a first */ - /* search for an fb */ - if (i915_fbpercrtc == 1) { - ret = intelfb_multi_fb_probe(dev); - } else { - ret = intelfb_single_fb_probe(dev); - } - - register_sysrq_key('v', &sysrq_intelfb_restore_op); - + ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); return ret; } EXPORT_SYMBOL(intelfb_probe); @@ -940,13 +258,14 @@ int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) info = fb->fbdev; if (info) { + struct intelfb_par *par = info->par; unregister_framebuffer(info); iounmap(info->screen_base); + if (info->par) + drm_fb_helper_free(&par->helper); framebuffer_release(info); } - atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); - memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set)); return 0; } EXPORT_SYMBOL(intelfb_remove); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index a8fa1bb84cf..af035605d14 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -158,9 +158,6 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - if (radeon_crtc->mode_set.mode) { - drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode); - } drm_crtc_cleanup(crtc); kfree(radeon_crtc); } @@ -189,9 +186,11 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_crtc->crtc_id = index; rdev->mode_info.crtcs[index] = radeon_crtc; +#if 0 radeon_crtc->mode_set.crtc = &radeon_crtc->base; radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); radeon_crtc->mode_set.num_connectors = 0; +#endif for (i = 0; i < 256; i++) { radeon_crtc->lut_r[i] = i << 2; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index ec383edf5f3..ebb58959f41 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -28,15 +28,7 @@ */ #include -#include -#include -#include -#include -#include -#include -#include #include -#include #include "drmP.h" #include "drm.h" @@ -45,375 +37,86 @@ #include "radeon_drm.h" #include "radeon.h" +#include "drm_fb_helper.h" + struct radeon_fb_device { - struct radeon_device *rdev; - struct drm_display_mode *mode; + struct drm_fb_helper helper; struct radeon_framebuffer *rfb; - int crtc_count; - /* crtc currently bound to this */ - uint32_t crtc_ids[2]; + struct radeon_device *rdev; }; -static int radeonfb_setcolreg(unsigned regno, - unsigned red, - unsigned green, - unsigned blue, - unsigned transp, - struct fb_info *info) -{ - struct radeon_fb_device *rfbdev = info->par; - struct drm_device *dev = rfbdev->rdev->ddev; - struct drm_crtc *crtc; - int i; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct drm_mode_set *modeset = &radeon_crtc->mode_set; - struct drm_framebuffer *fb = modeset->fb; - - for (i = 0; i < rfbdev->crtc_count; i++) { - if (crtc->base.id == rfbdev->crtc_ids[i]) { - break; - } - } - if (i == rfbdev->crtc_count) { - continue; - } - if (regno > 255) { - return 1; - } - if (fb->depth == 8) { - radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno); - return 0; - } - - if (regno < 16) { - switch (fb->depth) { - case 15: - fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | - ((green & 0xf800) >> 6) | - ((blue & 0xf800) >> 11); - break; - case 16: - fb->pseudo_palette[regno] = (red & 0xf800) | - ((green & 0xfc00) >> 5) | - ((blue & 0xf800) >> 11); - break; - case 24: - case 32: - 
fb->pseudo_palette[regno] = - (((red >> 8) & 0xff) << info->var.red.offset) | - (((green >> 8) & 0xff) << info->var.green.offset) | - (((blue >> 8) & 0xff) << info->var.blue.offset); - break; - } - } - } - return 0; -} - -static int radeonfb_check_var(struct fb_var_screeninfo *var, - struct fb_info *info) +static int radeon_fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { - struct radeon_fb_device *rfbdev = info->par; - struct radeon_framebuffer *rfb = rfbdev->rfb; - struct drm_framebuffer *fb = &rfb->base; - int depth; - - if (var->pixclock == -1 || !var->pixclock) { - return -EINVAL; - } - /* Need to resize the fb object !!! */ - if (var->xres > fb->width || var->yres > fb->height) { - DRM_ERROR("Requested width/height is greater than current fb " - "object %dx%d > %dx%d\n", var->xres, var->yres, - fb->width, fb->height); - DRM_ERROR("Need resizing code.\n"); - return -EINVAL; - } - - switch (var->bits_per_pixel) { - case 16: - depth = (var->green.length == 6) ? 16 : 15; - break; - case 32: - depth = (var->transp.length > 0) ? 32 : 24; - break; - default: - depth = var->bits_per_pixel; - break; - } - - switch (depth) { - case 8: - var->red.offset = 0; - var->green.offset = 0; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; -#ifdef __LITTLE_ENDIAN - case 15: - var->red.offset = 10; - var->green.offset = 5; - var->blue.offset = 0; - var->red.length = 5; - var->green.length = 5; - var->blue.length = 5; - var->transp.length = 1; - var->transp.offset = 15; - break; - case 16: - var->red.offset = 11; - var->green.offset = 5; - var->blue.offset = 0; - var->red.length = 5; - var->green.length = 6; - var->blue.length = 5; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 24: - var->red.offset = 16; - var->green.offset = 8; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 32: - var->red.offset = 16; - var->green.offset = 8; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 8; - var->transp.offset = 24; - break; -#else - case 24: - var->red.offset = 8; - var->green.offset = 16; - var->blue.offset = 24; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 32: - var->red.offset = 8; - var->green.offset = 16; - var->blue.offset = 24; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 8; - var->transp.offset = 0; - break; -#endif - default: - return -EINVAL; - } - return 0; -} - -/* this will let fbcon do the mode init */ -static int radeonfb_set_par(struct fb_info *info) -{ - struct radeon_fb_device *rfbdev = info->par; - struct drm_device *dev = rfbdev->rdev->ddev; - struct fb_var_screeninfo *var = &info->var; - struct drm_crtc *crtc; int ret; - int i; - - if (var->pixclock != -1) { - DRM_ERROR("PIXEL CLCOK SET\n"); - return -EINVAL; - } - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - - for (i = 0; i < rfbdev->crtc_count; i++) { - if (crtc->base.id == rfbdev->crtc_ids[i]) { - break; - } - } - if (i == rfbdev->crtc_count) { - continue; - } - if (crtc->fb == radeon_crtc->mode_set.fb) { - mutex_lock(&dev->mode_config.mutex); - ret = 
crtc->funcs->set_config(&radeon_crtc->mode_set); - mutex_unlock(&dev->mode_config.mutex); - if (ret) { - return ret; - } - } - } - return 0; -} - -static int radeonfb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - struct radeon_fb_device *rfbdev = info->par; - struct drm_device *dev = rfbdev->rdev->ddev; - struct drm_mode_set *modeset; - struct drm_crtc *crtc; - struct radeon_crtc *radeon_crtc; - int ret = 0; - int i; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - for (i = 0; i < rfbdev->crtc_count; i++) { - if (crtc->base.id == rfbdev->crtc_ids[i]) { - break; - } - } - - if (i == rfbdev->crtc_count) { - continue; - } - - radeon_crtc = to_radeon_crtc(crtc); - modeset = &radeon_crtc->mode_set; - - modeset->x = var->xoffset; - modeset->y = var->yoffset; - - if (modeset->num_connectors) { - mutex_lock(&dev->mode_config.mutex); - ret = crtc->funcs->set_config(modeset); - mutex_unlock(&dev->mode_config.mutex); - if (!ret) { - info->var.xoffset = var->xoffset; - info->var.yoffset = var->yoffset; - } + ret = drm_fb_helper_check_var(var, info); + if (ret) + return ret; + + /* big endian override for radeon endian workaround */ +#ifdef __BIG_ENDIAN + { + int depth; + switch (var->bits_per_pixel) { + case 16: + depth = (var->green.length == 6) ? 16 : 15; + break; + case 32: + depth = (var->transp.length > 0) ? 32 : 24; + break; + default: + depth = var->bits_per_pixel; + break; } - } - return ret; -} - -static void radeonfb_on(struct fb_info *info) -{ - struct radeon_fb_device *rfbdev = info->par; - struct drm_device *dev = rfbdev->rdev->ddev; - struct drm_crtc *crtc; - struct drm_encoder *encoder; - int i; - - /* - * For each CRTC in this fb, find all associated encoders - * and turn them off, then turn off the CRTC. - */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - for (i = 0; i < rfbdev->crtc_count; i++) { - if (crtc->base.id == rfbdev->crtc_ids[i]) { - break; - } - } - - mutex_lock(&dev->mode_config.mutex); - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); - mutex_unlock(&dev->mode_config.mutex); - - /* Found a CRTC on this fb, now find encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - struct drm_encoder_helper_funcs *encoder_funcs; - - encoder_funcs = encoder->helper_private; - mutex_lock(&dev->mode_config.mutex); - encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); - mutex_unlock(&dev->mode_config.mutex); - } - } - } -} - -static void radeonfb_off(struct fb_info *info, int dpms_mode) -{ - struct radeon_fb_device *rfbdev = info->par; - struct drm_device *dev = rfbdev->rdev->ddev; - struct drm_crtc *crtc; - struct drm_encoder *encoder; - int i; - - /* - * For each CRTC in this fb, find all associated encoders - * and turn them off, then turn off the CRTC. 
- */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - for (i = 0; i < rfbdev->crtc_count; i++) { - if (crtc->base.id == rfbdev->crtc_ids[i]) { - break; - } - } - - /* Found a CRTC on this fb, now find encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - struct drm_encoder_helper_funcs *encoder_funcs; - - encoder_funcs = encoder->helper_private; - mutex_lock(&dev->mode_config.mutex); - encoder_funcs->dpms(encoder, dpms_mode); - mutex_unlock(&dev->mode_config.mutex); - } - } - if (dpms_mode == DRM_MODE_DPMS_OFF) { - mutex_lock(&dev->mode_config.mutex); - crtc_funcs->dpms(crtc, dpms_mode); - mutex_unlock(&dev->mode_config.mutex); + switch (depth) { + case 8: + var->red.offset = 0; + var->green.offset = 0; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 24: + var->red.offset = 8; + var->green.offset = 16; + var->blue.offset = 24; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 32: + var->red.offset = 8; + var->green.offset = 16; + var->blue.offset = 24; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 8; + var->transp.offset = 0; + break; + default: + return -EINVAL; } } -} - -int radeonfb_blank(int blank, struct fb_info *info) -{ - switch (blank) { - case FB_BLANK_UNBLANK: - radeonfb_on(info); - break; - case FB_BLANK_NORMAL: - radeonfb_off(info, DRM_MODE_DPMS_STANDBY); - break; - case FB_BLANK_HSYNC_SUSPEND: - radeonfb_off(info, DRM_MODE_DPMS_STANDBY); - break; - case FB_BLANK_VSYNC_SUSPEND: - radeonfb_off(info, DRM_MODE_DPMS_SUSPEND); - break; - case FB_BLANK_POWERDOWN: - radeonfb_off(info, DRM_MODE_DPMS_OFF); - break; - } +#endif return 0; } static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, - .fb_check_var = radeonfb_check_var, - .fb_set_par = radeonfb_set_par, - .fb_setcolreg = radeonfb_setcolreg, + .fb_check_var = radeon_fb_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, - .fb_pan_display = radeonfb_pan_display, - .fb_blank = radeonfb_blank, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, }; /** @@ -456,21 +159,6 @@ int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc) } EXPORT_SYMBOL(radeonfb_resize); -static struct drm_mode_set panic_mode; - -int radeonfb_panic(struct notifier_block *n, unsigned long ununsed, - void *panic_str) -{ - DRM_ERROR("panic occurred, switching back to text console\n"); - drm_crtc_helper_set_config(&panic_mode); - return 0; -} -EXPORT_SYMBOL(radeonfb_panic); - -static struct notifier_block paniced = { - .notifier_call = radeonfb_panic, -}; - static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { int aligned = width; @@ -495,11 +183,16 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo return aligned; } -int radeonfb_create(struct radeon_device *rdev, +static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { + .gamma_set = radeon_crtc_fb_gamma_set, +}; + +int radeonfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height, uint32_t surface_width, uint32_t surface_height, - struct 
radeon_framebuffer **rfb_p) + struct drm_framebuffer **fb_p) { + struct radeon_device *rdev = dev->dev_private; struct fb_info *info; struct radeon_fb_device *rfbdev; struct drm_framebuffer *fb = NULL; @@ -554,8 +247,8 @@ int radeonfb_create(struct radeon_device *rdev, list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); + *fb_p = fb; rfb = to_radeon_framebuffer(fb); - *rfb_p = rfb; rdev->fbdev_rfb = rfb; rdev->fbdev_robj = robj; @@ -564,7 +257,14 @@ int radeonfb_create(struct radeon_device *rdev, ret = -ENOMEM; goto out_unref; } + rfbdev = info->par; + rfbdev->helper.funcs = &radeon_fb_helper_funcs; + rfbdev->helper.dev = dev; + ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, + RADEONFB_CONN_LIMIT); + if (ret) + goto out_unref; if (fb_tiled) radeon_object_check_tiling(robj, 0, 0); @@ -577,33 +277,19 @@ int radeonfb_create(struct radeon_device *rdev, memset_io(fbptr, 0, aligned_size); strcpy(info->fix.id, "radeondrmfb"); - info->fix.type = FB_TYPE_PACKED_PIXELS; - info->fix.visual = FB_VISUAL_TRUECOLOR; - info->fix.type_aux = 0; - info->fix.xpanstep = 1; /* doing it in hw */ - info->fix.ypanstep = 1; /* doing it in hw */ - info->fix.ywrapstep = 0; - info->fix.accel = FB_ACCEL_NONE; - info->fix.type_aux = 0; + + drm_fb_helper_fill_fix(info, fb->pitch); + info->flags = FBINFO_DEFAULT; info->fbops = &radeonfb_ops; - info->fix.line_length = fb->pitch; + tmp = fb_gpuaddr - rdev->mc.vram_location; info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = size; info->screen_base = fbptr; info->screen_size = size; - info->pseudo_palette = fb->pseudo_palette; - info->var.xres_virtual = fb->width; - info->var.yres_virtual = fb->height; - info->var.bits_per_pixel = fb->bits_per_pixel; - info->var.xoffset = 0; - info->var.yoffset = 0; - info->var.activate = FB_ACTIVATE_NOW; - info->var.height = -1; - info->var.width = -1; - info->var.xres = fb_width; - info->var.yres = fb_height; + + drm_fb_helper_fill_var(info, fb, fb_width, fb_height); /* setup aperture base/size for vesafb takeover */ info->aperture_base = rdev->ddev->mode_config.fb_base; @@ -626,6 +312,9 @@ int radeonfb_create(struct radeon_device *rdev, DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitch); +#ifdef __BIG_ENDIAN + /* fill var sets defaults for this stuff - override + on big endian */ switch (fb->depth) { case 8: info->var.red.offset = 0; @@ -637,47 +326,6 @@ int radeonfb_create(struct radeon_device *rdev, info->var.transp.offset = 0; info->var.transp.length = 0; break; -#ifdef __LITTLE_ENDIAN - case 15: - info->var.red.offset = 10; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 5; - info->var.blue.length = 5; - info->var.transp.offset = 15; - info->var.transp.length = 1; - break; - case 16: - info->var.red.offset = 11; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 6; - info->var.blue.length = 5; - info->var.transp.offset = 0; - break; - case 24: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 32: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 24; - info->var.transp.length = 8; - break; 
-#else case 24: info->var.red.offset = 8; info->var.green.offset = 16; @@ -699,9 +347,9 @@ int radeonfb_create(struct radeon_device *rdev, info->var.transp.length = 8; break; default: -#endif break; } +#endif fb->fbdev = info; rfbdev->rfb = rfb; @@ -726,145 +374,10 @@ out: return ret; } -static int radeonfb_single_fb_probe(struct radeon_device *rdev) -{ - struct drm_crtc *crtc; - struct drm_connector *connector; - unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; - unsigned int surface_width = 0, surface_height = 0; - int new_fb = 0; - int crtc_count = 0; - int ret, i, conn_count = 0; - struct radeon_framebuffer *rfb; - struct fb_info *info; - struct radeon_fb_device *rfbdev; - struct drm_mode_set *modeset = NULL; - - /* first up get a count of crtcs now in use and new min/maxes width/heights */ - list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) { - if (drm_helper_crtc_in_use(crtc)) { - if (crtc->desired_mode) { - if (crtc->desired_mode->hdisplay < fb_width) - fb_width = crtc->desired_mode->hdisplay; - - if (crtc->desired_mode->vdisplay < fb_height) - fb_height = crtc->desired_mode->vdisplay; - - if (crtc->desired_mode->hdisplay > surface_width) - surface_width = crtc->desired_mode->hdisplay; - - if (crtc->desired_mode->vdisplay > surface_height) - surface_height = crtc->desired_mode->vdisplay; - } - crtc_count++; - } - } - - if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { - /* hmm everyone went away - assume VGA cable just fell out - and will come back later. */ - return 0; - } - - /* do we have an fb already? */ - if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) { - /* create an fb if we don't have one */ - ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb); - if (ret) { - return -EINVAL; - } - new_fb = 1; - } else { - struct drm_framebuffer *fb; - fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head); - rfb = to_radeon_framebuffer(fb); - - /* if someone hotplugs something bigger than we have already allocated, we are pwned. - As really we can't resize an fbdev that is in the wild currently due to fbdev - not really being designed for the lower layers moving stuff around under it. - - so in the grand style of things - punt. 
*/ - if ((fb->width < surface_width) || (fb->height < surface_height)) { - DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); - return -EINVAL; - } - } - - info = rfb->base.fbdev; - rdev->fbdev_info = info; - rfbdev = info->par; - - crtc_count = 0; - /* okay we need to setup new connector sets in the crtcs */ - list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - modeset = &radeon_crtc->mode_set; - modeset->fb = &rfb->base; - conn_count = 0; - list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) { - if (connector->encoder) - if (connector->encoder->crtc == modeset->crtc) { - modeset->connectors[conn_count] = connector; - conn_count++; - if (conn_count > RADEONFB_CONN_LIMIT) - BUG(); - } - } - - for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++) - modeset->connectors[i] = NULL; - - - rfbdev->crtc_ids[crtc_count++] = crtc->base.id; - - modeset->num_connectors = conn_count; - if (modeset->crtc->desired_mode) { - if (modeset->mode) { - drm_mode_destroy(rdev->ddev, modeset->mode); - } - modeset->mode = drm_mode_duplicate(rdev->ddev, - modeset->crtc->desired_mode); - } - } - rfbdev->crtc_count = crtc_count; - - if (new_fb) { - info->var.pixclock = -1; - if (register_framebuffer(info) < 0) - return -EINVAL; - } else { - radeonfb_set_par(info); - } - printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, - info->fix.id); - - /* Switch back to kernel console on panic */ - panic_mode = *modeset; - atomic_notifier_chain_register(&panic_notifier_list, &paniced); - printk(KERN_INFO "registered panic notifier\n"); - - return 0; -} - int radeonfb_probe(struct drm_device *dev) { int ret; - - /* something has changed in the lower levels of hell - deal with it - here */ - - /* two modes : a) 1 fb to rule all crtcs. - b) one fb per crtc. - two actions 1) new connected device - 2) device removed. - case a/1 : if the fb surface isn't big enough - resize the surface fb. - if the fb size isn't big enough - resize fb into surface. - if everything big enough configure the new crtc/etc. - case a/2 : undo the configuration - possibly resize down the fb to fit the new configuration. - case b/1 : see if it is on a new crtc - setup a new fb and add it. - case b/2 : teardown the new fb. 
- */ - ret = radeonfb_single_fb_probe(dev->dev_private); + ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create); return ret; } EXPORT_SYMBOL(radeonfb_probe); @@ -880,16 +393,17 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) } info = fb->fbdev; if (info) { + struct radeon_fb_device *rfbdev = info->par; robj = rfb->obj->driver_private; unregister_framebuffer(info); radeon_object_kunmap(robj); radeon_object_unpin(robj); + drm_fb_helper_free(&rfbdev->helper); framebuffer_release(info); } printk(KERN_INFO "unregistered panic notifier\n"); - atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); - memset(&panic_mode, 0, sizeof(struct drm_mode_set)); + return 0; } EXPORT_SYMBOL(radeonfb_remove); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3b09a1f2d8f..20e9509a713 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -195,8 +195,6 @@ struct radeon_crtc { bool enabled; bool can_tile; uint32_t crtc_offset; - struct radeon_framebuffer *fbdev_fb; - struct drm_mode_set mode_set; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; int cursor_width; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index db92a83f8ca..b0427a70fcb 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -259,6 +259,8 @@ struct drm_framebuffer { void *fbdev; u32 pseudo_palette[17]; struct list_head filp_head; + /* if you are using the helper */ + void *helper_private; }; struct drm_property_blob { diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h new file mode 100644 index 00000000000..88fffbdfa26 --- /dev/null +++ b/include/drm/drm_fb_helper.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2006-2009 Red Hat Inc. + * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie + * + * DRM framebuffer helper functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ * + * Authors: + * Dave Airlie + * Jesse Barnes + */ +#ifndef DRM_FB_HELPER_H +#define DRM_FB_HELPER_H + +struct drm_fb_helper_crtc { + uint32_t crtc_id; + struct drm_mode_set mode_set; +}; + +struct drm_fb_helper_funcs { + void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno); +}; + +struct drm_fb_helper { + struct drm_framebuffer *fb; + struct drm_device *dev; + struct drm_display_mode *mode; + int crtc_count; + struct drm_fb_helper_crtc *crtc_info; + struct drm_fb_helper_funcs *funcs; + int conn_limit; + struct list_head kernel_fb_list; +}; + +int drm_fb_helper_single_fb_probe(struct drm_device *dev, + int (*fb_create)(struct drm_device *dev, + uint32_t fb_width, + uint32_t fb_height, + uint32_t surface_width, + uint32_t surface_height, + struct drm_framebuffer **fb_ptr)); +int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, + int max_conn); +void drm_fb_helper_free(struct drm_fb_helper *helper); +int drm_fb_helper_blank(int blank, struct fb_info *info); +int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info); +int drm_fb_helper_set_par(struct fb_info *info); +int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); +int drm_fb_helper_setcolreg(unsigned regno, + unsigned red, + unsigned green, + unsigned blue, + unsigned transp, + struct fb_info *info); + +void drm_fb_helper_restore(void); +void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, + uint32_t fb_width, uint32_t fb_height); +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch); + +#endif -- cgit v1.2.3-70-g09d2 From 98a56ab382079f777e261e14512cbd4fb2107af4 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 17 Sep 2009 08:48:28 -0400 Subject: ext4: Fix spelling typo in the trace format for trace_ext4_da_writepages() Signed-off-by: "Theodore Ts'o" --- include/trace/events/ext4.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 8d433c4e370..15051d2d121 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -243,7 +243,7 @@ TRACE_EVENT(ext4_da_writepages, __entry->range_cyclic = wbc->range_cyclic; ), - TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d", + TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d", jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, __entry->range_end, __entry->nonblocking, -- cgit v1.2.3-70-g09d2 From b3a3ca8ca0c3c29abc5b2bfe94bb14f3f4590df9 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 31 Aug 2009 23:13:11 -0400 Subject: ext4: Add new tracepoint: trace_ext4_da_write_pages() Add a new tracepoint which shows the pages that will be written using write_cache_pages() by ext4_da_writepages(). 
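For reference, the tracepoint takes the inode plus the delayed-allocation descriptor (struct mpage_da_data, which this patch moves into ext4.h); a minimal sketch of the call pattern, with the surrounding writeback logic elided:

	struct mpage_da_data mpd;

	mpd.inode = inode;
	mpd.wbc = wbc;
	/* b_blocknr/b_size/b_state describe the extent being written,
	 * first_page/next_page the page range, io_done/pages_written the progress */
	trace_ext4_da_write_pages(inode, &mpd);

The call site added below fires inside ext4_da_writepages()'s retry loop, once mpd has been filled in for the current pass.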
Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 15 +++++++++++++++ fs/ext4/inode.c | 13 +------------ include/trace/events/ext4.h | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 41a76e163b9..81014f4ed22 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -113,6 +113,21 @@ struct ext4_allocation_request { unsigned int flags; }; +/* + * For delayed allocation tracking + */ +struct mpage_da_data { + struct inode *inode; + sector_t b_blocknr; /* start block number of extent */ + size_t b_size; /* size of extent */ + unsigned long b_state; /* state of the extent */ + unsigned long first_page, next_page; /* extent of pages */ + struct writeback_control *wbc; + int io_done; + int pages_written; + int retval; +}; + /* * Special inodes numbers */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ff659e75757..17802a96af9 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1875,18 +1875,6 @@ static void ext4_da_page_release_reservation(struct page *page, * Delayed allocation stuff */ -struct mpage_da_data { - struct inode *inode; - sector_t b_blocknr; /* start block number of extent */ - size_t b_size; /* size of extent */ - unsigned long b_state; /* state of the extent */ - unsigned long first_page, next_page; /* extent of pages */ - struct writeback_control *wbc; - int io_done; - int pages_written; - int retval; -}; - /* * mpage_da_submit_io - walks through extent of pages and try to write * them with writepage() call back @@ -2863,6 +2851,7 @@ retry: mpd.io_done = 1; ret = MPAGE_DA_EXTENT_TAIL; } + trace_ext4_da_write_pages(inode, &mpd); wbc->nr_to_write -= mpd.pages_written; ext4_journal_stop(handle); diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 15051d2d121..dd43399288e 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -251,6 +251,40 @@ TRACE_EVENT(ext4_da_writepages, __entry->range_cyclic) ); +TRACE_EVENT(ext4_da_write_pages, + TP_PROTO(struct inode *inode, struct mpage_da_data *mpd), + + TP_ARGS(inode, mpd), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u64, b_blocknr ) + __field( __u32, b_size ) + __field( __u32, b_state ) + __field( unsigned long, first_page ) + __field( int, io_done ) + __field( int, pages_written ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->b_blocknr = mpd->b_blocknr; + __entry->b_size = mpd->b_size; + __entry->b_state = mpd->b_state; + __entry->first_page = mpd->first_page; + __entry->io_done = mpd->io_done; + __entry->pages_written = mpd->pages_written; + ), + + TP_printk("dev %s ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d", + jbd2_dev_to_name(__entry->dev), __entry->ino, + __entry->b_blocknr, __entry->b_size, + __entry->b_state, __entry->first_page, + __entry->io_done, __entry->pages_written) +); + TRACE_EVENT(ext4_da_writepages_result, TP_PROTO(struct inode *inode, struct writeback_control *wbc, int ret, int pages_written), -- cgit v1.2.3-70-g09d2 From 8aa84ad8d6c740a04386f599694609ee4998e82e Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Fri, 24 Jul 2009 15:25:05 +0200 Subject: [CPUFREQ] Introduce global, not per core: /sys/devices/system/cpu/cpufreq Currently everything in the cpufreq layer is per core based. This does not reflect reality, for example ondemand on conservative governors have global sysfs variables. 
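As a hedged illustration of where such globals could live once a shared kobject exists (all names below are hypothetical and not part of this patch), a governor-wide tunable might be exposed like this:

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/kobject.h>
	#include <linux/sysfs.h>
	#include <linux/cpufreq.h>

	static unsigned int example_rate;	/* hypothetical governor-wide tunable */

	static ssize_t show_example_rate(struct kobject *kobj,
					 struct attribute *attr, char *buf)
	{
		return sprintf(buf, "%u\n", example_rate);
	}

	static struct global_attr example_rate_attr = {
		.attr = { .name = "example_rate", .mode = 0444 },
		.show = show_example_rate,
	};

	/* creates /sys/devices/system/cpu/cpufreq/example_rate */
	static int __init example_governor_init(void)
	{
		return sysfs_create_file(cpufreq_global_kobject,
					 &example_rate_attr.attr);
	}
	late_initcall(example_governor_init);

The struct global_attr and cpufreq_global_kobject used above are the ones introduced by the hunks below.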
Introduce a global cpufreq directory and add the kobject to the governor struct, so that governors can easily access it. The directory is initialized in the cpufreq_core_init initcall and thus will always be created if cpufreq is compiled in, even if no cpufreq driver is active later. Signed-off-by: Thomas Renninger Signed-off-by: Dave Jones --- drivers/cpufreq/cpufreq.c | 9 ++++++++- include/linux/cpufreq.h | 10 ++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bbd5c2164ab..4da28444b23 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -686,6 +686,9 @@ static struct attribute *default_attrs[] = { NULL }; +struct kobject *cpufreq_global_kobject; +EXPORT_SYMBOL(cpufreq_global_kobject); + #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) #define to_attr(a) container_of(a, struct freq_attr, attr) @@ -1935,7 +1938,11 @@ static int __init cpufreq_core_init(void) per_cpu(policy_cpu, cpu) = -1; init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); } + + cpufreq_global_kobject = kobject_create_and_add("cpufreq", + &cpu_sysdev_class.kset.kobj); + BUG_ON(!cpufreq_global_kobject); + return 0; } - core_initcall(cpufreq_core_init); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 161042746af..44717eb4763 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -65,6 +65,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb, struct cpufreq_governor; +/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ +extern struct kobject *cpufreq_global_kobject; + #define CPUFREQ_ETERNAL (-1) struct cpufreq_cpuinfo { unsigned int max_freq; @@ -274,6 +277,13 @@ struct freq_attr { ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count); }; +struct global_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *a, struct attribute *b, + const char *c, size_t count); +}; /********************************************************************* * CPUFREQ 2.6. INTERFACE * -- cgit v1.2.3-70-g09d2 From fa8a123855e20068204982596b8fafceb1a67f0b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 26 Aug 2009 13:13:37 +1000 Subject: drm/mm: add ability to dump mm lists via debugfs This adds code to the drm_mm to talk to debugfs, and adds support to radeon to add the VRAM and GTT mm lists to debugfs. I tested with spinlock debugging and it doesn't give out. Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 21 ++++++++++++++ drivers/gpu/drm/radeon/radeon_ttm.c | 56 +++++++++++++++++++++++++++++++++++++ include/drm/drm_mm.h | 4 +++ 3 files changed, 81 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 3e47869d6da..c861d80fd77 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -44,6 +44,7 @@ #include "drmP.h" #include "drm_mm.h" #include +#include #define MM_UNUSED_TARGET 4 @@ -370,3 +371,23 @@ void drm_mm_takedown(struct drm_mm * mm) BUG_ON(mm->num_unused != 0); } EXPORT_SYMBOL(drm_mm_takedown); + +#if defined(CONFIG_DEBUG_FS) +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +{ + struct drm_mm_node *entry; + int total_used = 0, total_free = 0, total = 0; + + list_for_each_entry(entry, &mm->ml_entry, ml_entry) { + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? 
"free" : "used"); + total += entry->size; + if (entry->free) + total_free += entry->size; + else + total_used += entry->size; + } + seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used); + return 0; +} +EXPORT_SYMBOL(drm_mm_dump_table); +#endif diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0a85e7b5d59..dc7a44274ea 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -35,11 +35,14 @@ #include #include #include +#include #include "radeon_reg.h" #include "radeon.h" #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) +static int radeon_ttm_debugfs_init(struct radeon_device *rdev); + static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) { struct radeon_mman *mman; @@ -504,6 +507,12 @@ int radeon_ttm_init(struct radeon_device *rdev) if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; } + + r = radeon_ttm_debugfs_init(rdev); + if (r) { + DRM_ERROR("Failed to init debugfs\n"); + return r; + } return 0; } @@ -678,3 +687,50 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev) gtt->bound = false; return >t->backend; } + +#define RADEON_DEBUGFS_MEM_TYPES 2 + +static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; +static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; + +#if defined(CONFIG_DEBUG_FS) +static int radeon_mm_dump_table(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + int ret; + struct ttm_bo_global *glob = rdev->mman.bdev.glob; + + spin_lock(&glob->lru_lock); + ret = drm_mm_dump_table(m, mm); + spin_unlock(&glob->lru_lock); + return ret; +} +#endif + +static int radeon_ttm_debugfs_init(struct radeon_device *rdev) +{ + unsigned i; + +#if defined(CONFIG_DEBUG_FS) + for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { + if (i == 0) + sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); + else + sprintf(radeon_mem_types_names[i], "radeon_gtt_mm"); + radeon_mem_types_list[i].name = radeon_mem_types_names[i]; + radeon_mem_types_list[i].show = &radeon_mm_dump_table; + radeon_mem_types_list[i].driver_features = 0; + if (i == 0) + radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager; + else + radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; + + } + return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES); + +#endif + return 0; +} diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index f8332073d27..bc5a87e8aee 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -96,4 +96,8 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) return block->mm; } +#ifdef CONFIG_DEBUG_FS +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); +#endif + #endif -- cgit v1.2.3-70-g09d2 From a3a0544b2c84e1d7a2022b558ecf66d8c6a8dd93 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 31 Aug 2009 15:16:30 +1000 Subject: drm/kms: add explicit encoder disable function and detach harder. For shared tv-out and VGA encoders, we really need to know if the encoder is just being switched off temporarily in blanking or if we are really disabling it hard. Also we need to try harder to disconnect encoders from unused connectors so we can share more efficently. 
(shared encoders stuff is coming in radeon tv-out support) Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc_helper.c | 24 ++++++++++++++++++++---- include/drm/drm_crtc_helper.h | 2 ++ 2 files changed, 22 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 205349ea107..eea5e6c4099 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -260,13 +260,27 @@ EXPORT_SYMBOL(drm_helper_crtc_in_use); void drm_helper_disable_unused_functions(struct drm_device *dev) { struct drm_encoder *encoder; + struct drm_connector *connector; struct drm_encoder_helper_funcs *encoder_funcs; struct drm_crtc *crtc; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (!connector->encoder) + continue; + if (connector->status == connector_status_disconnected) + connector->encoder = NULL; + } + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { encoder_funcs = encoder->helper_private; - if (!drm_helper_encoder_in_use(encoder)) - (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + if (!drm_helper_encoder_in_use(encoder)) { + if (encoder_funcs->disable) + (*encoder_funcs->disable)(encoder); + else + (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + } + /* disconnector encoder from any connector */ + encoder->crtc = NULL; } list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -411,7 +425,7 @@ static int drm_pick_crtcs(struct drm_device *dev, c = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if ((connector->encoder->possible_crtcs & (1 << c)) == 0) { + if ((encoder->possible_crtcs & (1 << c)) == 0) { c++; continue; } @@ -496,8 +510,10 @@ static void drm_setup_crtcs(struct drm_device *dev) mode->name, crtc->base.id); crtc->desired_mode = mode; connector->encoder->crtc = crtc; - } else + } else { connector->encoder->crtc = NULL; + connector->encoder = NULL; + } i++; } diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index e44a4f87303..4c8dacaf4f5 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -79,6 +79,8 @@ struct drm_encoder_helper_funcs { /* detect for DAC style encoders */ enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector); + /* disable encoder when not in use - more explicit than dpms off */ + void (*disable)(struct drm_encoder *encoder); }; struct drm_connector_helper_funcs { -- cgit v1.2.3-70-g09d2 From a649637c73a36174287a403cdda7607177d64523 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Fri, 28 Aug 2009 08:45:01 -0400 Subject: nfsd41: bound forechannel drc size by memory usage By using the requested ca_maxresponsesize_cached * ca_maxresponses to bound a forechannel drc request size, clients can tailor a session to usage. For example, an I/O session (READ/WRITE only) can have a much smaller ca_maxresponsesize_cached (for only WRITE compound responses) and a lot larger ca_maxresponses to service a large in-flight data window. Signed-off-by: Andy Adamson Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 66 +++++++++++++++++++++++++++++++++------------- include/linux/nfsd/state.h | 8 ++++-- 2 files changed, 54 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index b44a2cfde6f..02b3ddd0bee 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -414,34 +414,64 @@ gen_sessionid(struct nfsd4_session *ses) } /* - * Give the client the number of slots it requests bound by - * NFSD_MAX_SLOTS_PER_SESSION and by nfsd_drc_max_mem. + * The protocol defines ca_maxresponssize_cached to include the size of + * the rpc header, but all we need to cache is the data starting after + * the end of the initial SEQUENCE operation--the rest we regenerate + * each time. Therefore we can advertise a ca_maxresponssize_cached + * value that is the number of bytes in our cache plus a few additional + * bytes. In order to stay on the safe side, and not promise more than + * we can cache, those additional bytes must be the minimum possible: 24 + * bytes of rpc header (xid through accept state, with AUTH_NULL + * verifier), 12 for the compound header (with zero-length tag), and 44 + * for the SEQUENCE op response: + */ +#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) + +/* + * Give the client the number of ca_maxresponsesize_cached slots it + * requests, of size bounded by NFSD_SLOT_CACHE_SIZE, + * NFSD_MAX_MEM_PER_SESSION, and nfsd_drc_max_mem. Do not allow more + * than NFSD_MAX_SLOTS_PER_SESSION. * - * If we run out of reserved DRC memory we should (up to a point) re-negotiate - * active sessions and reduce their slot usage to make rooom for new - * connections. For now we just fail the create session. + * If we run out of reserved DRC memory we should (up to a point) + * re-negotiate active sessions and reduce their slot usage to make + * rooom for new connections. For now we just fail the create session. 
*/ -static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan) +static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan) { - int mem; + int mem, size = fchan->maxresp_cached; if (fchan->maxreqs < 1) return nfserr_inval; - else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) - fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; - mem = fchan->maxreqs * NFSD_SLOT_CACHE_SIZE; + if (size < NFSD_MIN_HDR_SEQ_SZ) + size = NFSD_MIN_HDR_SEQ_SZ; + size -= NFSD_MIN_HDR_SEQ_SZ; + if (size > NFSD_SLOT_CACHE_SIZE) + size = NFSD_SLOT_CACHE_SIZE; + + /* bound the maxreqs by NFSD_MAX_MEM_PER_SESSION */ + mem = fchan->maxreqs * size; + if (mem > NFSD_MAX_MEM_PER_SESSION) { + fchan->maxreqs = NFSD_MAX_MEM_PER_SESSION / size; + if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) + fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; + mem = fchan->maxreqs * size; + } spin_lock(&nfsd_drc_lock); - if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) - mem = ((nfsd_drc_max_mem - nfsd_drc_mem_used) / - NFSD_SLOT_CACHE_SIZE) * NFSD_SLOT_CACHE_SIZE; + /* bound the total session drc memory ussage */ + if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) { + fchan->maxreqs = (nfsd_drc_max_mem - nfsd_drc_mem_used) / size; + mem = fchan->maxreqs * size; + } nfsd_drc_mem_used += mem; spin_unlock(&nfsd_drc_lock); - fchan->maxreqs = mem / NFSD_SLOT_CACHE_SIZE; if (fchan->maxreqs == 0) return nfserr_resource; + + fchan->maxresp_cached = size + NFSD_MIN_HDR_SEQ_SZ; return 0; } @@ -466,9 +496,6 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp, fchan->maxresp_sz = maxcount; session_fchan->maxresp_sz = fchan->maxresp_sz; - session_fchan->maxresp_cached = NFSD_SLOT_CACHE_SIZE; - fchan->maxresp_cached = session_fchan->maxresp_cached; - /* Use the client's maxops if possible */ if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND) fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND; @@ -478,9 +505,12 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp, * recover pages from existing sessions. For now fail session * creation. */ - status = set_forechannel_maxreqs(fchan); + status = set_forechannel_drc_size(fchan); + session_fchan->maxresp_cached = fchan->maxresp_cached; session_fchan->maxreqs = fchan->maxreqs; + + dprintk("%s status %d\n", __func__, status); return status; } diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index fb0c404c7c5..ff0b771efde 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -92,13 +92,17 @@ struct nfs4_cb_conn { struct rpc_cred * cb_cred; }; -/* Maximum number of slots per session. 128 is useful for long haul TCP */ -#define NFSD_MAX_SLOTS_PER_SESSION 128 +/* Maximum number of slots per session. 160 is useful for long haul TCP */ +#define NFSD_MAX_SLOTS_PER_SESSION 160 /* Maximum number of pages per slot cache entry */ #define NFSD_PAGES_PER_SLOT 1 #define NFSD_SLOT_CACHE_SIZE PAGE_SIZE /* Maximum number of operations per session compound */ #define NFSD_MAX_OPS_PER_COMPOUND 16 +/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ +#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 +#define NFSD_MAX_MEM_PER_SESSION \ + (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) struct nfsd4_cache_entry { __be32 ce_status; -- cgit v1.2.3-70-g09d2 From 557ce2646e775f6bda734dd92b10d4780874b9c7 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Fri, 28 Aug 2009 08:45:04 -0400 Subject: nfsd41: replace page based DRC with buffer based DRC Use NFSD_SLOT_CACHE_SIZE size buffers for sessions DRC instead of holding nfsd pages in cache. 
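In outline, each forechannel slot now embeds its own bounded reply buffer; a condensed sketch using the names and sizes this patch introduces (the full hunks follow below):

	struct nfsd4_slot {
		bool	sl_inuse;
		bool	sl_cachethis;	/* client asked for this reply to be cached */
		u16	sl_opcnt;
		u32	sl_seqid;
		__be32	sl_status;
		u32	sl_datalen;	/* bytes cached past the SEQUENCE reply */
		char	sl_data[];	/* at most NFSD_SLOT_CACHE_SIZE (1024) bytes */
	};

Caching a response then reduces to a read_bytes_from_xdr_buf() of sl_datalen bytes into sl_data, and replay to a memcpy() of those bytes back into the encode buffer once the SEQUENCE operation has been re-encoded.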
Connectathon testing has shown that 1024 bytes for encoded compound operation responses past the sequence operation is sufficient, 512 bytes is a little too small. Set NFSD_SLOT_CACHE_SIZE to 1024. Allocate memory for the session DRC in the CREATE_SESSION operation to guarantee that the memory resource is available for caching responses. Allocate each slot individually in preparation for slot table size negotiation. Remove struct nfsd4_cache_entry and helper functions for the old page-based DRC. The iov_len calculation in nfs4svc_encode_compoundres is now always correct. Replay is now done in nfsd4_sequence under the state lock, so the session ref count is only bumped on non-replay. Clean up the nfs4svc_encode_compoundres session logic. The nfsd4_compound_state statp pointer is also not used. Remove nfsd4_set_statp(). Move useful nfsd4_cache_entry fields into nfsd4_slot. Signed-off-by: Andy Adamson --- fs/nfsd/nfs4state.c | 211 +++++++++++++-------------------------------- fs/nfsd/nfs4xdr.c | 17 ++-- fs/nfsd/nfssvc.c | 4 - include/linux/nfsd/state.h | 27 +++--- include/linux/nfsd/xdr4.h | 5 +- 5 files changed, 79 insertions(+), 185 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c9a45f49019..46e9ac52687 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -514,12 +514,23 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp, return status; } +static void +free_session_slots(struct nfsd4_session *ses) +{ + int i; + + for (i = 0; i < ses->se_fchannel.maxreqs; i++) + kfree(ses->se_slots[i]); +} + static int alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses) { struct nfsd4_session *new, tmp; - int idx, status = nfserr_serverfault, slotsize; + struct nfsd4_slot *sp; + int idx, slotsize, cachesize, i; + int status; memset(&tmp, 0, sizeof(tmp)); @@ -530,14 +541,27 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, if (status) goto out; - /* allocate struct nfsd4_session and slot table in one piece */ - slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot); + BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot) + + sizeof(struct nfsd4_session) > PAGE_SIZE); + + status = nfserr_serverfault; + /* allocate struct nfsd4_session and slot table pointers in one piece */ + slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *); new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL); if (!new) goto out; memcpy(new, &tmp, sizeof(*new)); + /* allocate each struct nfsd4_slot and data cache in one piece */ + cachesize = new->se_fchannel.maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; + for (i = 0; i < new->se_fchannel.maxreqs; i++) { + sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL); + if (!sp) + goto out_free; + new->se_slots[i] = sp; + } + new->se_client = clp; gen_sessionid(new); idx = hash_sessionid(&new->se_sessionid); @@ -554,6 +578,10 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, status = nfs_ok; out: return status; +out_free: + free_session_slots(new); + kfree(new); + goto out; } /* caller must hold sessionid_lock */ @@ -596,22 +624,16 @@ release_session(struct nfsd4_session *ses) nfsd4_put_session(ses); } -static void nfsd4_release_respages(struct page **respages, short resused); - void free_session(struct kref *kref) { struct nfsd4_session *ses; - int i; ses = container_of(kref, struct nfsd4_session, se_ref); - for (i = 0; i < ses->se_fchannel.maxreqs; i++) { - struct nfsd4_cache_entry *e = 
&ses->se_slots[i].sl_cache_entry; - nfsd4_release_respages(e->ce_respages, e->ce_resused); - } spin_lock(&nfsd_drc_lock); nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE; spin_unlock(&nfsd_drc_lock); + free_session_slots(ses); kfree(ses); } @@ -968,116 +990,31 @@ out_err: return; } -void -nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) -{ - struct nfsd4_compoundres *resp = rqstp->rq_resp; - - resp->cstate.statp = statp; -} - -/* - * Dereference the result pages. - */ -static void -nfsd4_release_respages(struct page **respages, short resused) -{ - int i; - - dprintk("--> %s\n", __func__); - for (i = 0; i < resused; i++) { - if (!respages[i]) - continue; - put_page(respages[i]); - respages[i] = NULL; - } -} - -static void -nfsd4_copy_pages(struct page **topages, struct page **frompages, short count) -{ - int i; - - for (i = 0; i < count; i++) { - topages[i] = frompages[i]; - if (!topages[i]) - continue; - get_page(topages[i]); - } -} - /* - * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous - * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total - * length of the XDR response is less than se_fmaxresp_cached - * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages is used for a - * of the reply (e.g. readdir). - * - * Store the base and length of the rq_req.head[0] page - * of the NFSv4.1 data, just past the rpc header. + * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size. */ void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) { - struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; - struct svc_rqst *rqstp = resp->rqstp; - struct kvec *resv = &rqstp->rq_res.head[0]; - - dprintk("--> %s entry %p\n", __func__, entry); + struct nfsd4_slot *slot = resp->cstate.slot; + unsigned int base; - nfsd4_release_respages(entry->ce_respages, entry->ce_resused); - entry->ce_opcnt = resp->opcnt; - entry->ce_status = resp->cstate.status; + dprintk("--> %s slot %p\n", __func__, slot); - /* - * Don't need a page to cache just the sequence operation - the slot - * does this for us! - */ + slot->sl_opcnt = resp->opcnt; + slot->sl_status = resp->cstate.status; if (nfsd4_not_cached(resp)) { - entry->ce_resused = 0; - entry->ce_rpchdrlen = 0; - dprintk("%s Just cache SEQUENCE. ce_cachethis %d\n", __func__, - resp->cstate.slot->sl_cache_entry.ce_cachethis); + slot->sl_datalen = 0; return; } - entry->ce_resused = rqstp->rq_resused; - if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1) - entry->ce_resused = NFSD_PAGES_PER_SLOT + 1; - nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages, - entry->ce_resused); - entry->ce_datav.iov_base = resp->cstate.statp; - entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp - - (char *)page_address(rqstp->rq_respages[0])); - /* Current request rpc header length*/ - entry->ce_rpchdrlen = (char *)resp->cstate.statp - - (char *)page_address(rqstp->rq_respages[0]); -} - -/* - * We keep the rpc header, but take the nfs reply from the replycache. 
- */ -static int -nfsd41_copy_replay_data(struct nfsd4_compoundres *resp, - struct nfsd4_cache_entry *entry) -{ - struct svc_rqst *rqstp = resp->rqstp; - struct kvec *resv = &resp->rqstp->rq_res.head[0]; - int len; - - /* Current request rpc header length*/ - len = (char *)resp->cstate.statp - - (char *)page_address(rqstp->rq_respages[0]); - if (entry->ce_datav.iov_len + len > PAGE_SIZE) { - dprintk("%s v41 cached reply too large (%Zd).\n", __func__, - entry->ce_datav.iov_len); - return 0; - } - /* copy the cached reply nfsd data past the current rpc header */ - memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base, - entry->ce_datav.iov_len); - resv->iov_len = len + entry->ce_datav.iov_len; - return 1; + slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap; + base = (char *)resp->cstate.datap - + (char *)resp->xbuf->head[0].iov_base; + if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data, + slot->sl_datalen)) + WARN("%s: sessions DRC could not cache compound\n", __func__); + return; } /* @@ -1095,14 +1032,14 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, struct nfsd4_slot *slot = resp->cstate.slot; dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__, - resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis); + resp->opcnt, resp->cstate.slot->sl_cachethis); /* Encode the replayed sequence operation */ op = &args->ops[resp->opcnt - 1]; nfsd4_encode_operation(resp, op); /* Return nfserr_retry_uncached_rep in next operation. */ - if (args->opcnt > 1 && slot->sl_cache_entry.ce_cachethis == 0) { + if (args->opcnt > 1 && slot->sl_cachethis == 0) { op = &args->ops[resp->opcnt++]; op->status = nfserr_retry_uncached_rep; nfsd4_encode_operation(resp, op); @@ -1111,57 +1048,29 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, } /* - * Keep the first page of the replay. Copy the NFSv4.1 data from the first - * cached page. Replace any futher replay pages from the cache. + * The sequence operation is not cached because we can use the slot and + * session values. */ __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, struct nfsd4_sequence *seq) { - struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; + struct nfsd4_slot *slot = resp->cstate.slot; __be32 status; - dprintk("--> %s entry %p\n", __func__, entry); - - /* - * If this is just the sequence operation, we did not keep - * a page in the cache entry because we can just use the - * slot info stored in struct nfsd4_sequence that was checked - * against the slot in nfsd4_sequence(). - * - * This occurs when seq->cachethis is FALSE, or when the client - * session inactivity timer fires and a solo sequence operation - * is sent (lease renewal). - */ + dprintk("--> %s slot %p\n", __func__, slot); /* Either returns 0 or nfserr_retry_uncached */ status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); if (status == nfserr_retry_uncached_rep) return status; - if (!nfsd41_copy_replay_data(resp, entry)) { - /* - * Not enough room to use the replay rpc header, send the - * cached header. Release all the allocated result pages. - */ - svc_free_res_pages(resp->rqstp); - nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages, - entry->ce_resused); - } else { - /* Release all but the first allocated result page */ - - resp->rqstp->rq_resused--; - svc_free_res_pages(resp->rqstp); - - nfsd4_copy_pages(&resp->rqstp->rq_respages[1], - &entry->ce_respages[1], - entry->ce_resused - 1); - } + /* The sequence operation has been encoded, cstate->datap set. 
*/ + memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen); - resp->rqstp->rq_resused = entry->ce_resused; - resp->opcnt = entry->ce_opcnt; - resp->cstate.iovlen = entry->ce_datav.iov_len + entry->ce_rpchdrlen; - status = entry->ce_status; + resp->opcnt = slot->sl_opcnt; + resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen); + status = slot->sl_status; return status; } @@ -1493,7 +1402,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, if (seq->slotid >= session->se_fchannel.maxreqs) goto out; - slot = &session->se_slots[seq->slotid]; + slot = session->se_slots[seq->slotid]; dprintk("%s: slotid %d\n", __func__, seq->slotid); /* We do not negotiate the number of slots yet, so set the @@ -1506,7 +1415,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, cstate->slot = slot; cstate->session = session; /* Return the cached reply status and set cstate->status - * for nfsd4_svc_encode_compoundres processing */ + * for nfsd4_proc_compound processing */ status = nfsd4_replay_cache_entry(resp, seq); cstate->status = nfserr_replay_cache; goto out; @@ -1517,7 +1426,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, /* Success! bump slot seqid */ slot->sl_inuse = true; slot->sl_seqid = seq->seqid; - slot->sl_cache_entry.ce_cachethis = seq->cachethis; + slot->sl_cachethis = seq->cachethis; cstate->slot = slot; cstate->session = session; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 20c5e3db066..00ed16a1849 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3057,6 +3057,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr, WRITE32(0); ADJUST_ARGS(); + resp->cstate.datap = p; /* DRC cache data pointer */ return 0; } @@ -3159,7 +3160,7 @@ static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp) return status; session = resp->cstate.session; - if (session == NULL || slot->sl_cache_entry.ce_cachethis == 0) + if (session == NULL || slot->sl_cachethis == 0) return status; if (resp->opcnt >= args->opcnt) @@ -3284,6 +3285,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo /* * All that remains is to write the tag and operation count... */ + struct nfsd4_compound_state *cs = &resp->cstate; struct kvec *iov; p = resp->tagp; *p++ = htonl(resp->taglen); @@ -3297,15 +3299,10 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo iov = &rqstp->rq_res.head[0]; iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base; BUG_ON(iov->iov_len > PAGE_SIZE); - if (nfsd4_has_session(&resp->cstate)) { - if (resp->cstate.status == nfserr_replay_cache && - !nfsd4_not_cached(resp)) { - iov->iov_len = resp->cstate.iovlen; - } else { - nfsd4_store_cache_entry(resp); - dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); - resp->cstate.slot->sl_inuse = 0; - } + if (nfsd4_has_session(cs) && cs->status != nfserr_replay_cache) { + nfsd4_store_cache_entry(resp); + dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); + resp->cstate.slot->sl_inuse = false; nfsd4_put_session(resp->cstate.session); } return 1; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 675d395c4ab..4472449c093 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -577,10 +577,6 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) + rqstp->rq_res.head[0].iov_len; rqstp->rq_res.head[0].iov_len += sizeof(__be32); - /* NFSv4.1 DRC requires statp */ - if (rqstp->rq_vers == 4) - nfsd4_set_statp(rqstp, statp); - /* Now call the procedure handler, and encode NFS status. 
*/ nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); nfserr = map_new_errors(rqstp->rq_vers, nfserr); diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index ff0b771efde..70ef5f4abbb 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -94,30 +94,23 @@ struct nfs4_cb_conn { /* Maximum number of slots per session. 160 is useful for long haul TCP */ #define NFSD_MAX_SLOTS_PER_SESSION 160 -/* Maximum number of pages per slot cache entry */ -#define NFSD_PAGES_PER_SLOT 1 -#define NFSD_SLOT_CACHE_SIZE PAGE_SIZE /* Maximum number of operations per session compound */ #define NFSD_MAX_OPS_PER_COMPOUND 16 +/* Maximum session per slot cache size */ +#define NFSD_SLOT_CACHE_SIZE 1024 /* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ #define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 #define NFSD_MAX_MEM_PER_SESSION \ (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) -struct nfsd4_cache_entry { - __be32 ce_status; - struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */ - struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1]; - int ce_cachethis; - short ce_resused; - int ce_opcnt; - int ce_rpchdrlen; -}; - struct nfsd4_slot { - bool sl_inuse; - u32 sl_seqid; - struct nfsd4_cache_entry sl_cache_entry; + bool sl_inuse; + bool sl_cachethis; + u16 sl_opcnt; + u32 sl_seqid; + __be32 sl_status; + u32 sl_datalen; + char sl_data[]; }; struct nfsd4_channel_attrs { @@ -159,7 +152,7 @@ struct nfsd4_session { struct nfs4_sessionid se_sessionid; struct nfsd4_channel_attrs se_fchannel; struct nfsd4_channel_attrs se_bchannel; - struct nfsd4_slot se_slots[]; /* forward channel slots */ + struct nfsd4_slot *se_slots[]; /* forward channel slots */ }; static inline void diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 3f716607c86..73164c2b3d2 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -51,7 +51,7 @@ struct nfsd4_compound_state { /* For sessions DRC */ struct nfsd4_session *session; struct nfsd4_slot *slot; - __be32 *statp; + __be32 *datap; size_t iovlen; u32 minorversion; u32 status; @@ -472,8 +472,7 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) { - return !resp->cstate.slot->sl_cache_entry.ce_cachethis || - nfsd4_is_solo_sequence(resp); + return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp); } #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) -- cgit v1.2.3-70-g09d2 From 652696efce135559b98ee5a3d7899295e8d553fa Mon Sep 17 00:00:00 2001 From: Kyungmin Park Date: Wed, 24 Jun 2009 12:03:51 +0900 Subject: mtd: OneNAND: 4-bit ECC status macros Define ECC status for 4-bit ECC status Signed-off-by: Kyungmin Park Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- include/linux/mtd/onenand_regs.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index 86a6bbef646..acadbf53a69 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -207,6 +207,9 @@ #define ONENAND_ECC_2BIT (1 << 1) #define ONENAND_ECC_2BIT_ALL (0xAAAA) #define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) +#define ONENAND_ECC_3BIT (1 << 2) +#define ONENAND_ECC_4BIT (1 << 3) +#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) /* * One-Time Programmable (OTP) -- cgit v1.2.3-70-g09d2 From b8bdc1d0cfc488ac0d94724639f9a61b0a5a1d40 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: 
Mon, 31 Aug 2009 06:20:12 +0200 Subject: wm97xx_battery: Use platform_data This patch converts the wm97xx-battery driver to use platform_data supplied by ac97 bus. Signed-off-by: Marek Vasut Signed-off-by: Anton Vorontsov --- drivers/power/wm97xx_battery.c | 32 +++++++++++++++++++++++++++----- include/linux/wm97xx.h | 18 ++++++++++++++++++ include/linux/wm97xx_batt.h | 18 ++++-------------- 3 files changed, 49 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 8bde92126d3..14ebd960ebe 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -22,17 +22,19 @@ #include #include #include -#include static DEFINE_MUTEX(bat_lock); static struct work_struct bat_work; struct mutex work_lock; static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN; -static struct wm97xx_batt_info *pdata; +static struct wm97xx_batt_info *gpdata; static enum power_supply_property *prop; static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) { + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, pdata->batt_aux) * pdata->batt_mult / pdata->batt_div; @@ -40,6 +42,9 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) static unsigned long wm97xx_read_temp(struct power_supply *bat_ps) { + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, pdata->temp_aux) * pdata->temp_mult / pdata->temp_div; @@ -49,6 +54,9 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps, enum power_supply_property psp, union power_supply_propval *val) { + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = bat_status; @@ -97,6 +105,8 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps) static void wm97xx_bat_update(struct power_supply *bat_ps) { int old_status = bat_status; + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; mutex_lock(&work_lock); @@ -149,6 +159,15 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) int ret = 0; int props = 1; /* POWER_SUPPLY_PROP_PRESENT */ int i = 0; + struct wm97xx_pdata *wmdata = dev->dev.platform_data; + struct wm97xx_batt_pdata *pdata; + + if (gpdata) { + dev_err(&dev->dev, "Do not pass platform_data through " + "wm97xx_bat_set_pdata!\n"); + return -EINVAL; + } else + pdata = wmdata->batt_pdata; if (dev->id != -1) return -EINVAL; @@ -156,7 +175,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) mutex_init(&work_lock); if (!pdata) { - dev_err(&dev->dev, "Please use wm97xx_bat_set_pdata\n"); + dev_err(&dev->dev, "No platform_data supplied\n"); return -EINVAL; } @@ -229,6 +248,9 @@ err: static int __devexit wm97xx_bat_remove(struct platform_device *dev) { + struct wm97xx_pdata *wmdata = dev->dev.platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + if (pdata && pdata->charge_gpio && pdata->charge_gpio >= 0) gpio_free(pdata->charge_gpio); flush_scheduled_work(); @@ -258,9 +280,9 @@ static void __exit wm97xx_bat_exit(void) platform_driver_unregister(&wm97xx_bat_driver); } -void __init wm97xx_bat_set_pdata(struct 
wm97xx_batt_info *data) +void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) { - pdata = data; + gpdata = data; } EXPORT_SYMBOL_GPL(wm97xx_bat_set_pdata); diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h index 6f69968eab2..b2c2297844e 100644 --- a/include/linux/wm97xx.h +++ b/include/linux/wm97xx.h @@ -286,6 +286,24 @@ struct wm97xx { u16 suspend_mode; /* PRP in suspend mode */ }; +struct wm97xx_batt_pdata { + int batt_aux; + int temp_aux; + int charge_gpio; + int min_voltage; + int max_voltage; + int batt_div; + int batt_mult; + int temp_div; + int temp_mult; + int batt_tech; + char *batt_name; +}; + +struct wm97xx_pdata { + struct wm97xx_batt_pdata *batt_pdata; /* battery data */ +}; + /* * Codec GPIO access (not supported on WM9705) * This can be used to set/get codec GPIO and Virtual GPIO status. diff --git a/include/linux/wm97xx_batt.h b/include/linux/wm97xx_batt.h index 9681d1ab0e4..a1d6419c2ff 100644 --- a/include/linux/wm97xx_batt.h +++ b/include/linux/wm97xx_batt.h @@ -3,22 +3,12 @@ #include -struct wm97xx_batt_info { - int batt_aux; - int temp_aux; - int charge_gpio; - int min_voltage; - int max_voltage; - int batt_div; - int batt_mult; - int temp_div; - int temp_mult; - int batt_tech; - char *batt_name; -}; +#warning This file will be removed soon, use wm97xx.h instead! + +#define wm97xx_batt_info wm97xx_batt_pdata #ifdef CONFIG_BATTERY_WM97XX -void __init wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); +void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); #else static inline void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) {} #endif -- cgit v1.2.3-70-g09d2 From 3961f7c3cf247eee5df7fabadc7a40f2deeb98f3 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 10 Aug 2009 17:43:53 +0100 Subject: power_supply: Add driver for the PMU on WM831x PMICs The WM831x PMICs provide power path management from three sources: a wall supply, USB and a battery with integrated charger. They also provide an additional backup supply with integrated charger for maintaining always on functionality such as the RTC and monitoring of power switches. After some initial configuration at startup the device operates autonomously; the driver simply provides reporting of the current state. Signed-off-by: Mark Brown Signed-off-by: Anton Vorontsov --- drivers/power/Kconfig | 7 + drivers/power/Makefile | 1 + drivers/power/wm831x_power.c | 779 +++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/pmu.h | 189 ++++++++++ 4 files changed, 976 insertions(+) create mode 100644 drivers/power/wm831x_power.c create mode 100644 include/linux/mfd/wm831x/pmu.h (limited to 'include') diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index bdbc4f73fcd..cea6cef27e8 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -29,6 +29,13 @@ config APM_POWER Say Y here to enable support APM status emulation using battery class devices. +config WM831X_POWER + tristate "WM831X PMU support" + depends on MFD_WM831X + help + Say Y here to enable support for the power management unit + provided by Wolfson Microelectronics WM831x PMICs.
+ config WM8350_POWER tristate "WM8350 PMU support" depends on MFD_WM8350 diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 380d17c9ae2..b96f29d91c2 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o obj-$(CONFIG_PDA_POWER) += pda_power.o obj-$(CONFIG_APM_POWER) += apm_power.o +obj-$(CONFIG_WM831X_POWER) += wm831x_power.o obj-$(CONFIG_WM8350_POWER) += wm8350_power.o obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c new file mode 100644 index 00000000000..2a4c8b0b829 --- /dev/null +++ b/drivers/power/wm831x_power.c @@ -0,0 +1,779 @@ +/* + * PMU driver for Wolfson Microelectronics wm831x PMICs + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +struct wm831x_power { + struct wm831x *wm831x; + struct power_supply wall; + struct power_supply backup; + struct power_supply usb; + struct power_supply battery; +}; + +static int wm831x_power_check_online(struct wm831x *wm831x, int supply, + union power_supply_propval *val) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_SYSTEM_STATUS); + if (ret < 0) + return ret; + + if (ret & supply) + val->intval = 1; + else + val->intval = 0; + + return 0; +} + +static int wm831x_power_read_voltage(struct wm831x *wm831x, + enum wm831x_auxadc src, + union power_supply_propval *val) +{ + int ret; + + ret = wm831x_auxadc_read_uv(wm831x, src); + if (ret >= 0) + val->intval = ret; + + return ret; +} + +/********************************************************************* + * WALL Power + *********************************************************************/ +static int wm831x_wall_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x; + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + ret = wm831x_power_check_online(wm831x, WM831X_PWR_WALL, val); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_WALL, val); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_wall_props[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, +}; + +/********************************************************************* + * USB Power + *********************************************************************/ +static int wm831x_usb_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x; + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + ret = wm831x_power_check_online(wm831x, WM831X_PWR_USB, val); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_USB, val); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_usb_props[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, +}; + 
+/********************************************************************* + * Battery properties + *********************************************************************/ + +struct chg_map { + int val; + int reg_val; +}; + +static struct chg_map trickle_ilims[] = { + { 50, 0 << WM831X_CHG_TRKL_ILIM_SHIFT }, + { 100, 1 << WM831X_CHG_TRKL_ILIM_SHIFT }, + { 150, 2 << WM831X_CHG_TRKL_ILIM_SHIFT }, + { 200, 3 << WM831X_CHG_TRKL_ILIM_SHIFT }, +}; + +static struct chg_map vsels[] = { + { 4050, 0 << WM831X_CHG_VSEL_SHIFT }, + { 4100, 1 << WM831X_CHG_VSEL_SHIFT }, + { 4150, 2 << WM831X_CHG_VSEL_SHIFT }, + { 4200, 3 << WM831X_CHG_VSEL_SHIFT }, +}; + +static struct chg_map fast_ilims[] = { + { 0, 0 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 50, 1 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 100, 2 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 150, 3 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 200, 4 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 250, 5 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 300, 6 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 350, 7 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 400, 8 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 450, 9 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 500, 10 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 600, 11 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 700, 12 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 800, 13 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 900, 14 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 1000, 15 << WM831X_CHG_FAST_ILIM_SHIFT }, +}; + +static struct chg_map eoc_iterms[] = { + { 20, 0 << WM831X_CHG_ITERM_SHIFT }, + { 30, 1 << WM831X_CHG_ITERM_SHIFT }, + { 40, 2 << WM831X_CHG_ITERM_SHIFT }, + { 50, 3 << WM831X_CHG_ITERM_SHIFT }, + { 60, 4 << WM831X_CHG_ITERM_SHIFT }, + { 70, 5 << WM831X_CHG_ITERM_SHIFT }, + { 80, 6 << WM831X_CHG_ITERM_SHIFT }, + { 90, 7 << WM831X_CHG_ITERM_SHIFT }, +}; + +static struct chg_map chg_times[] = { + { 60, 0 << WM831X_CHG_TIME_SHIFT }, + { 90, 1 << WM831X_CHG_TIME_SHIFT }, + { 120, 2 << WM831X_CHG_TIME_SHIFT }, + { 150, 3 << WM831X_CHG_TIME_SHIFT }, + { 180, 4 << WM831X_CHG_TIME_SHIFT }, + { 210, 5 << WM831X_CHG_TIME_SHIFT }, + { 240, 6 << WM831X_CHG_TIME_SHIFT }, + { 270, 7 << WM831X_CHG_TIME_SHIFT }, + { 300, 8 << WM831X_CHG_TIME_SHIFT }, + { 330, 9 << WM831X_CHG_TIME_SHIFT }, + { 360, 10 << WM831X_CHG_TIME_SHIFT }, + { 390, 11 << WM831X_CHG_TIME_SHIFT }, + { 420, 12 << WM831X_CHG_TIME_SHIFT }, + { 450, 13 << WM831X_CHG_TIME_SHIFT }, + { 480, 14 << WM831X_CHG_TIME_SHIFT }, + { 510, 15 << WM831X_CHG_TIME_SHIFT }, +}; + +static void wm831x_battey_apply_config(struct wm831x *wm831x, + struct chg_map *map, int count, int val, + int *reg, const char *name, + const char *units) +{ + int i; + + for (i = 0; i < count; i++) + if (val == map[i].val) + break; + if (i == count) { + dev_err(wm831x->dev, "Invalid %s %d%s\n", + name, val, units); + } else { + *reg |= map[i].reg_val; + dev_dbg(wm831x->dev, "Set %s of %d%s\n", name, val, units); + } +} + +static void wm831x_config_battery(struct wm831x *wm831x) +{ + struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; + struct wm831x_battery_pdata *pdata; + int ret, reg1, reg2; + + if (!wm831x_pdata || !wm831x_pdata->battery) { + dev_warn(wm831x->dev, + "No battery charger configuration\n"); + return; + } + + pdata = wm831x_pdata->battery; + + reg1 = 0; + reg2 = 0; + + if (!pdata->enable) { + dev_info(wm831x->dev, "Battery charger disabled\n"); + return; + } + + reg1 |= WM831X_CHG_ENA; + if (pdata->off_mask) + reg2 |= WM831X_CHG_OFF_MSK; + if (pdata->fast_enable) + reg1 |= WM831X_CHG_FAST; + + wm831x_battey_apply_config(wm831x, trickle_ilims, + ARRAY_SIZE(trickle_ilims), + 
pdata->trickle_ilim, &reg2, + "trickle charge current limit", "mA"); + + wm831x_battey_apply_config(wm831x, vsels, ARRAY_SIZE(vsels), + pdata->vsel, &reg2, + "target voltage", "mV"); + + wm831x_battey_apply_config(wm831x, fast_ilims, ARRAY_SIZE(fast_ilims), + pdata->fast_ilim, &reg2, + "fast charge current limit", "mA"); + + wm831x_battey_apply_config(wm831x, eoc_iterms, ARRAY_SIZE(eoc_iterms), + pdata->eoc_iterm, &reg1, + "end of charge current threshold", "mA"); + + wm831x_battey_apply_config(wm831x, chg_times, ARRAY_SIZE(chg_times), + pdata->timeout, &reg2, + "charger timeout", "min"); + + ret = wm831x_reg_unlock(wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret); + return; + } + + ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_1, + WM831X_CHG_ENA_MASK | + WM831X_CHG_FAST_MASK | + WM831X_CHG_ITERM_MASK | + WM831X_CHG_ITERM_MASK, + reg1); + if (ret != 0) + dev_err(wm831x->dev, "Failed to set charger control 1: %d\n", + ret); + + ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_2, + WM831X_CHG_OFF_MSK | + WM831X_CHG_TIME_MASK | + WM831X_CHG_FAST_ILIM_MASK | + WM831X_CHG_TRKL_ILIM_MASK | + WM831X_CHG_VSEL_MASK, + reg2); + if (ret != 0) + dev_err(wm831x->dev, "Failed to set charger control 2: %d\n", + ret); + + wm831x_reg_lock(wm831x); +} + +static int wm831x_bat_check_status(struct wm831x *wm831x, int *status) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_SYSTEM_STATUS); + if (ret < 0) + return ret; + + if (ret & WM831X_PWR_SRC_BATT) { + *status = POWER_SUPPLY_STATUS_DISCHARGING; + return 0; + } + + ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS); + if (ret < 0) + return ret; + + switch (ret & WM831X_CHG_STATE_MASK) { + case WM831X_CHG_STATE_OFF: + *status = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + case WM831X_CHG_STATE_TRICKLE: + case WM831X_CHG_STATE_FAST: + *status = POWER_SUPPLY_STATUS_CHARGING; + break; + + default: + *status = POWER_SUPPLY_STATUS_UNKNOWN; + break; + } + + return 0; +} + +static int wm831x_bat_check_type(struct wm831x *wm831x, int *type) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS); + if (ret < 0) + return ret; + + switch (ret & WM831X_CHG_STATE_MASK) { + case WM831X_CHG_STATE_TRICKLE: + case WM831X_CHG_STATE_TRICKLE_OT: + *type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + break; + case WM831X_CHG_STATE_FAST: + case WM831X_CHG_STATE_FAST_OT: + *type = POWER_SUPPLY_CHARGE_TYPE_FAST; + break; + default: + *type = POWER_SUPPLY_CHARGE_TYPE_NONE; + break; + } + + return 0; +} + +static int wm831x_bat_check_health(struct wm831x *wm831x, int *health) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS); + if (ret < 0) + return ret; + + if (ret & WM831X_BATT_HOT_STS) { + *health = POWER_SUPPLY_HEALTH_OVERHEAT; + return 0; + } + + if (ret & WM831X_BATT_COLD_STS) { + *health = POWER_SUPPLY_HEALTH_COLD; + return 0; + } + + if (ret & WM831X_BATT_OV_STS) { + *health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; + return 0; + } + + switch (ret & WM831X_CHG_STATE_MASK) { + case WM831X_CHG_STATE_TRICKLE_OT: + case WM831X_CHG_STATE_FAST_OT: + *health = POWER_SUPPLY_HEALTH_OVERHEAT; + break; + case WM831X_CHG_STATE_DEFECTIVE: + *health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; + break; + default: + *health = POWER_SUPPLY_HEALTH_GOOD; + break; + } + + return 0; +} + +static int wm831x_bat_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x;
+ int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + ret = wm831x_bat_check_status(wm831x, &val->intval); + break; + case POWER_SUPPLY_PROP_ONLINE: + ret = wm831x_power_check_online(wm831x, WM831X_PWR_SRC_BATT, + val); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BATT, val); + break; + case POWER_SUPPLY_PROP_HEALTH: + ret = wm831x_bat_check_health(wm831x, &val->intval); + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + ret = wm831x_bat_check_type(wm831x, &val->intval); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_bat_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_CHARGE_TYPE, +}; + +static const char *wm831x_bat_irqs[] = { + "BATT HOT", + "BATT COLD", + "BATT FAIL", + "OV", + "END", + "TO", + "MODE", + "START", +}; + +static irqreturn_t wm831x_bat_irq(int irq, void *data) +{ + struct wm831x_power *wm831x_power = data; + struct wm831x *wm831x = wm831x_power->wm831x; + + dev_dbg(wm831x->dev, "Battery status changed: %d\n", irq); + + /* The battery charger is autonomous so we don't need to do + * anything except kick user space */ + power_supply_changed(&wm831x_power->battery); + + return IRQ_HANDLED; +} + + +/********************************************************************* + * Backup supply properties + *********************************************************************/ + +static void wm831x_config_backup(struct wm831x *wm831x) +{ + struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; + struct wm831x_backup_pdata *pdata; + int ret, reg; + + if (!wm831x_pdata || !wm831x_pdata->backup) { + dev_warn(wm831x->dev, + "No backup battery charger configuration\n"); + return; + } + + pdata = wm831x_pdata->backup; + + reg = 0; + + if (pdata->charger_enable) + reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA; + if (pdata->no_constant_voltage) + reg |= WM831X_BKUP_CHG_MODE; + + switch (pdata->vlim) { + case 2500: + break; + case 3100: + reg |= WM831X_BKUP_CHG_VLIM; + break; + default: + dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n", + pdata->vlim); + } + + switch (pdata->ilim) { + case 100: + break; + case 200: + reg |= 1; + break; + case 300: + reg |= 2; + break; + case 400: + reg |= 3; + break; + default: + dev_err(wm831x->dev, "Invalid backup current limit %duA\n", + pdata->ilim); + } + + ret = wm831x_reg_unlock(wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret); + return; + } + + ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL, + WM831X_BKUP_CHG_ENA_MASK | + WM831X_BKUP_CHG_MODE_MASK | + WM831X_BKUP_BATT_DET_ENA_MASK | + WM831X_BKUP_CHG_VLIM_MASK | + WM831X_BKUP_CHG_ILIM_MASK, + reg); + if (ret != 0) + dev_err(wm831x->dev, + "Failed to set backup charger config: %d\n", ret); + + wm831x_reg_lock(wm831x); +} + +static int wm831x_backup_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x; + int ret = 0; + + ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL); + if (ret < 0) + return ret; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + if (ret & WM831X_BKUP_CHG_STS) + val->intval = POWER_SUPPLY_STATUS_CHARGING; + else + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + + case 
POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BKUP_BATT, + val); + break; + + case POWER_SUPPLY_PROP_PRESENT: + if (ret & WM831X_BKUP_CHG_STS) + val->intval = 1; + else + val->intval = 0; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_backup_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_PRESENT, +}; + +/********************************************************************* + * Initialisation + *********************************************************************/ + +static irqreturn_t wm831x_syslo_irq(int irq, void *data) +{ + struct wm831x_power *wm831x_power = data; + struct wm831x *wm831x = wm831x_power->wm831x; + + /* Not much we can actually *do* but tell people for + * posterity, we're probably about to run out of power. */ + dev_crit(wm831x->dev, "SYSVDD under voltage\n"); + + return IRQ_HANDLED; +} + +static irqreturn_t wm831x_pwr_src_irq(int irq, void *data) +{ + struct wm831x_power *wm831x_power = data; + struct wm831x *wm831x = wm831x_power->wm831x; + + dev_dbg(wm831x->dev, "Power source changed\n"); + + /* Just notify for everything - little harm in overnotifying. + * The backup battery is not a power source while the system + * is running so skip that. + */ + power_supply_changed(&wm831x_power->battery); + power_supply_changed(&wm831x_power->usb); + power_supply_changed(&wm831x_power->wall); + + return IRQ_HANDLED; +} + +static __devinit int wm831x_power_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_power *power; + struct power_supply *usb; + struct power_supply *battery; + struct power_supply *wall; + struct power_supply *backup; + int ret, irq, i; + + power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL); + if (power == NULL) + return -ENOMEM; + + power->wm831x = wm831x; + platform_set_drvdata(pdev, power); + + usb = &power->usb; + battery = &power->battery; + wall = &power->wall; + backup = &power->backup; + + /* We ignore configuration failures since we can still read back + * the status without enabling either of the chargers. 
+ */ + wm831x_config_battery(wm831x); + wm831x_config_backup(wm831x); + + wall->name = "wm831x-wall"; + wall->type = POWER_SUPPLY_TYPE_MAINS; + wall->properties = wm831x_wall_props; + wall->num_properties = ARRAY_SIZE(wm831x_wall_props); + wall->get_property = wm831x_wall_get_prop; + ret = power_supply_register(&pdev->dev, wall); + if (ret) + goto err_kmalloc; + + battery->name = "wm831x-battery"; + battery->properties = wm831x_bat_props; + battery->num_properties = ARRAY_SIZE(wm831x_bat_props); + battery->get_property = wm831x_bat_get_prop; + battery->use_for_apm = 1; + ret = power_supply_register(&pdev->dev, battery); + if (ret) + goto err_wall; + + usb->name = "wm831x-usb", + usb->type = POWER_SUPPLY_TYPE_USB; + usb->properties = wm831x_usb_props; + usb->num_properties = ARRAY_SIZE(wm831x_usb_props); + usb->get_property = wm831x_usb_get_prop; + ret = power_supply_register(&pdev->dev, usb); + if (ret) + goto err_battery; + + backup->name = "wm831x-backup"; + backup->type = POWER_SUPPLY_TYPE_BATTERY; + backup->properties = wm831x_backup_props; + backup->num_properties = ARRAY_SIZE(wm831x_backup_props); + backup->get_property = wm831x_backup_get_prop; + ret = power_supply_register(&pdev->dev, backup); + if (ret) + goto err_usb; + + irq = platform_get_irq_byname(pdev, "SYSLO"); + ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq, + IRQF_TRIGGER_RISING, "SYSLO", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", + irq, ret); + goto err_backup; + } + + irq = platform_get_irq_byname(pdev, "PWR SRC"); + ret = wm831x_request_irq(wm831x, irq, wm831x_pwr_src_irq, + IRQF_TRIGGER_RISING, "Power source", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", + irq, ret); + goto err_syslo; + } + + for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { + irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); + ret = wm831x_request_irq(wm831x, irq, wm831x_bat_irq, + IRQF_TRIGGER_RISING, + wm831x_bat_irqs[i], + power); + if (ret != 0) { + dev_err(&pdev->dev, + "Failed to request %s IRQ %d: %d\n", + wm831x_bat_irqs[i], irq, ret); + goto err_bat_irq; + } + } + + return ret; + +err_bat_irq: + for (; i >= 0; i--) { + irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); + wm831x_free_irq(wm831x, irq, power); + } + irq = platform_get_irq_byname(pdev, "PWR SRC"); + wm831x_free_irq(wm831x, irq, power); +err_syslo: + irq = platform_get_irq_byname(pdev, "SYSLO"); + wm831x_free_irq(wm831x, irq, power); +err_backup: + power_supply_unregister(backup); +err_usb: + power_supply_unregister(usb); +err_battery: + power_supply_unregister(battery); +err_wall: + power_supply_unregister(wall); +err_kmalloc: + kfree(power); + return ret; +} + +static __devexit int wm831x_power_remove(struct platform_device *pdev) +{ + struct wm831x_power *wm831x_power = platform_get_drvdata(pdev); + struct wm831x *wm831x = wm831x_power->wm831x; + int irq, i; + + for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { + irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); + wm831x_free_irq(wm831x, irq, wm831x_power); + } + + irq = platform_get_irq_byname(pdev, "PWR SRC"); + wm831x_free_irq(wm831x, irq, wm831x_power); + + irq = platform_get_irq_byname(pdev, "SYSLO"); + wm831x_free_irq(wm831x, irq, wm831x_power); + + power_supply_unregister(&wm831x_power->backup); + power_supply_unregister(&wm831x_power->battery); + power_supply_unregister(&wm831x_power->wall); + power_supply_unregister(&wm831x_power->usb); + return 0; +} + +static struct platform_driver 
wm831x_power_driver = { + .probe = wm831x_power_probe, + .remove = __devexit_p(wm831x_power_remove), + .driver = { + .name = "wm831x-power", + }, +}; + +static int __init wm831x_power_init(void) +{ + return platform_driver_register(&wm831x_power_driver); +} +module_init(wm831x_power_init); + +static void __exit wm831x_power_exit(void) +{ + platform_driver_unregister(&wm831x_power_driver); +} +module_exit(wm831x_power_exit); + +MODULE_DESCRIPTION("Power supply driver for WM831x PMICs"); +MODULE_AUTHOR("Mark Brown "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-power"); diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h new file mode 100644 index 00000000000..b18cbb027bc --- /dev/null +++ b/include/linux/mfd/wm831x/pmu.h @@ -0,0 +1,189 @@ +/* + * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_PMU_H__ +#define __MFD_WM831X_PMU_H__ + +/* + * R16387 (0x4003) - Power State + */ +#define WM831X_CHIP_ON 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */ +#define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */ +#define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */ +#define WM831X_REF_LP 0x1000 /* REF_LP */ +#define WM831X_REF_LP_MASK 0x1000 /* REF_LP */ +#define WM831X_REF_LP_SHIFT 12 /* REF_LP */ +#define WM831X_REF_LP_WIDTH 1 /* REF_LP */ +#define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */ +#define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */ +#define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */ + +/* + * R16397 (0x400D) - System Status + */ +#define WM831X_THW_STS 0x8000 /* THW_STS */ +#define WM831X_THW_STS_MASK 0x8000 /* THW_STS */ +#define WM831X_THW_STS_SHIFT 15 /* THW_STS */ +#define WM831X_THW_STS_WIDTH 1 /* THW_STS */ +#define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */ +#define WM831X_PWR_WALL 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_SHIFT 9 /* PWR_WALL */ +#define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */ +#define 
WM831X_PWR_USB 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */ +#define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */ +#define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */ + +/* + * R16456 (0x4048) - Charger Control 1 + */ +#define WM831X_CHG_ENA 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */ +#define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */ +#define WM831X_CHG_FRC 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */ +#define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */ +#define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_FAST 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */ +#define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */ +#define WM831X_CHG_IMON_ENA 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_MASK 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_SHIFT 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_WIDTH 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_CHIP_TEMP_MON 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_MASK 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_SHIFT 0 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_WIDTH 1 /* CHG_CHIP_TEMP_MON */ + +/* + * R16457 (0x4049) - Charger Control 2 + */ +#define WM831X_CHG_OFF_MSK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_MASK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_SHIFT 14 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_WIDTH 1 /* CHG_OFF_MSK */ +#define WM831X_CHG_TIME_MASK 0x0F00 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_SHIFT 8 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_WIDTH 4 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TRKL_ILIM_MASK 0x00C0 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_SHIFT 6 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_WIDTH 2 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_VSEL_MASK 0x0030 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_SHIFT 4 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_WIDTH 2 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_FAST_ILIM_MASK 0x000F /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_SHIFT 0 /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_WIDTH 4 /* CHG_FAST_ILIM - [3:0] */ + +/* + * R16458 (0x404A) - Charger Status + */ +#define WM831X_BATT_OV_STS 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_MASK 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_SHIFT 15 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_WIDTH 1 /* BATT_OV_STS */ +#define WM831X_CHG_STATE_MASK 0x7000 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_SHIFT 12 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_WIDTH 3 /* CHG_STATE - [14:12] */ +#define WM831X_BATT_HOT_STS 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_MASK 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_SHIFT 11 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_WIDTH 1 /* BATT_HOT_STS */ +#define WM831X_BATT_COLD_STS 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_MASK 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_SHIFT 10 /* BATT_COLD_STS */ +#define 
WM831X_BATT_COLD_STS_WIDTH 1 /* BATT_COLD_STS */ +#define WM831X_CHG_TOPOFF 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_MASK 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_SHIFT 9 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_WIDTH 1 /* CHG_TOPOFF */ +#define WM831X_CHG_ACTIVE 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_MASK 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_SHIFT 8 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_WIDTH 1 /* CHG_ACTIVE */ +#define WM831X_CHG_TIME_ELAPSED_MASK 0x00FF /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_SHIFT 0 /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_WIDTH 8 /* CHG_TIME_ELAPSED - [7:0] */ + +#define WM831X_CHG_STATE_OFF (0 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE (1 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST (2 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE_OT (3 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST_OT (4 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_DEFECTIVE (5 << WM831X_CHG_STATE_SHIFT) + +/* + * R16459 (0x404B) - Backup Charger Control + */ +#define WM831X_BKUP_CHG_ENA 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_MASK 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_SHIFT 15 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_WIDTH 1 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_STS 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_MASK 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_SHIFT 14 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_WIDTH 1 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_MODE 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_MASK 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_SHIFT 12 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_WIDTH 1 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_BATT_DET_ENA 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_MASK 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_SHIFT 11 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_WIDTH 1 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_STS 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_MASK 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_SHIFT 10 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_WIDTH 1 /* BKUP_BATT_STS */ +#define WM831X_BKUP_CHG_VLIM 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_MASK 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_SHIFT 4 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_WIDTH 1 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_ILIM_MASK 0x0003 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_SHIFT 0 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_WIDTH 2 /* BKUP_CHG_ILIM - [1:0] */ + +#endif -- cgit v1.2.3-70-g09d2 From a3710fd1ee8cd542c5de63cf2c39f8912031f867 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 17 Sep 2009 08:50:18 -0400 Subject: ext4: fix tracepoint format string warnings Unlike on some other architectures ino_t is an unsigned int on s390. 
So add an explicit cast to avoid lots of compile warnings: In file included from include/trace/ftrace.h:285, from include/trace/define_trace.h:61, from include/trace/events/ext4.h:711, from fs/ext4/super.c:50: include/trace/events/ext4.h: In function 'ftrace_raw_output_ext4_free_inode': include/trace/events/ext4.h:12: warning: format '%lu' expects type 'long unsigned int', but argument 4 has type 'ino_t' Signed-off-by: Heiko Carstens Signed-off-by: "Theodore Ts'o" --- include/trace/events/ext4.h | 75 ++++++++++++++++++++++++--------------------- include/trace/events/jbd2.h | 2 +- 2 files changed, 41 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index dd43399288e..68b53c7ef8a 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -33,8 +33,8 @@ TRACE_EVENT(ext4_free_inode, ), TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode, - __entry->uid, __entry->gid, + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->mode, __entry->uid, __entry->gid, (unsigned long long) __entry->blocks) ); @@ -56,7 +56,8 @@ TRACE_EVENT(ext4_request_inode, ), TP_printk("dev %s dir %lu mode %d", - jbd2_dev_to_name(__entry->dev), __entry->dir, __entry->mode) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->dir, + __entry->mode) ); TRACE_EVENT(ext4_allocate_inode, @@ -79,7 +80,8 @@ TRACE_EVENT(ext4_allocate_inode, ), TP_printk("dev %s ino %lu dir %lu mode %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->dir, __entry->mode) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + (unsigned long) __entry->dir, __entry->mode) ); TRACE_EVENT(ext4_write_begin, @@ -106,8 +108,8 @@ TRACE_EVENT(ext4_write_begin, ), TP_printk("dev %s ino %lu pos %llu len %u flags %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->flags) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->flags) ); TRACE_EVENT(ext4_ordered_write_end, @@ -133,8 +135,8 @@ TRACE_EVENT(ext4_ordered_write_end, ), TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->copied) ); TRACE_EVENT(ext4_writeback_write_end, @@ -160,8 +162,8 @@ TRACE_EVENT(ext4_writeback_write_end, ), TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->copied) ); TRACE_EVENT(ext4_journalled_write_end, @@ -186,8 +188,8 @@ TRACE_EVENT(ext4_journalled_write_end, ), TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->copied) ); TRACE_EVENT(ext4_writepage, @@ -209,7 +211,8 @@ TRACE_EVENT(ext4_writepage, ), TP_printk("dev %s ino %lu page_index %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->index) ); TRACE_EVENT(ext4_da_writepages, @@ -244,7 +247,8 @@ TRACE_EVENT(ext4_da_writepages, ), TP_printk("dev %s 
ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write, + jbd2_dev_to_name(__entry->dev), + (unsigned long) __entry->ino, __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, __entry->range_end, __entry->nonblocking, __entry->for_kupdate, __entry->for_reclaim, @@ -279,7 +283,7 @@ TRACE_EVENT(ext4_da_write_pages, ), TP_printk("dev %s ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, __entry->b_blocknr, __entry->b_size, __entry->b_state, __entry->first_page, __entry->io_done, __entry->pages_written) @@ -314,7 +318,8 @@ TRACE_EVENT(ext4_da_writepages_result, ), TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->ret, + jbd2_dev_to_name(__entry->dev), + (unsigned long) __entry->ino, __entry->ret, __entry->pages_written, __entry->pages_skipped, __entry->encountered_congestion, __entry->more_io, __entry->no_nrwrite_index_update) @@ -343,8 +348,8 @@ TRACE_EVENT(ext4_da_write_begin, ), TP_printk("dev %s ino %lu pos %llu len %u flags %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->flags) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->flags) ); TRACE_EVENT(ext4_da_write_end, @@ -370,8 +375,8 @@ TRACE_EVENT(ext4_da_write_end, ), TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->copied) ); TRACE_EVENT(ext4_discard_blocks, @@ -421,8 +426,8 @@ TRACE_EVENT(ext4_mb_new_inode_pa, ), TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart, - __entry->pa_len, __entry->pa_lstart) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) ); TRACE_EVENT(ext4_mb_new_group_pa, @@ -449,8 +454,8 @@ TRACE_EVENT(ext4_mb_new_group_pa, ), TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart, - __entry->pa_len, __entry->pa_lstart) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) ); TRACE_EVENT(ext4_mb_release_inode_pa, @@ -476,8 +481,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa, ), TP_printk("dev %s ino %lu block %llu count %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block, - __entry->count) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->block, __entry->count) ); TRACE_EVENT(ext4_mb_release_group_pa, @@ -522,7 +527,7 @@ TRACE_EVENT(ext4_discard_preallocations, ), TP_printk("dev %s ino %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino) ); TRACE_EVENT(ext4_mb_discard_preallocations, @@ -577,8 +582,8 @@ TRACE_EVENT(ext4_request_blocks, ), TP_printk("dev %s ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", - jbd2_dev_to_name(__entry->dev), __entry->ino, 
__entry->flags, - __entry->len, + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->flags, __entry->len, (unsigned long long) __entry->logical, (unsigned long long) __entry->goal, (unsigned long long) __entry->lleft, @@ -621,8 +626,8 @@ TRACE_EVENT(ext4_allocate_blocks, ), TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags, - __entry->len, __entry->block, + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->flags, __entry->len, __entry->block, (unsigned long long) __entry->logical, (unsigned long long) __entry->goal, (unsigned long long) __entry->lleft, @@ -655,8 +660,8 @@ TRACE_EVENT(ext4_free_blocks, ), TP_printk("dev %s ino %lu block %llu count %lu metadata %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block, - __entry->count, __entry->metadata) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->block, __entry->count, __entry->metadata) ); TRACE_EVENT(ext4_sync_file, @@ -679,8 +684,8 @@ TRACE_EVENT(ext4_sync_file, ), TP_printk("dev %s ino %ld parent %ld datasync %d ", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->parent, - __entry->datasync) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + (unsigned long) __entry->parent, __entry->datasync) ); TRACE_EVENT(ext4_sync_fs, diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 10813fa0c8d..b851f0b4701 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -159,7 +159,7 @@ TRACE_EVENT(jbd2_submit_inode_data, ), TP_printk("dev %s ino %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino) + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino) ); #endif /* _TRACE_JBD2_H */ -- cgit v1.2.3-70-g09d2 From f0fda0a47b26aba986fe65897891956c1792b526 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Thu, 3 Sep 2009 09:33:48 +0800 Subject: drm/kms: add a function that can add the mode for the output device without EDID Add a function that can be used to add the default mode for the output device without EDID. It will add the default mode that meets with the requirements of given hdisplay/vdisplay limit. Signed-off-by: Zhao Yakui Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 2 ++ 2 files changed, 48 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f84a98f2e37..e2d5f515f7b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1215,3 +1215,49 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) return num_modes; } EXPORT_SYMBOL(drm_add_edid_modes); + +/** + * drm_add_modes_noedid - add modes for the connectors without EDID + * @connector: connector we're probing + * @hdisplay: the horizontal display limit + * @vdisplay: the vertical display limit + * + * Add the specified modes to the connector's mode list. Only when the + * hdisplay/vdisplay is not beyond the given limit, it will be added. + * + * Return number of modes added or 0 if we couldn't find any. 
+ */ +int drm_add_modes_noedid(struct drm_connector *connector, + int hdisplay, int vdisplay) +{ + int i, count, num_modes = 0; + struct drm_display_mode *mode, *ptr; + struct drm_device *dev = connector->dev; + + count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); + if (hdisplay < 0) + hdisplay = 0; + if (vdisplay < 0) + vdisplay = 0; + + for (i = 0; i < count; i++) { + ptr = &drm_dmt_modes[i]; + if (hdisplay && vdisplay) { + /* + * Only when two are valid, they will be used to check + * whether the mode should be added to the mode list of + * the connector. + */ + if (ptr->hdisplay > hdisplay || + ptr->vdisplay > vdisplay) + continue; + } + mode = drm_mode_duplicate(dev, ptr); + if (mode) { + drm_mode_probed_add(connector, mode); + num_modes++; + } + } + return num_modes; +} +EXPORT_SYMBOL(drm_add_modes_noedid); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index b0427a70fcb..ae1e9e16695 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -750,4 +750,6 @@ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool interlaced, int margins); +extern int drm_add_modes_noedid(struct drm_connector *connector, + int hdisplay, int vdisplay); #endif /* __DRM_CRTC_H__ */ -- cgit v1.2.3-70-g09d2 From c746b5519a88b8803d43946ad06326ece4829116 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 5 Sep 2009 14:09:21 +0100 Subject: leds: Add WM831x status LED driver The WM831x devices feature two software controlled status LEDs with hardware assisted blinking. The device can also autonomously control the LEDs based on a selection of sources. This can be configured at boot time using either platform data or the chip OTP. A sysfs file in the style of the one used for triggers allows the control source to be configured at run time. Triggers can't be used here since they can't depend on the implementation details of a specific LED type. Signed-off-by: Mark Brown Signed-off-by: Richard Purdie --- drivers/leds/Kconfig | 7 + drivers/leds/Makefile | 1 + drivers/leds/leds-wm831x-status.c | 341 ++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/status.h | 34 ++++ 4 files changed, 383 insertions(+) create mode 100644 drivers/leds/leds-wm831x-status.c create mode 100644 include/linux/mfd/wm831x/status.h (limited to 'include') diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 7c8e7122aaa..edfd4e302be 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -195,6 +195,13 @@ config LEDS_PCA955X LED driver chips accessed via the I2C bus. Supported devices include PCA9550, PCA9551, PCA9552, and PCA9553. +config LEDS_WM831X_STATUS + tristate "LED support for status LEDs on WM831x PMICs" + depends on LEDS_CLASS && MFD_WM831X + help + This option enables support for the status LEDs of the WM831x + series of PMICs.
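For boards that enable LEDS_WM831X_STATUS, the boot-time configuration mentioned in the changelog can be supplied through wm831x platform data. The sketch below is illustrative only: the pdata header path, the LED name and the trigger name are assumptions rather than values taken from this series; only struct wm831x_status_pdata, its name/default_trigger/default_src fields and WM831X_STATUS_PRESERVE are things the driver code later in this patch actually consumes.

	#include <linux/mfd/wm831x/pdata.h>	/* assumed location of struct wm831x_pdata */

	/* Hypothetical board wiring for the first status LED */
	static struct wm831x_status_pdata board_status1_pdata = {
		.name		 = "board:green:status",	/* made-up LED name */
		.default_trigger = "heartbeat",			/* any registered LED trigger */
		.default_src	 = WM831X_STATUS_PRESERVE,	/* keep the OTP/boot control source */
	};

	static struct wm831x_pdata board_wm831x_pdata = {
		/* entries are picked up per LED by the wm831x-status platform driver */
		.status = { &board_status1_pdata },
	};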
+ config LEDS_WM8350 tristate "LED Support for WM8350 AudioPlus PMIC" depends on LEDS_CLASS && MFD_WM8350 diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index e8cdcf77a4c..46d72704d60 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o obj-$(CONFIG_LEDS_FSG) += leds-fsg.o obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o +obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o obj-$(CONFIG_LEDS_PWM) += leds-pwm.o diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c new file mode 100644 index 00000000000..c586d05e336 --- /dev/null +++ b/drivers/leds/leds-wm831x-status.c @@ -0,0 +1,341 @@ +/* + * LED driver for WM831x status LEDs + * + * Copyright(C) 2009 Wolfson Microelectronics PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +struct wm831x_status { + struct led_classdev cdev; + struct wm831x *wm831x; + struct work_struct work; + struct mutex mutex; + + spinlock_t value_lock; + int reg; /* Control register */ + int reg_val; /* Control register value */ + + int blink; + int blink_time; + int blink_cyc; + int src; + enum led_brightness brightness; +}; + +#define to_wm831x_status(led_cdev) \ + container_of(led_cdev, struct wm831x_status, cdev) + +static void wm831x_status_work(struct work_struct *work) +{ + struct wm831x_status *led = container_of(work, struct wm831x_status, + work); + unsigned long flags; + + mutex_lock(&led->mutex); + + led->reg_val &= ~(WM831X_LED_SRC_MASK | WM831X_LED_MODE_MASK | + WM831X_LED_DUTY_CYC_MASK | WM831X_LED_DUR_MASK); + + spin_lock_irqsave(&led->value_lock, flags); + + led->reg_val |= led->src << WM831X_LED_SRC_SHIFT; + if (led->blink) { + led->reg_val |= 2 << WM831X_LED_MODE_SHIFT; + led->reg_val |= led->blink_time << WM831X_LED_DUR_SHIFT; + led->reg_val |= led->blink_cyc; + } else { + if (led->brightness != LED_OFF) + led->reg_val |= 1 << WM831X_LED_MODE_SHIFT; + } + + spin_unlock_irqrestore(&led->value_lock, flags); + + wm831x_reg_write(led->wm831x, led->reg, led->reg_val); + + mutex_unlock(&led->mutex); +} + +static void wm831x_status_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct wm831x_status *led = to_wm831x_status(led_cdev); + unsigned long flags; + + spin_lock_irqsave(&led->value_lock, flags); + led->brightness = value; + if (value == LED_OFF) + led->blink = 0; + schedule_work(&led->work); + spin_unlock_irqrestore(&led->value_lock, flags); +} + +static int wm831x_status_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct wm831x_status *led = to_wm831x_status(led_cdev); + unsigned long flags; + int ret = 0; + + /* Pick some defaults if we've not been given times */ + if (*delay_on == 0 && *delay_off == 0) { + *delay_on = 250; + *delay_off = 250; + } + + spin_lock_irqsave(&led->value_lock, flags); + + /* We only have a limited selection of settings, see if we can + * support the configuration we're being given */ + switch (*delay_on) { + case 1000: + led->blink_time = 0; + break; + case 250: + led->blink_time = 1; + break; + case 125: + led->blink_time = 2; + break; + case 62: + case 63: + /* Actually 62.5ms */ + led->blink_time = 3; + 
break; + default: + ret = -EINVAL; + break; + } + + if (ret == 0) { + switch (*delay_off / *delay_on) { + case 1: + led->blink_cyc = 0; + break; + case 3: + led->blink_cyc = 1; + break; + case 4: + led->blink_cyc = 2; + break; + case 8: + led->blink_cyc = 3; + break; + default: + ret = -EINVAL; + break; + } + } + + if (ret == 0) + led->blink = 1; + else + led->blink = 0; + + /* Always update; if we fail turn off blinking since we expect + * a software fallback. */ + schedule_work(&led->work); + + spin_unlock_irqrestore(&led->value_lock, flags); + + return ret; +} + +static const char *led_src_texts[] = { + "otp", + "power", + "charger", + "soft", +}; + +static ssize_t wm831x_status_src_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct wm831x_status *led = to_wm831x_status(led_cdev); + int i; + ssize_t ret = 0; + + mutex_lock(&led->mutex); + + for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) + if (i == led->src) + ret += sprintf(&buf[ret], "[%s] ", led_src_texts[i]); + else + ret += sprintf(&buf[ret], "%s ", led_src_texts[i]); + + mutex_unlock(&led->mutex); + + ret += sprintf(&buf[ret], "\n"); + + return ret; +} + +static ssize_t wm831x_status_src_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct wm831x_status *led = to_wm831x_status(led_cdev); + char name[20]; + int i; + size_t len; + + name[sizeof(name) - 1] = '\0'; + strncpy(name, buf, sizeof(name) - 1); + len = strlen(name); + + if (len && name[len - 1] == '\n') + name[len - 1] = '\0'; + + for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) { + if (!strcmp(name, led_src_texts[i])) { + mutex_lock(&led->mutex); + + led->src = i; + schedule_work(&led->work); + + mutex_unlock(&led->mutex); + } + } + + return size; +} + +static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store); + +static int wm831x_status_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *chip_pdata; + struct wm831x_status_pdata pdata; + struct wm831x_status *drvdata; + struct resource *res; + int id = pdev->id % ARRAY_SIZE(chip_pdata->status); + int ret; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No I/O resource\n"); + ret = -EINVAL; + goto err; + } + + drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + dev_set_drvdata(&pdev->dev, drvdata); + + drvdata->wm831x = wm831x; + drvdata->reg = res->start; + + if (wm831x->dev->platform_data) + chip_pdata = wm831x->dev->platform_data; + else + chip_pdata = NULL; + + memset(&pdata, 0, sizeof(pdata)); + if (chip_pdata && chip_pdata->status[id]) + memcpy(&pdata, chip_pdata->status[id], sizeof(pdata)); + else + pdata.name = dev_name(&pdev->dev); + + mutex_init(&drvdata->mutex); + INIT_WORK(&drvdata->work, wm831x_status_work); + spin_lock_init(&drvdata->value_lock); + + /* We cache the configuration register and read startup values + * from it. */ + drvdata->reg_val = wm831x_reg_read(wm831x, drvdata->reg); + + if (drvdata->reg_val & WM831X_LED_MODE_MASK) + drvdata->brightness = LED_FULL; + else + drvdata->brightness = LED_OFF; + + /* Set a default source if configured, otherwise leave the + * current hardware setting. 
+ */ + if (pdata.default_src == WM831X_STATUS_PRESERVE) { + drvdata->src = drvdata->reg_val; + drvdata->src &= WM831X_LED_SRC_MASK; + drvdata->src >>= WM831X_LED_SRC_SHIFT; + } else { + drvdata->src = pdata.default_src - 1; + } + + drvdata->cdev.name = pdata.name; + drvdata->cdev.default_trigger = pdata.default_trigger; + drvdata->cdev.brightness_set = wm831x_status_set; + drvdata->cdev.blink_set = wm831x_status_blink_set; + + ret = led_classdev_register(wm831x->dev, &drvdata->cdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); + goto err_led; + } + + ret = device_create_file(drvdata->cdev.dev, &dev_attr_src); + if (ret != 0) + dev_err(&pdev->dev, + "No source control for LED: %d\n", ret); + + return 0; + +err_led: + led_classdev_unregister(&drvdata->cdev); + kfree(drvdata); +err: + return ret; +} + +static int wm831x_status_remove(struct platform_device *pdev) +{ + struct wm831x_status *drvdata = platform_get_drvdata(pdev); + + device_remove_file(drvdata->cdev.dev, &dev_attr_src); + led_classdev_unregister(&drvdata->cdev); + kfree(drvdata); + + return 0; +} + +static struct platform_driver wm831x_status_driver = { + .driver = { + .name = "wm831x-status", + .owner = THIS_MODULE, + }, + .probe = wm831x_status_probe, + .remove = wm831x_status_remove, +}; + +static int __devinit wm831x_status_init(void) +{ + return platform_driver_register(&wm831x_status_driver); +} +module_init(wm831x_status_init); + +static void wm831x_status_exit(void) +{ + platform_driver_unregister(&wm831x_status_driver); +} +module_exit(wm831x_status_exit); + +MODULE_AUTHOR("Mark Brown "); +MODULE_DESCRIPTION("WM831x status LED driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-status"); diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h new file mode 100644 index 00000000000..6bc090d0e3a --- /dev/null +++ b/include/linux/mfd/wm831x/status.h @@ -0,0 +1,34 @@ +/* + * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_STATUS_H__ +#define __MFD_WM831X_STATUS_H__ + +#define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */ +#define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */ +#define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */ + +#endif -- cgit v1.2.3-70-g09d2 From 5036cc41e07d6614350e329666ee8a79cea6f793 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 6 Aug 2009 16:07:10 -0700 Subject: backlight: spi driver for LMS283GF05 LCD Add support for the SPI part of LMS283GF05 LCD. The LCD uses SPI for initialization and powerdown sequences. No further details are specified in the datasheet about the initialization/powerdown sequence, just the magic numbers that have to be sent over the SPI bus. This LCD can be found in the Aeronix Zipit Z2 handheld. Signed-off-by: Marek Vasut Signed-off-by: Andrew Morton Signed-off-by: Richard Purdie --- drivers/video/backlight/Kconfig | 7 + drivers/video/backlight/Makefile | 1 + drivers/video/backlight/lms283gf05.c | 242 +++++++++++++++++++++++++++++++++++ include/linux/spi/lms283gf05.h | 28 ++++ 4 files changed, 278 insertions(+) create mode 100644 drivers/video/backlight/lms283gf05.c create mode 100644 include/linux/spi/lms283gf05.h (limited to 'include') diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index f86dbfd209a..c0d4a536ea8 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -31,6 +31,13 @@ config LCD_CORGI Say y here to support the LCD panels usually found on SHARP corgi (C7x0) and spitz (Cxx00) models. +config LCD_LMS283GF05 + tristate "Samsung LMS283GF05 LCD" + depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO + help + SPI driver for Samsung LMS283GF05. This provides basic support + for powering the LCD up/down through a sysfs interface.
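For a board selecting LCD_LMS283GF05 (such as the Zipit Z2 mentioned in the changelog), the panel is registered as an ordinary SPI device, with the reset line described through the lms283gf05_pdata structure added by this patch. The sketch below is only an illustration: the GPIO number, SPI bus, chip select and clock rate are placeholders, not values taken from the patch.

	#include <linux/spi/spi.h>
	#include <linux/spi/lms283gf05.h>

	static struct lms283gf05_pdata board_lcd_pdata = {
		.reset_gpio	= 19,		/* placeholder: board-specific reset GPIO */
		.reset_inverted	= false,
	};

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "lms283gf05",	/* binds to the lms283gf05 spi_driver */
			.max_speed_hz	= 400000,	/* placeholder clock rate */
			.bus_num	= 1,		/* placeholder SPI controller */
			.chip_select	= 0,
			.platform_data	= &board_lcd_pdata,
		},
	};

	/* Registered once from the board's init code: */
	/* spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices)); */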
+ config LCD_LTV350QV tristate "Samsung LTV350QV LCD Panel" depends on LCD_CLASS_DEVICE && SPI_MASTER diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index df0b67ce88b..0abc014215b 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o +obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o obj-$(CONFIG_LCD_ILI9320) += ili9320.o obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c new file mode 100644 index 00000000000..447b542a20c --- /dev/null +++ b/drivers/video/backlight/lms283gf05.c @@ -0,0 +1,242 @@ +/* + * lms283gf05.c -- support for Samsung LMS283GF05 LCD + * + * Copyright (c) 2009 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include +#include + +struct lms283gf05_state { + struct spi_device *spi; + struct lcd_device *ld; +}; + +struct lms283gf05_seq { + unsigned char reg; + unsigned short value; + unsigned char delay; +}; + +/* Magic sequences supplied by manufacturer, for details refer to datasheet */ +static struct lms283gf05_seq disp_initseq[] = { + /* REG, VALUE, DELAY */ + { 0x07, 0x0000, 0 }, + { 0x13, 0x0000, 10 }, + + { 0x11, 0x3004, 0 }, + { 0x14, 0x200F, 0 }, + { 0x10, 0x1a20, 0 }, + { 0x13, 0x0040, 50 }, + + { 0x13, 0x0060, 0 }, + { 0x13, 0x0070, 200 }, + + { 0x01, 0x0127, 0 }, + { 0x02, 0x0700, 0 }, + { 0x03, 0x1030, 0 }, + { 0x08, 0x0208, 0 }, + { 0x0B, 0x0620, 0 }, + { 0x0C, 0x0110, 0 }, + { 0x30, 0x0120, 0 }, + { 0x31, 0x0127, 0 }, + { 0x32, 0x0000, 0 }, + { 0x33, 0x0503, 0 }, + { 0x34, 0x0727, 0 }, + { 0x35, 0x0124, 0 }, + { 0x36, 0x0706, 0 }, + { 0x37, 0x0701, 0 }, + { 0x38, 0x0F00, 0 }, + { 0x39, 0x0F00, 0 }, + { 0x40, 0x0000, 0 }, + { 0x41, 0x0000, 0 }, + { 0x42, 0x013f, 0 }, + { 0x43, 0x0000, 0 }, + { 0x44, 0x013f, 0 }, + { 0x45, 0x0000, 0 }, + { 0x46, 0xef00, 0 }, + { 0x47, 0x013f, 0 }, + { 0x48, 0x0000, 0 }, + { 0x07, 0x0015, 30 }, + + { 0x07, 0x0017, 0 }, + + { 0x20, 0x0000, 0 }, + { 0x21, 0x0000, 0 }, + { 0x22, 0x0000, 0 } +}; + +static struct lms283gf05_seq disp_pdwnseq[] = { + { 0x07, 0x0016, 30 }, + + { 0x07, 0x0004, 0 }, + { 0x10, 0x0220, 20 }, + + { 0x13, 0x0060, 50 }, + + { 0x13, 0x0040, 50 }, + + { 0x13, 0x0000, 0 }, + { 0x10, 0x0000, 0 } +}; + + +static void lms283gf05_reset(unsigned long gpio, bool inverted) +{ + gpio_set_value(gpio, !inverted); + mdelay(100); + gpio_set_value(gpio, inverted); + mdelay(20); + gpio_set_value(gpio, !inverted); + mdelay(20); +} + +static void lms283gf05_toggle(struct spi_device *spi, + struct lms283gf05_seq *seq, int sz) +{ + char buf[3]; + int i; + + for (i = 0; i < sz; i++) { + buf[0] = 0x74; + buf[1] = 0x00; + buf[2] = seq[i].reg; + spi_write(spi, buf, 3); + + buf[0] = 0x76; + buf[1] = seq[i].value >> 8; + buf[2] = seq[i].value & 0xff; + spi_write(spi, buf, 3); + + mdelay(seq[i].delay); + } +} + +static int lms283gf05_power_set(struct lcd_device *ld, int power) +{ + struct lms283gf05_state *st = lcd_get_data(ld); + struct spi_device *spi = st->spi; + struct lms283gf05_pdata *pdata = spi->dev.platform_data; + + if (power) { + if (pdata) + lms283gf05_reset(pdata->reset_gpio, + 
pdata->reset_inverted); + lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq)); + } else { + lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq)); + if (pdata) + gpio_set_value(pdata->reset_gpio, + pdata->reset_inverted); + } + + return 0; +} + +static struct lcd_ops lms_ops = { + .set_power = lms283gf05_power_set, + .get_power = NULL, +}; + +static int __devinit lms283gf05_probe(struct spi_device *spi) +{ + struct lms283gf05_state *st; + struct lms283gf05_pdata *pdata = spi->dev.platform_data; + struct lcd_device *ld; + int ret = 0; + + if (pdata != NULL) { + ret = gpio_request(pdata->reset_gpio, "LMS285GF05 RESET"); + if (ret) + return ret; + + ret = gpio_direction_output(pdata->reset_gpio, + !pdata->reset_inverted); + if (ret) + goto err; + } + + st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL); + if (st == NULL) { + dev_err(&spi->dev, "No memory for device state\n"); + ret = -ENOMEM; + goto err; + } + + ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops); + if (IS_ERR(ld)) { + ret = PTR_ERR(ld); + goto err2; + } + + st->spi = spi; + st->ld = ld; + + dev_set_drvdata(&spi->dev, st); + + /* kick in the LCD */ + if (pdata) + lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted); + lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq)); + + return 0; + +err2: + kfree(st); +err: + if (pdata != NULL) + gpio_free(pdata->reset_gpio); + + return ret; +} + +static int __devexit lms283gf05_remove(struct spi_device *spi) +{ + struct lms283gf05_state *st = dev_get_drvdata(&spi->dev); + struct lms283gf05_pdata *pdata = st->spi->dev.platform_data; + + lcd_device_unregister(st->ld); + + if (pdata != NULL) + gpio_free(pdata->reset_gpio); + + kfree(st); + + return 0; +} + +static struct spi_driver lms283gf05_driver = { + .driver = { + .name = "lms283gf05", + .owner = THIS_MODULE, + }, + .probe = lms283gf05_probe, + .remove = __devexit_p(lms283gf05_remove), +}; + +static __init int lms283gf05_init(void) +{ + return spi_register_driver(&lms283gf05_driver); +} + +static __exit void lms283gf05_exit(void) +{ + spi_unregister_driver(&lms283gf05_driver); +} + +module_init(lms283gf05_init); +module_exit(lms283gf05_exit); + +MODULE_AUTHOR("Marek Vasut "); +MODULE_DESCRIPTION("LCD283GF05 LCD"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h new file mode 100644 index 00000000000..555d254e660 --- /dev/null +++ b/include/linux/spi/lms283gf05.h @@ -0,0 +1,28 @@ +/* + * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD + * + * Copyright (C) 2009 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ +#define _INCLUDE_LINUX_SPI_LMS283GF05_H_ + +struct lms283gf05_pdata { + unsigned long reset_gpio; + bool reset_inverted; +}; + +#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */ -- cgit v1.2.3-70-g09d2 From f1938cd6e900a85de64184e46d841efc9efd3484 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 Sep 2009 11:32:08 +1000 Subject: drm: include seq_file.h for debugfs builds. Fixes a warning seen on powerpc. Signed-off-by: Dave Airlie --- include/drm/drm_mm.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index bc5a87e8aee..62329f9a42c 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -37,6 +37,9 @@ * Generic range manager structs */ #include +#ifdef CONFIG_DEBUG_FS +#include +#endif struct drm_mm_node { struct list_head fl_entry; -- cgit v1.2.3-70-g09d2 From 0403e3827788d878163f9ef0541b748b0f88ca5d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:50 -0700 Subject: dmaengine: add fence support Some engines optimize operation by reading ahead in the descriptor chain such that descriptor2 may start execution before descriptor1 completes. If descriptor2 depends on the result from descriptor1 then a fence is required (on descriptor2) to disable this optimization. The async_tx api could implicitly identify dependencies via the 'depend_tx' parameter, but that would constrain cases where the dependency chain only specifies a completion order rather than a data dependency. So, provide an ASYNC_TX_FENCE to explicitly identify data dependencies. Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 7 ++++-- crypto/async_tx/async_memset.c | 7 ++++-- crypto/async_tx/async_pq.c | 5 ++++ crypto/async_tx/async_raid6_recov.c | 47 +++++++++++++++++++++---------------- crypto/async_tx/async_xor.c | 11 ++++++--- drivers/md/raid5.c | 37 ++++++++++++++++++----------- include/linux/async_tx.h | 3 +++ include/linux/dmaengine.h | 3 +++ 8 files changed, 79 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 98e15bd0dcb..b38cbb3fd52 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -52,9 +52,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, if (device) { dma_addr_t dma_dest, dma_src; - unsigned long dma_prep_flags; + unsigned long dma_prep_flags = 0; - dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; + if (submit->cb_fn) + dma_prep_flags |= DMA_PREP_INTERRUPT; + if (submit->flags & ASYNC_TX_FENCE) + dma_prep_flags |= DMA_PREP_FENCE; dma_dest = dma_map_page(device->dev, dest, dest_offset, len, DMA_FROM_DEVICE); diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index b896a6e5f67..a374784e332 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -49,9 +49,12 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len, if (device) { dma_addr_t dma_dest; - unsigned long dma_prep_flags; + unsigned long dma_prep_flags = 0; - dma_prep_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; + if (submit->cb_fn) + dma_prep_flags |= DMA_PREP_INTERRUPT; + if (submit->flags & ASYNC_TX_FENCE) + dma_prep_flags |= DMA_PREP_FENCE; dma_dest = dma_map_page(device->dev, dest, offset, len, DMA_FROM_DEVICE); diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 108b21efb49..a25e290c39f 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -101,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, */ if (src_cnt > pq_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; + submit->flags |= ASYNC_TX_FENCE; dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; @@ -111,6 +112,8 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; /* Since we have clobbered the src_list we are committed * to doing this asynchronously. Drivers force forward @@ -282,6 +285,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, dma_flags |= DMA_PREP_PQ_DISABLE_P; if (!Q(blocks, disks)) dma_flags |= DMA_PREP_PQ_DISABLE_Q; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; for (i = 0; i < disks; i++) if (likely(blocks[i])) { BUG_ON(is_raid6_zero_block(blocks[i])); diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index 0c14d48c989..822a42d1006 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -44,6 +44,8 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); @@ -89,6 +91,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, @@ -138,7 +142,7 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, srcs[1] = q; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(b, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ @@ -188,23 +192,23 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, dp = blocks[faila]; dq = blocks[failb]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_memcpy(dp, g, 0, 0, bytes, submit); - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); /* compute P + Pxy */ srcs[0] = dp; srcs[1] = p; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + 
NULL, NULL, scribble); tx = async_xor(dp, srcs, 0, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; srcs[1] = q; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ @@ -212,7 +216,7 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, srcs[1] = dq; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(dq, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ @@ -252,7 +256,7 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, blocks[failb] = (void *)raid6_empty_zero_page; blocks[disks-1] = dq; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); /* Restore pointer table */ @@ -264,15 +268,15 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, /* compute P + Pxy */ srcs[0] = dp; srcs[1] = p; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dp, srcs, 0, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; srcs[1] = q; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ @@ -280,7 +284,7 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, srcs[1] = dq; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(dq, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ @@ -407,13 +411,16 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, int good = faila == 0 ? 
1 : 0; struct page *g = blocks[good]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, + scribble); tx = async_memcpy(p, g, 0, 0, bytes, submit); - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, + scribble); tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); } else { - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, + scribble); tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); } @@ -426,11 +433,11 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, srcs[0] = dq; srcs[1] = q; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_mult(dq, dq, coef, bytes, submit); srcs[0] = p; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 56b5f98da46..db279872ef3 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -69,6 +69,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, */ if (src_cnt > xor_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; + submit->flags |= ASYNC_TX_FENCE; dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; @@ -78,7 +79,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, } if (submit->cb_fn) dma_flags |= DMA_PREP_INTERRUPT; - + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; /* Since we have clobbered the src_list we are committed * to doing this asynchronously. Drivers force forward progress * in case they can not provide a descriptor @@ -264,12 +266,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, dma_src = (dma_addr_t *) src_list; if (dma_src && device && src_cnt <= device->max_xor) { - unsigned long dma_prep_flags; + unsigned long dma_prep_flags = 0; int i; pr_debug("%s: (async) len: %zu\n", __func__, len); - dma_prep_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; + if (submit->cb_fn) + dma_prep_flags |= DMA_PREP_INTERRUPT; + if (submit->flags & ASYNC_TX_FENCE) + dma_prep_flags |= DMA_PREP_FENCE; for (i = 0; i < src_cnt; i++) dma_src[i] = dma_map_page(device->dev, src_list[i], offset, len, DMA_TO_DEVICE); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0a5cf217121..54ef8d75541 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -502,13 +502,17 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, int i; int page_offset; struct async_submit_ctl submit; + enum async_tx_flags flags = 0; if (bio->bi_sector >= sector) page_offset = (signed)(bio->bi_sector - sector) * 512; else page_offset = (signed)(sector - bio->bi_sector) * -512; - init_async_submit(&submit, 0, tx, NULL, NULL, NULL); + if (frombio) + flags |= ASYNC_TX_FENCE; + init_async_submit(&submit, flags, tx, NULL, NULL, NULL); + bio_for_each_segment(bvl, bio, i) { int len = bio_iovec_idx(bio, i)->bv_len; int clen; @@ -685,7 +689,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) atomic_inc(&sh->count); - init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, + init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, ops_complete_compute, sh, to_addr_conv(sh, percpu)); if (unlikely(count == 1)) tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); @@ -763,7 +767,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) count = set_syndrome_sources(blocks, sh); blocks[count] = NULL; /* regenerating p is not necessary */ BUG_ON(blocks[count+1] != dest); /* q should already be set */ - init_async_submit(&submit, 0, NULL, ops_complete_compute, sh, + init_async_submit(&submit, ASYNC_TX_FENCE, NULL, + ops_complete_compute, sh, to_addr_conv(sh, percpu)); tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); } else { @@ -775,8 +780,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) blocks[count++] = sh->dev[i].page; } - init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, - ops_complete_compute, sh, + init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, + NULL, ops_complete_compute, sh, to_addr_conv(sh, percpu)); tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); } @@ -837,8 +842,9 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) /* Q disk is one of the missing disks */ if (faila == syndrome_disks) { /* Missing P+Q, just recompute */ - init_async_submit(&submit, 0, NULL, ops_complete_compute, - sh, to_addr_conv(sh, percpu)); + init_async_submit(&submit, ASYNC_TX_FENCE, NULL, + ops_complete_compute, sh, + to_addr_conv(sh, percpu)); return async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); } else { @@ -859,21 +865,24 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) blocks[count++] = sh->dev[i].page; } dest = sh->dev[data_target].page; - init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, - NULL, NULL, to_addr_conv(sh, percpu)); + init_async_submit(&submit, + ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, + NULL, NULL, NULL, + to_addr_conv(sh, percpu)); tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); count = set_syndrome_sources(blocks, sh); - init_async_submit(&submit, 0, tx, ops_complete_compute, - sh, to_addr_conv(sh, percpu)); + init_async_submit(&submit, ASYNC_TX_FENCE, tx, + ops_complete_compute, sh, + to_addr_conv(sh, percpu)); return async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); } } - init_async_submit(&submit, 0, NULL, 
ops_complete_compute, sh, - to_addr_conv(sh, percpu)); + init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute, + sh, to_addr_conv(sh, percpu)); if (failb == syndrome_disks) { /* We're missing D+P. */ return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, @@ -916,7 +925,7 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, xor_srcs[count++] = dev->page; } - init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx, + init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, ops_complete_prexor, sh, to_addr_conv(sh, percpu)); tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 866e61c4e2e..a1c486a88e8 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -58,11 +58,14 @@ struct dma_chan_ref { * array. * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a * dependency chain + * @ASYNC_TX_FENCE: specify that the next operation in the dependency + * chain uses this operation's result as an input */ enum async_tx_flags { ASYNC_TX_XOR_ZERO_DST = (1 << 0), ASYNC_TX_XOR_DROP_DST = (1 << 1), ASYNC_TX_ACK = (1 << 2), + ASYNC_TX_FENCE = (1 << 3), }; /** diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1012f1abcb5..4d6c1c925fd 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -87,6 +87,8 @@ enum dma_transaction_type { * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as * sources that were the result of a previous operation, in the case of a PQ * operation it continues the calculation with new sources + * @DMA_PREP_FENCE - tell the driver that subsequent operations depend + * on the result of this operation */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), @@ -98,6 +100,7 @@ enum dma_ctrl_flags { DMA_PREP_PQ_DISABLE_P = (1 << 6), DMA_PREP_PQ_DISABLE_Q = (1 << 7), DMA_PREP_CONTINUE = (1 << 8), + DMA_PREP_FENCE = (1 << 9), }; /** -- cgit v1.2.3-70-g09d2 From 138f4c359d23d2ec38d18bd70dd9613ae515fe93 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:51 -0700 Subject: dmaengine, async_tx: add a "no channel switch" allocator Channel switching is problematic for some dmaengine drivers as the architecture precludes separating the ->prep from ->submit. In these cases the driver can select ASYNC_TX_DISABLE_CHANNEL_SWITCH to modify the async_tx allocator to only return channels that support all of the required asynchronous operations. For example MD_RAID456=y selects support for asynchronous xor, xor validate, pq, pq validate, and memcpy. When ASYNC_TX_DISABLE_CHANNEL_SWITCH=y any channel with all these capabilities is marked DMA_ASYNC_TX allowing async_tx_find_channel() to quickly locate compatible channels with the guarantee that dependency chains will remain on one channel. When ASYNC_TX_DISABLE_CHANNEL_SWITCH=n async_tx_find_channel() may select channels that lead to operation chains that need to cross channel boundaries using the async_tx channel switch capability. 
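As an illustration only (not part of this patch): a hypothetical driver that advertises every operation type the new helper checks for would be folded into the scheme roughly as follows, using the existing dmaengine capability macros.

	static void example_advertise_caps(struct dma_device *dma_dev)
	{
		/* hypothetical driver setup: claim all of the checked types */
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_cap_set(DMA_MEMSET, dma_dev->cap_mask);
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
		dma_cap_set(DMA_PQ, dma_dev->cap_mask);
		dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
		/*
		 * dma_async_device_register() then marks the device with
		 * DMA_ASYNC_TX, and with ASYNC_TX_DISABLE_CHANNEL_SWITCH=y
		 * async_dma_find_channel(type) reduces to
		 * dma_find_channel(DMA_ASYNC_TX), so a dependency chain never
		 * leaves the channel it started on.
		 */
	}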
Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 4 ++++ drivers/dma/Kconfig | 4 ++++ drivers/dma/dmaengine.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/linux/dmaengine.h | 10 +++++++++- 4 files changed, 57 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 60615fedcf5..f9cdf04fe7c 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -81,6 +81,10 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; + #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH + BUG(); + #endif + /* first check to see if we can still append to depend_tx */ spin_lock_bh(&depend_tx->lock); if (depend_tx->parent && depend_tx->chan == tx->chan) { diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 912a51b5cbd..ddcd9793b25 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -17,11 +17,15 @@ if DMADEVICES comment "DMA Devices" +config ASYNC_TX_DISABLE_CHANNEL_SWITCH + bool + config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86 select DMA_ENGINE select DCA + select ASYNC_TX_DISABLE_CHANNEL_SWITCH help Enable support for the Intel(R) I/OAT DMA engine present in recent Intel Xeon chipsets. diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 96598479eec..d5bc628d207 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -608,6 +608,40 @@ void dmaengine_put(void) } EXPORT_SYMBOL(dmaengine_put); +static bool device_has_all_tx_types(struct dma_device *device) +{ + /* A device that satisfies this test has channels that will never cause + * an async_tx channel switch event as all possible operation types can + * be handled. + */ + #ifdef CONFIG_ASYNC_TX_DMA + if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE) + if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE) + if (!dma_has_cap(DMA_MEMSET, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) + if (!dma_has_cap(DMA_XOR, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) + if (!dma_has_cap(DMA_PQ, device->cap_mask)) + return false; + #endif + + return true; +} + static int get_dma_id(struct dma_device *device) { int rc; @@ -665,6 +699,12 @@ int dma_async_device_register(struct dma_device *device) BUG_ON(!device->device_issue_pending); BUG_ON(!device->dev); + /* note: this only matters in the + * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case + */ + if (device_has_all_tx_types(device)) + dma_cap_set(DMA_ASYNC_TX, device->cap_mask); + idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); if (!idr_ref) return -ENOMEM; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 4d6c1c925fd..86853ed7970 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -48,6 +48,9 @@ enum dma_status { /** * enum dma_transaction_type - DMA transaction types/indexes + * + * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is + * automatically set as dma devices are registered. 
*/ enum dma_transaction_type { DMA_MEMCPY, @@ -61,6 +64,7 @@ enum dma_transaction_type { DMA_MEMCPY_CRC32C, DMA_INTERRUPT, DMA_PRIVATE, + DMA_ASYNC_TX, DMA_SLAVE, }; @@ -396,7 +400,11 @@ static inline void net_dmaengine_put(void) #ifdef CONFIG_ASYNC_TX_DMA #define async_dmaengine_get() dmaengine_get() #define async_dmaengine_put() dmaengine_put() +#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH +#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) +#else #define async_dma_find_channel(type) dma_find_channel(type) +#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */ #else static inline void async_dmaengine_get(void) { @@ -409,7 +417,7 @@ async_dma_find_channel(enum dma_transaction_type type) { return NULL; } -#endif +#endif /* CONFIG_ASYNC_TX_DMA */ dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); -- cgit v1.2.3-70-g09d2 From 9308add6ea4fedeba37b0d7c4630a542bd34f214 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:52 -0700 Subject: dmaengine: cleanup unused transaction types No drivers currently implement these operation types, so they can be deleted. Signed-off-by: Dan Williams --- arch/arm/mach-iop13xx/setup.c | 7 ------- arch/arm/plat-iop/adma.c | 2 -- drivers/dma/iop-adma.c | 5 +---- include/linux/dmaengine.h | 3 --- 4 files changed, 1 insertion(+), 16 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index faaef95342b..5c147fb66a0 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -477,10 +477,8 @@ void __init iop13xx_platform_init(void) plat_data = &iop13xx_adma_0_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); - dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_1: @@ -489,10 +487,8 @@ void __init iop13xx_platform_init(void) plat_data = &iop13xx_adma_1_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); - dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_2: @@ -501,13 +497,10 @@ void __init iop13xx_platform_init(void) plat_data = &iop13xx_adma_2_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); - dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_PQ, plat_data->cap_mask); - dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); break; } diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index da1dd0dab07..1ff6a37e893 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c @@ -179,7 +179,6 @@ static int __init iop3xx_adma_cap_init(void) dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask); #else dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_0_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask); 
#endif @@ -188,7 +187,6 @@ static int __init iop3xx_adma_cap_init(void) dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask); #else dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_1_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask); #endif diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 4496bc60666..cecb6d657d5 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1256,15 +1256,12 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) } dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " - "( %s%s%s%s%s%s%s%s%s%s)\n", + "( %s%s%s%s%s%s%s)\n", dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", - dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", - dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "", dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", - dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 86853ed7970..db23fd583f9 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -56,12 +56,9 @@ enum dma_transaction_type { DMA_MEMCPY, DMA_XOR, DMA_PQ, - DMA_DUAL_XOR, - DMA_PQ_UPDATE, DMA_XOR_VAL, DMA_PQ_VAL, DMA_MEMSET, - DMA_MEMCPY_CRC32C, DMA_INTERRUPT, DMA_PRIVATE, DMA_ASYNC_TX, -- cgit v1.2.3-70-g09d2 From 83544ae9f3991bfc7d5e0fe9a3008cd05a8d57b7 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:53 -0700 Subject: dmaengine, async_tx: support alignment checks Some engines have transfer size and address alignment restrictions. Add a per-operation alignment property to struct dma_device that the async routines and dmatest can use to check alignment capabilities. Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 2 +- crypto/async_tx/async_memset.c | 2 +- crypto/async_tx/async_pq.c | 6 ++++-- crypto/async_tx/async_xor.c | 5 +++-- drivers/dma/dmatest.c | 14 ++++++++++++++ include/linux/dmaengine.h | 44 ++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 67 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index b38cbb3fd52..0ec1fb69d4e 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -50,7 +50,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; - if (device) { + if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { dma_addr_t dma_dest, dma_src; unsigned long dma_prep_flags = 0; diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index a374784e332..58e4a8752ae 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -47,7 +47,7 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len, struct dma_device *device = chan ? 
chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; - if (device) { + if (device && is_dma_fill_aligned(device, offset, 0, len)) { dma_addr_t dma_dest; unsigned long dma_prep_flags = 0; diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index a25e290c39f..b88db6d1dc6 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -211,7 +211,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, if (dma_src && device && (src_cnt <= dma_maxpq(device, 0) || - dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) { + dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && + is_dma_pq_aligned(device, offset, 0, len)) { /* run the p+q asynchronously */ pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); @@ -274,7 +275,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, else if (sizeof(dma_addr_t) <= sizeof(struct page *)) dma_src = (dma_addr_t *) blocks; - if (dma_src && device && disks <= dma_maxpq(device, 0)) { + if (dma_src && device && disks <= dma_maxpq(device, 0) && + is_dma_pq_aligned(device, offset, 0, len)) { struct device *dev = device->dev; dma_addr_t *pq = &dma_src[disks-2]; int i; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index db279872ef3..b459a9034aa 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -193,7 +193,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, else if (sizeof(dma_addr_t) <= sizeof(struct page *)) dma_src = (dma_addr_t *) src_list; - if (dma_src && chan) { + if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { /* run the xor asynchronously */ pr_debug("%s (async): len: %zu\n", __func__, len); @@ -265,7 +265,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, else if (sizeof(dma_addr_t) <= sizeof(struct page *)) dma_src = (dma_addr_t *) src_list; - if (dma_src && device && src_cnt <= device->max_xor) { + if (dma_src && device && src_cnt <= device->max_xor && + is_dma_xor_aligned(device, offset, 0, len)) { unsigned long dma_prep_flags = 0; int i; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 58e49e41c7a..a3722a7384b 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -288,6 +288,7 @@ static int dmatest_func(void *data) dma_addr_t dma_dsts[dst_cnt]; struct completion cmp; unsigned long tmo = msecs_to_jiffies(3000); + u8 align = 0; total_tests++; @@ -295,6 +296,18 @@ static int dmatest_func(void *data) src_off = dmatest_random() % (test_buf_size - len + 1); dst_off = dmatest_random() % (test_buf_size - len + 1); + /* honor alignment restrictions */ + if (thread->type == DMA_MEMCPY) + align = dev->copy_align; + else if (thread->type == DMA_XOR) + align = dev->xor_align; + else if (thread->type == DMA_PQ) + align = dev->pq_align; + + len = (len >> align) << align; + src_off = (src_off >> align) << align; + dst_off = (dst_off >> align) << align; + dmatest_init_srcs(thread->srcs, src_off, len); dmatest_init_dsts(thread->dsts, dst_off, len); @@ -311,6 +324,7 @@ static int dmatest_func(void *data) DMA_BIDIRECTIONAL); } + if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dma_dsts[0] + dst_off, diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index db23fd583f9..835b9c7bf1c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -245,6 +245,10 @@ struct dma_async_tx_descriptor { * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 
if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability + * @copy_align: alignment shift for memcpy operations + * @xor_align: alignment shift for xor operations + * @pq_align: alignment shift for pq operations + * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the @@ -271,6 +275,10 @@ struct dma_device { dma_cap_mask_t cap_mask; unsigned short max_xor; unsigned short max_pq; + u8 copy_align; + u8 xor_align; + u8 pq_align; + u8 fill_align; #define DMA_HAS_PQ_CONTINUE (1 << 15) int dev_id; @@ -314,6 +322,42 @@ struct dma_device { void (*device_issue_pending)(struct dma_chan *chan); }; +static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) +{ + size_t mask; + + if (!align) + return true; + mask = (1 << align) - 1; + if (mask & (off1 | off2 | len)) + return false; + return true; +} + +static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->copy_align, off1, off2, len); +} + +static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->xor_align, off1, off2, len); +} + +static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->pq_align, off1, off2, len); +} + +static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->fill_align, off1, off2, len); +} + static inline void dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) { -- cgit v1.2.3-70-g09d2 From b265b11fc1a0bd6ae5a7fde12e374583a52ab326 Mon Sep 17 00:00:00 2001 From: Tom Picard Date: Tue, 8 Sep 2009 17:43:01 -0700 Subject: ioat3: ioat3.2 pci ids for Jasper Forest Jasper Forest introduces raid offload support via ioat3.2 support. When raid offload is enabled two (out of 8 channels) will report raid5/raid6 offload capabilities. The remaining channels will only report ioat3.0 capabilities (memcpy). 
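As a hedged aside (sketch, not from this patch): consumers see this capability split through the ordinary dmaengine mask helpers, for example:

	static bool example_does_raid(struct dma_chan *chan)
	{
		/* true when the device behind this channel exposes the
		 * ioat3.2 raid operations rather than plain memcpy */
		return dma_has_cap(DMA_XOR, chan->device->cap_mask) &&
		       dma_has_cap(DMA_PQ, chan->device->cap_mask);
	}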
Signed-off-by: Tom Picard Signed-off-by: Dan Williams --- drivers/dma/ioat/pci.c | 13 +++++++++++++ include/linux/pci_ids.h | 10 ++++++++++ 2 files changed, 23 insertions(+) (limited to 'include') diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 626a508a4f8..6c1aac5b359 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -58,6 +58,19 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, + + /* I/OAT v3.2 platforms */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, + { 0, } }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0f71812d67d..2b4b8ce5325 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2529,6 +2529,16 @@ #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c +#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 -- cgit v1.2.3-70-g09d2 From 0803172778901e24a75ab074798d98c2b7411559 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:04 -0700 Subject: dmaengine: kill tx_list The tx_list attribute of struct dma_async_tx_descriptor is common to most, but not all dma driver implementations. None of the upper level code (dmaengine/async_tx) uses it, so allow drivers to implement it locally if they need it. This saves sizeof(struct list_head) bytes for drivers that do not manage descriptors with a linked list (e.g.: ioatdma v2,3). 
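A minimal sketch (hypothetical driver type, not from this patch) of the local alternative left open to drivers that still chain descriptors:

	struct example_sw_desc {
		struct dma_async_tx_descriptor txd;	/* common descriptor */
		struct list_head node;			/* driver-local tx_list */
		/* hardware descriptor, cookie bookkeeping, ... */
	};

The shared field disappears, so drivers that never walked such a list (ioatdma v2/3) simply shrink, while the rest keep the list_head in their private per-descriptor state.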
Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 1 - include/linux/dmaengine.h | 3 --- 2 files changed, 4 deletions(-) (limited to 'include') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 5a87384ea4f..562d182eae6 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -933,7 +933,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, { tx->chan = chan; spin_lock_init(&tx->lock); - INIT_LIST_HEAD(&tx->tx_list); } EXPORT_SYMBOL(dma_async_tx_descriptor_init); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index ffefba81c81..f114bc7790b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -180,8 +180,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); * @flags: flags to augment operation preparation, control completion, and * communicate status * @phys: physical address of the descriptor - * @tx_list: driver common field for operations that require multiple - * descriptors * @chan: target channel for this operation * @tx_submit: set the prepared descriptor(s) to be executed by the engine * @callback: routine to call after this operation is complete @@ -195,7 +193,6 @@ struct dma_async_tx_descriptor { dma_cookie_t cookie; enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ dma_addr_t phys; - struct list_head tx_list; struct dma_chan *chan; dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; -- cgit v1.2.3-70-g09d2 From c9766237afa92e8d7f27bbcd4964f1b43fa0bce8 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Fri, 4 Sep 2009 08:56:17 +0800 Subject: ACPICA: Update version to 20090903. Version 20090903. Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/acpixf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index f3b358b7432..e723b0fd8e4 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -47,7 +47,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20090730 +#define ACPI_CA_VERSION 0x20090903 #include "actypes.h" #include "actbl.h" -- cgit v1.2.3-70-g09d2 From 7839c5d5519b6d9e2ccf3cdbf1c39e3817ad0835 Mon Sep 17 00:00:00 2001 From: Fabian Henze Date: Tue, 8 Sep 2009 00:59:59 +0800 Subject: drm/i915: add B43 chipset support Signed-off-by: Fabian Henze Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ include/drm/drm_pciids.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 77ed060b429..e5f20e40ac5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -863,6 +863,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2E42 || \ (dev)->pci_device == 0x0042 || \ (dev)->pci_device == 0x0046) @@ -875,6 +876,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2E42 || \ IS_GM45(dev)) #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 853508499d2..3f6e545609b 100644 --- a/include/drm/drm_pciids.h +++ 
b/include/drm/drm_pciids.h @@ -552,6 +552,7 @@ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ -- cgit v1.2.3-70-g09d2 From 1a5aeeecd550ee4344cfba1791f1134739b16dc6 Mon Sep 17 00:00:00 2001 From: Maciej Sosnowski Date: Thu, 10 Sep 2009 15:05:58 +0200 Subject: dca: registering requesters in multiple dca domains This patch enables DCA support on multiple-IOH/multiple-IIO architectures. It modifies dca module by replacing single dca_providers list with dca_domains list, each domain containing separate list of providers. This approach lets dca driver manage multiple domains, i.e. sets of providers and requesters mapped back to the same PCI root complex device. The driver takes care to register each requester to a provider from the same domain. Signed-off-by: Dan Williams Signed-off-by: Maciej Sosnowski --- drivers/dca/dca-core.c | 122 +++++++++++++++++++++++++++++++++++++++++++------ drivers/dma/ioat/pci.c | 2 +- include/linux/dca.h | 11 ++++- 3 files changed, 120 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 25b743abfb5..7e318de0904 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -28,7 +28,7 @@ #include #include -#define DCA_VERSION "1.8" +#define DCA_VERSION "1.12.1" MODULE_VERSION(DCA_VERSION); MODULE_LICENSE("GPL"); @@ -36,20 +36,92 @@ MODULE_AUTHOR("Intel Corporation"); static DEFINE_SPINLOCK(dca_lock); -static LIST_HEAD(dca_providers); +static LIST_HEAD(dca_domains); -static struct dca_provider *dca_find_provider_by_dev(struct device *dev) +static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) { - struct dca_provider *dca, *ret = NULL; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus *bus = pdev->bus; - list_for_each_entry(dca, &dca_providers, node) { - if ((!dev) || (dca->ops->dev_managed(dca, dev))) { - ret = dca; - break; - } + while (bus->parent) + bus = bus->parent; + + return bus; +} + +static struct dca_domain *dca_allocate_domain(struct pci_bus *rc) +{ + struct dca_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_NOWAIT); + if (!domain) + return NULL; + + INIT_LIST_HEAD(&domain->dca_providers); + domain->pci_rc = rc; + + return domain; +} + +static void dca_free_domain(struct dca_domain *domain) +{ + list_del(&domain->node); + kfree(domain); +} + +static struct dca_domain *dca_find_domain(struct pci_bus *rc) +{ + struct dca_domain *domain; + + list_for_each_entry(domain, &dca_domains, node) + if (domain->pci_rc == rc) + return domain; + + return NULL; +} + +static struct dca_domain *dca_get_domain(struct device *dev) +{ + struct pci_bus *rc; + struct dca_domain *domain; + + rc = dca_pci_rc_from_dev(dev); + domain = dca_find_domain(rc); + + if (!domain) { + domain = dca_allocate_domain(rc); + if (domain) + list_add(&domain->node, &dca_domains); + } + + return domain; +} + +static struct dca_provider *dca_find_provider_by_dev(struct device *dev) +{ + struct dca_provider *dca; + struct pci_bus *rc; + struct dca_domain 
*domain; + + if (dev) { + rc = dca_pci_rc_from_dev(dev); + domain = dca_find_domain(rc); + if (!domain) + return NULL; + } else { + if (!list_empty(&dca_domains)) + domain = list_first_entry(&dca_domains, + struct dca_domain, + node); + else + return NULL; } - return ret; + list_for_each_entry(dca, &domain->dca_providers, node) + if ((!dev) || (dca->ops->dev_managed(dca, dev))) + return dca; + + return NULL; } /** @@ -61,6 +133,8 @@ int dca_add_requester(struct device *dev) struct dca_provider *dca; int err, slot = -ENODEV; unsigned long flags; + struct pci_bus *pci_rc; + struct dca_domain *domain; if (!dev) return -EFAULT; @@ -74,7 +148,14 @@ int dca_add_requester(struct device *dev) return -EEXIST; } - list_for_each_entry(dca, &dca_providers, node) { + pci_rc = dca_pci_rc_from_dev(dev); + domain = dca_find_domain(pci_rc); + if (!domain) { + spin_unlock_irqrestore(&dca_lock, flags); + return -ENODEV; + } + + list_for_each_entry(dca, &domain->dca_providers, node) { slot = dca->ops->add_requester(dca, dev); if (slot >= 0) break; @@ -222,13 +303,19 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) { int err; unsigned long flags; + struct dca_domain *domain; err = dca_sysfs_add_provider(dca, dev); if (err) return err; spin_lock_irqsave(&dca_lock, flags); - list_add(&dca->node, &dca_providers); + domain = dca_get_domain(dev); + if (!domain) { + spin_unlock_irqrestore(&dca_lock, flags); + return -ENODEV; + } + list_add(&dca->node, &domain->dca_providers); spin_unlock_irqrestore(&dca_lock, flags); blocking_notifier_call_chain(&dca_provider_chain, @@ -241,15 +328,24 @@ EXPORT_SYMBOL_GPL(register_dca_provider); * unregister_dca_provider - remove a dca provider * @dca - struct created by alloc_dca_provider() */ -void unregister_dca_provider(struct dca_provider *dca) +void unregister_dca_provider(struct dca_provider *dca, struct device *dev) { unsigned long flags; + struct pci_bus *pci_rc; + struct dca_domain *domain; blocking_notifier_call_chain(&dca_provider_chain, DCA_PROVIDER_REMOVE, NULL); spin_lock_irqsave(&dca_lock, flags); + list_del(&dca->node); + + pci_rc = dca_pci_rc_from_dev(dev); + domain = dca_find_domain(pci_rc); + if (list_empty(&domain->dca_providers)) + dca_free_domain(domain); + spin_unlock_irqrestore(&dca_lock, flags); dca_sysfs_remove_provider(dca); diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index c788fa26647..d545fae30f3 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -175,7 +175,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev) dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { - unregister_dca_provider(device->dca); + unregister_dca_provider(device->dca, &pdev->dev); free_dca_provider(device->dca); device->dca = NULL; } diff --git a/include/linux/dca.h b/include/linux/dca.h index 9c20c7e87d0..d27a7a05718 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -20,6 +20,9 @@ */ #ifndef DCA_H #define DCA_H + +#include + /* DCA Provider API */ /* DCA Notifier Interface */ @@ -36,6 +39,12 @@ struct dca_provider { int id; }; +struct dca_domain { + struct list_head node; + struct list_head dca_providers; + struct pci_bus *pci_rc; +}; + struct dca_ops { int (*add_requester) (struct dca_provider *, struct device *); int (*remove_requester) (struct dca_provider *, struct device *); @@ -47,7 +56,7 @@ struct dca_ops { struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); void free_dca_provider(struct dca_provider *dca); int register_dca_provider(struct 
dca_provider *dca, struct device *dev); -void unregister_dca_provider(struct dca_provider *dca); +void unregister_dca_provider(struct dca_provider *dca, struct device *dev); static inline void *dca_priv(struct dca_provider *dca) { -- cgit v1.2.3-70-g09d2 From 7e12715ecc47a8a59154afe2746e48998225bb69 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 10 Sep 2009 15:28:02 -0700 Subject: ACPI button: provide lid status functions Some drivers need to know when a lid event occurs and get the current status. This can be useful for when a platform firmware clobbers some hardware state at lid time, and a driver needs to restore things when the lid is opened again. Acked-by: Matthew Garrett Signed-off-by: Jesse Barnes Signed-off-by: Eric Anholt --- drivers/acpi/button.c | 45 +++++++++++++++++++++++++++++++++++++++++++-- include/acpi/button.h | 10 ++++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 include/acpi/button.h (limited to 'include') diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 9195deba9d9..ebb593e9c38 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -113,6 +113,9 @@ static const struct file_operations acpi_button_state_fops = { .release = single_release, }; +static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); +static struct acpi_device *lid_device; + /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ @@ -229,11 +232,38 @@ static int acpi_button_remove_fs(struct acpi_device *device) /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ +int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&acpi_lid_notifier, nb); +} +EXPORT_SYMBOL(acpi_lid_notifier_register); + +int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb); +} +EXPORT_SYMBOL(acpi_lid_notifier_unregister); + +int acpi_lid_open(void) +{ + acpi_status status; + unsigned long long state; + + status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, + &state); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return !!state; +} +EXPORT_SYMBOL(acpi_lid_open); + static int acpi_lid_send_state(struct acpi_device *device) { struct acpi_button *button = acpi_driver_data(device); unsigned long long state; acpi_status status; + int ret; status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); if (ACPI_FAILURE(status)) @@ -242,7 +272,12 @@ static int acpi_lid_send_state(struct acpi_device *device) /* input layer checks if event is redundant */ input_report_switch(button->input, SW_LID, !state); input_sync(button->input); - return 0; + + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); + if (ret == NOTIFY_DONE) + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, + device); + return ret; } static void acpi_button_notify(struct acpi_device *device, u32 event) @@ -364,8 +399,14 @@ static int acpi_button_add(struct acpi_device *device) error = input_register_device(input); if (error) goto err_remove_fs; - if (button->type == ACPI_BUTTON_TYPE_LID) + if (button->type == ACPI_BUTTON_TYPE_LID) { acpi_lid_send_state(device); + /* + * This assumes there's only one lid device, or if there are + * more we only care about the last one... 
+ */ + lid_device = device; + } if (device->wakeup.flags.valid) { /* Button's GPE is run-wake GPE */ diff --git a/include/acpi/button.h b/include/acpi/button.h new file mode 100644 index 00000000000..bb643a79d65 --- /dev/null +++ b/include/acpi/button.h @@ -0,0 +1,10 @@ +#ifndef ACPI_BUTTON_H +#define ACPI_BUTTON_H + +#include + +extern int acpi_lid_notifier_register(struct notifier_block *nb); +extern int acpi_lid_notifier_unregister(struct notifier_block *nb); +extern int acpi_lid_open(void); + +#endif /* ACPI_BUTTON_H */ -- cgit v1.2.3-70-g09d2 From 074835f0143b83845af5044af2739c52c9f53808 Mon Sep 17 00:00:00 2001 From: Youquan Song Date: Wed, 9 Sep 2009 12:05:39 -0400 Subject: intel-iommu: Fix kernel hang if interrupt remapping disabled in BIOS BIOS clear DMAR table INTR_REMAP flag to disable interrupt remapping. Current kernel only check interrupt remapping(IR) flag in DRHD's extended capability register to decide interrupt remapping support or not. But IR flag will not change when BIOS disable/enable interrupt remapping. When user disable interrupt remapping in BIOS or BIOS often defaultly disable interrupt remapping feature when BIOS is not mature.Though BIOS disable interrupt remapping but intr_remapping_supported function will always report to OS support interrupt remapping if VT-d2 chipset populated. On this cases, kernel will continue enable interrupt remapping and result kernel panic. This bug exist on almost all platforms with interrupt remapping support. This patch add DMAR table INTR_REMAP flag check before enable interrupt remapping. Signed-off-by: Youquan Song Signed-off-by: David Woodhouse --- drivers/pci/dmar.c | 10 ++++++++++ drivers/pci/intr_remapping.c | 3 +++ include/linux/intel-iommu.h | 2 ++ 3 files changed, 15 insertions(+) (limited to 'include') diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index fba4f689168..270ed222a07 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -1316,3 +1316,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu) return 0; } + +/* + * Check interrupt remapping support in DMAR table description. + */ +int dmar_ir_support(void) +{ + struct acpi_table_dmar *dmar; + dmar = (struct acpi_table_dmar *)dmar_tbl; + return dmar->flags & 0x1; +} diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index ebfa47b79c5..ac065144c01 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -611,6 +611,9 @@ int __init intr_remapping_supported(void) if (disable_intremap) return 0; + if (!dmar_ir_support()) + return 0; + for_each_drhd_unit(drhd) { struct intel_iommu *iommu = drhd->iommu; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 482dc91fd53..4f0a72a9740 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); +extern int dmar_ir_support(void); + #endif -- cgit v1.2.3-70-g09d2 From 52a7a1cec88acdaf3f8b36a6b1fe904f6eca7ee5 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 10 Sep 2009 15:26:30 +0200 Subject: [ARM] pxafb: add accelerator ID for PXA3xx GCU Add ID 99 for PXA3xx frame buffers and report it in the pxa frame buffer conditionally, depending on a new flag in struct pxafb_mach_info. 
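For illustration, a hypothetical board file opts in through the new flag; only acceleration_enabled is introduced by this patch, the other names below are stand-ins:

	static struct pxafb_mach_info example_lcd_info = {
		.modes			= example_modes,
		.num_modes		= ARRAY_SIZE(example_modes),
		.acceleration_enabled	= 1,	/* report fix.accel = FB_ACCEL_PXA3XX on PXA3xx */
	};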
Signed-off-by: Daniel Mack Cc: linux-fbdev-devel@lists.sourceforge.net Cc: Dennis Oliver Kropp Cc: Sven Neumann Cc: Guennadi Liakhovetski Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/pxafb.h | 3 ++- drivers/video/pxafb.c | 3 +++ include/linux/fb.h | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/arm/mach-pxa/include/mach/pxafb.h b/arch/arm/mach-pxa/include/mach/pxafb.h index 6932720ba04..f73061c90b5 100644 --- a/arch/arm/mach-pxa/include/mach/pxafb.h +++ b/arch/arm/mach-pxa/include/mach/pxafb.h @@ -118,7 +118,8 @@ struct pxafb_mach_info { u_int fixed_modes:1, cmap_inverse:1, cmap_static:1, - unused:29; + acceleration_enabled:1, + unused:28; /* The following should be defined in LCCR0 * LCCR0_Act or LCCR0_Pas Active or Passive diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 3a002a634ec..1820c4a2443 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -2083,6 +2083,9 @@ static int __devinit pxafb_probe(struct platform_device *dev) goto failed; } + if (cpu_is_pxa3xx() && inf->acceleration_enabled) + fbi->fb.fix.accel = FB_ACCEL_PXA3XX; + fbi->backlight_power = inf->pxafb_backlight_power; fbi->lcd_power = inf->pxafb_lcd_power; diff --git a/include/linux/fb.h b/include/linux/fb.h index f847df9e99b..a34bdf5a9d2 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -133,6 +133,7 @@ struct dentry; #define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ #define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ #define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ +#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ #define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ #define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ -- cgit v1.2.3-70-g09d2 From 4cfc7e6019caa3e97d2a81c48c8d575d7b38d751 Mon Sep 17 00:00:00 2001 From: Rahul Iyer Date: Thu, 10 Sep 2009 17:32:28 +0300 Subject: nfsd41: sunrpc: Added rpc server-side backchannel handling When the call direction is a reply, copy the xid and call direction into the req->rq_private_buf.head[0].iov_base otherwise rpc_verify_header returns rpc_garbage. 
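To orient the reader before the diff (sketch only, mirroring the new svc_process_calldir() below): the first 8 bytes of each TCP record carry the xid and the call direction, which is how the server now tells a forward-channel call apart from a backchannel reply.

	u32 *p = rqstp->rq_arg.head[0].iov_base;
	u32 xid     = *p++;	/* for a reply this is copied into rq_private_buf */
	u32 calldir = *p;	/* 0 == call (request), non-zero == reply */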
Signed-off-by: Rahul Iyer Signed-off-by: Mike Sager Signed-off-by: Marc Eshel Signed-off-by: Benny Halevy Signed-off-by: Ricardo Labiaga Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy [get rid of CONFIG_NFSD_V4_1] [sunrpc: refactoring of svc_tcp_recvfrom] [nfsd41: sunrpc: create common send routine for the fore and the back channels] [nfsd41: sunrpc: Use free_page() to free server backchannel pages] [nfsd41: sunrpc: Document server backchannel locking] [nfsd41: sunrpc: remove bc_connect_worker()] [nfsd41: sunrpc: Define xprt_server_backchannel()[ [nfsd41: sunrpc: remove bc_close and bc_init_auto_disconnect dummy functions] [nfsd41: sunrpc: eliminate unneeded switch statement in xs_setup_tcp()] [nfsd41: sunrpc: Don't auto close the server backchannel connection] [nfsd41: sunrpc: Remove unused functions] Signed-off-by: Alexandros Batsakis Signed-off-by: Ricardo Labiaga Signed-off-by: Benny Halevy [nfsd41: change bc_sock to bc_xprt] [nfsd41: sunrpc: move struct rpc_buffer def into a common header file] [nfsd41: sunrpc: use rpc_sleep in bc_send_request so not to block on mutex] [removed cosmetic changes] Signed-off-by: Benny Halevy [sunrpc: add new xprt class for nfsv4.1 backchannel] [sunrpc: v2.1 change handling of auto_close and init_auto_disconnect operations for the nfsv4.1 backchannel] Signed-off-by: Alexandros Batsakis [reverted more cosmetic leftovers] [got rid of xprt_server_backchannel] [separated "nfsd41: sunrpc: add new xprt class for nfsv4.1 backchannel"] Signed-off-by: Benny Halevy Cc: Trond Myklebust [sunrpc: change idle timeout value for the backchannel] Signed-off-by: Alexandros Batsakis Signed-off-by: Benny Halevy Acked-by: Trond Myklebust Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc_xprt.h | 1 + include/linux/sunrpc/svcsock.h | 1 + include/linux/sunrpc/xprt.h | 1 + net/sunrpc/sunrpc.h | 4 + net/sunrpc/svc_xprt.c | 2 + net/sunrpc/svcsock.c | 172 ++++++++++++++++++++++++++++++++-------- net/sunrpc/xprt.c | 15 +++- net/sunrpc/xprtsock.c | 146 ++++++++++++++++++++++++++++++++++ 8 files changed, 303 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 2223ae0b5ed..5f4e18b3ce7 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -65,6 +65,7 @@ struct svc_xprt { size_t xpt_locallen; /* length of address */ struct sockaddr_storage xpt_remote; /* remote peer's address */ size_t xpt_remotelen; /* length of address */ + struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ }; int svc_reg_xprt_class(struct svc_xprt_class *); diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 04dba23c59f..1b353a76c30 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -28,6 +28,7 @@ struct svc_sock { /* private TCP part */ u32 sk_reclen; /* length of record */ u32 sk_tcplen; /* current read length */ + struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */ }; /* diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index c090df44257..228d694dbb9 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -179,6 +179,7 @@ struct rpc_xprt { spinlock_t reserve_lock; /* lock slot table */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_NFS_V4_1) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ diff 
--git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h index 13171e63f51..90c292e2738 100644 --- a/net/sunrpc/sunrpc.h +++ b/net/sunrpc/sunrpc.h @@ -43,5 +43,9 @@ static inline int rpc_reply_expected(struct rpc_task *task) (task->tk_msg.rpc_proc->p_decode != NULL); } +int svc_send_common(struct socket *sock, struct xdr_buf *xdr, + struct page *headpage, unsigned long headoffset, + struct page *tailpage, unsigned long tailoffset); + #endif /* _NET_SUNRPC_SUNRPC_H */ diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 912dea558cc..df124f78ee4 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -160,6 +160,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt, mutex_init(&xprt->xpt_mutex); spin_lock_init(&xprt->xpt_lock); set_bit(XPT_BUSY, &xprt->xpt_flags); + rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); } EXPORT_SYMBOL_GPL(svc_xprt_init); @@ -810,6 +811,7 @@ int svc_send(struct svc_rqst *rqstp) else len = xprt->xpt_ops->xpo_sendto(rqstp); mutex_unlock(&xprt->xpt_mutex); + rpc_wake_up(&xprt->xpt_bc_pending); svc_xprt_release(rqstp); if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 76a380d37de..ccc5e83cae5 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -49,6 +49,7 @@ #include #include #include +#include #define RPCDBG_FACILITY RPCDBG_SVCXPRT @@ -153,49 +154,27 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) } /* - * Generic sendto routine + * send routine intended to be shared by the fore- and back-channel */ -static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) +int svc_send_common(struct socket *sock, struct xdr_buf *xdr, + struct page *headpage, unsigned long headoffset, + struct page *tailpage, unsigned long tailoffset) { - struct svc_sock *svsk = - container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); - struct socket *sock = svsk->sk_sock; - int slen; - union { - struct cmsghdr hdr; - long all[SVC_PKTINFO_SPACE / sizeof(long)]; - } buffer; - struct cmsghdr *cmh = &buffer.hdr; - int len = 0; int result; int size; struct page **ppage = xdr->pages; size_t base = xdr->page_base; unsigned int pglen = xdr->page_len; unsigned int flags = MSG_MORE; - RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); + int slen; + int len = 0; slen = xdr->len; - if (rqstp->rq_prot == IPPROTO_UDP) { - struct msghdr msg = { - .msg_name = &rqstp->rq_addr, - .msg_namelen = rqstp->rq_addrlen, - .msg_control = cmh, - .msg_controllen = sizeof(buffer), - .msg_flags = MSG_MORE, - }; - - svc_set_cmsg_data(rqstp, cmh); - - if (sock_sendmsg(sock, &msg, 0) < 0) - goto out; - } - /* send head */ if (slen == xdr->head[0].iov_len) flags = 0; - len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, + len = kernel_sendpage(sock, headpage, headoffset, xdr->head[0].iov_len, flags); if (len != xdr->head[0].iov_len) goto out; @@ -219,16 +198,58 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) base = 0; ppage++; } + /* send tail */ if (xdr->tail[0].iov_len) { - result = kernel_sendpage(sock, rqstp->rq_respages[0], - ((unsigned long)xdr->tail[0].iov_base) - & (PAGE_SIZE-1), - xdr->tail[0].iov_len, 0); - + result = kernel_sendpage(sock, tailpage, tailoffset, + xdr->tail[0].iov_len, 0); if (result > 0) len += result; } + +out: + return len; +} + + +/* + * Generic sendto routine + */ +static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) +{ + struct svc_sock *svsk = + container_of(rqstp->rq_xprt, struct svc_sock, 
sk_xprt); + struct socket *sock = svsk->sk_sock; + union { + struct cmsghdr hdr; + long all[SVC_PKTINFO_SPACE / sizeof(long)]; + } buffer; + struct cmsghdr *cmh = &buffer.hdr; + int len = 0; + unsigned long tailoff; + unsigned long headoff; + RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); + + if (rqstp->rq_prot == IPPROTO_UDP) { + struct msghdr msg = { + .msg_name = &rqstp->rq_addr, + .msg_namelen = rqstp->rq_addrlen, + .msg_control = cmh, + .msg_controllen = sizeof(buffer), + .msg_flags = MSG_MORE, + }; + + svc_set_cmsg_data(rqstp, cmh); + + if (sock_sendmsg(sock, &msg, 0) < 0) + goto out; + } + + tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1); + headoff = 0; + len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff, + rqstp->rq_respages[0], tailoff); + out: dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n", svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, @@ -951,6 +972,57 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) return -EAGAIN; } +static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp, + struct rpc_rqst **reqpp, struct kvec *vec) +{ + struct rpc_rqst *req = NULL; + u32 *p; + u32 xid; + u32 calldir; + int len; + + len = svc_recvfrom(rqstp, vec, 1, 8); + if (len < 0) + goto error; + + p = (u32 *)rqstp->rq_arg.head[0].iov_base; + xid = *p++; + calldir = *p; + + if (calldir == 0) { + /* REQUEST is the most common case */ + vec[0] = rqstp->rq_arg.head[0]; + } else { + /* REPLY */ + if (svsk->sk_bc_xprt) + req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid); + + if (!req) { + printk(KERN_NOTICE + "%s: Got unrecognized reply: " + "calldir 0x%x sk_bc_xprt %p xid %08x\n", + __func__, ntohl(calldir), + svsk->sk_bc_xprt, xid); + vec[0] = rqstp->rq_arg.head[0]; + goto out; + } + + memcpy(&req->rq_private_buf, &req->rq_rcv_buf, + sizeof(struct xdr_buf)); + /* copy the xid and call direction */ + memcpy(req->rq_private_buf.head[0].iov_base, + rqstp->rq_arg.head[0].iov_base, 8); + vec[0] = req->rq_private_buf.head[0]; + } + out: + vec[0].iov_base += 8; + vec[0].iov_len -= 8; + len = svsk->sk_reclen - 8; + error: + *reqpp = req; + return len; +} + /* * Receive data from a TCP socket. */ @@ -962,6 +1034,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) int len; struct kvec *vec; int pnum, vlen; + struct rpc_rqst *req = NULL; dprintk("svc: tcp_recv %p data %d conn %d close %d\n", svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags), @@ -975,9 +1048,27 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) vec = rqstp->rq_vec; vec[0] = rqstp->rq_arg.head[0]; vlen = PAGE_SIZE; + + /* + * We have enough data for the whole tcp record. Let's try and read the + * first 8 bytes to get the xid and the call direction. We can use this + * to figure out if this is a call or a reply to a callback. If + * sk_reclen is < 8 (xid and calldir), then this is a malformed packet. + * In that case, don't bother with the calldir and just read the data. + * It will be rejected in svc_process. + */ + if (len >= 8) { + len = svc_process_calldir(svsk, rqstp, &req, vec); + if (len < 0) + goto err_again; + vlen -= 8; + } + pnum = 1; while (vlen < len) { - vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]); + vec[pnum].iov_base = (req) ? 
+ page_address(req->rq_private_buf.pages[pnum - 1]) : + page_address(rqstp->rq_pages[pnum]); vec[pnum].iov_len = PAGE_SIZE; pnum++; vlen += PAGE_SIZE; @@ -989,6 +1080,16 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) if (len < 0) goto err_again; + /* + * Account for the 8 bytes we read earlier + */ + len += 8; + + if (req) { + xprt_complete_rqst(req->rq_task, len); + len = 0; + goto out; + } dprintk("svc: TCP complete record (%d bytes)\n", len); rqstp->rq_arg.len = len; rqstp->rq_arg.page_base = 0; @@ -1002,6 +1103,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) rqstp->rq_xprt_ctxt = NULL; rqstp->rq_prot = IPPROTO_TCP; +out: /* Reset TCP read info */ svsk->sk_reclen = 0; svsk->sk_tcplen = 0; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index f412a852bc7..fd46d42afa8 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -832,6 +832,11 @@ static void xprt_timer(struct rpc_task *task) spin_unlock_bh(&xprt->transport_lock); } +static inline int xprt_has_timer(struct rpc_xprt *xprt) +{ + return xprt->idle_timeout != 0; +} + /** * xprt_prepare_transmit - reserve the transport before sending a request * @task: RPC task about to send a request @@ -1013,7 +1018,7 @@ void xprt_release(struct rpc_task *task) if (!list_empty(&req->rq_list)) list_del(&req->rq_list); xprt->last_used = jiffies; - if (list_empty(&xprt->recv)) + if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); spin_unlock_bh(&xprt->transport_lock); @@ -1082,8 +1087,11 @@ found: #endif /* CONFIG_NFS_V4_1 */ INIT_WORK(&xprt->task_cleanup, xprt_autoclose); - setup_timer(&xprt->timer, xprt_init_autodisconnect, - (unsigned long)xprt); + if (xprt_has_timer(xprt)) + setup_timer(&xprt->timer, xprt_init_autodisconnect, + (unsigned long)xprt); + else + init_timer(&xprt->timer); xprt->last_used = jiffies; xprt->cwnd = RPC_INITCWND; xprt->bind_index = 0; @@ -1102,7 +1110,6 @@ found: dprintk("RPC: created transport %p with %u slots\n", xprt, xprt->max_reqs); - return xprt; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 62438f3a914..d9a2b815714 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #ifdef CONFIG_NFS_V4_1 @@ -43,6 +44,7 @@ #include #include +#include "sunrpc.h" /* * xprtsock tunables */ @@ -2098,6 +2100,134 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) xprt->stat.bklog_u); } +/* + * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason + * we allocate pages instead doing a kmalloc like rpc_malloc is because we want + * to use the server side send routines. + */ +void *bc_malloc(struct rpc_task *task, size_t size) +{ + struct page *page; + struct rpc_buffer *buf; + + BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer)); + page = alloc_page(GFP_KERNEL); + + if (!page) + return NULL; + + buf = page_address(page); + buf->len = PAGE_SIZE; + + return buf->data; +} + +/* + * Free the space allocated in the bc_alloc routine + */ +void bc_free(void *buffer) +{ + struct rpc_buffer *buf; + + if (!buffer) + return; + + buf = container_of(buffer, struct rpc_buffer, data); + free_page((unsigned long)buf); +} + +/* + * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex + * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request. 
+ */ +static int bc_sendto(struct rpc_rqst *req) +{ + int len; + struct xdr_buf *xbufp = &req->rq_snd_buf; + struct rpc_xprt *xprt = req->rq_xprt; + struct sock_xprt *transport = + container_of(xprt, struct sock_xprt, xprt); + struct socket *sock = transport->sock; + unsigned long headoff; + unsigned long tailoff; + + /* + * Set up the rpc header and record marker stuff + */ + xs_encode_tcp_record_marker(xbufp); + + tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; + headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; + len = svc_send_common(sock, xbufp, + virt_to_page(xbufp->head[0].iov_base), headoff, + xbufp->tail[0].iov_base, tailoff); + + if (len != xbufp->len) { + printk(KERN_NOTICE "Error sending entire callback!\n"); + len = -EAGAIN; + } + + return len; +} + +/* + * The send routine. Borrows from svc_send + */ +static int bc_send_request(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct svc_xprt *xprt; + struct svc_sock *svsk; + u32 len; + + dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); + /* + * Get the server socket associated with this callback xprt + */ + xprt = req->rq_xprt->bc_xprt; + svsk = container_of(xprt, struct svc_sock, sk_xprt); + + /* + * Grab the mutex to serialize data as the connection is shared + * with the fore channel + */ + if (!mutex_trylock(&xprt->xpt_mutex)) { + rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL); + if (!mutex_trylock(&xprt->xpt_mutex)) + return -EAGAIN; + rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task); + } + if (test_bit(XPT_DEAD, &xprt->xpt_flags)) + len = -ENOTCONN; + else + len = bc_sendto(req); + mutex_unlock(&xprt->xpt_mutex); + + if (len > 0) + len = 0; + + return len; +} + +/* + * The close routine. Since this is client initiated, we do nothing + */ + +static void bc_close(struct rpc_xprt *xprt) +{ + return; +} + +/* + * The xprt destroy routine. Again, because this connection is client + * initiated, we do nothing + */ + +static void bc_destroy(struct rpc_xprt *xprt) +{ + return; +} + static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt_cong, @@ -2134,6 +2264,22 @@ static struct rpc_xprt_ops xs_tcp_ops = { .print_stats = xs_tcp_print_stats, }; +/* + * The rpc_xprt_ops for the server backchannel + */ + +static struct rpc_xprt_ops bc_tcp_ops = { + .reserve_xprt = xprt_reserve_xprt, + .release_xprt = xprt_release_xprt, + .buf_alloc = bc_malloc, + .buf_free = bc_free, + .send_request = bc_send_request, + .set_retrans_timeout = xprt_set_retrans_timeout_def, + .close = bc_close, + .destroy = bc_destroy, + .print_stats = xs_tcp_print_stats, +}; + static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, unsigned int slot_table_size) { -- cgit v1.2.3-70-g09d2 From 18668ff9a3232d5f942a2f7abc1ad67d2760dcdf Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 6 Sep 2009 18:49:48 +0200 Subject: firewire: core: header file cleanup fw_card_get, fw_card_put, fw_card_release are currently not exported for use outside the firewire-core. Move their definitions/ declarations from the subsystem header file to the core header file. 
Signed-off-by: Stefan Richter --- drivers/firewire/core.h | 14 ++++++++++++++ include/linux/firewire.h | 14 -------------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 6052816be35..7ff6e758515 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -96,6 +96,20 @@ int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); int fw_compute_block_crc(u32 *block); void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); +static inline struct fw_card *fw_card_get(struct fw_card *card) +{ + kref_get(&card->kref); + + return card; +} + +void fw_card_release(struct kref *kref); + +static inline void fw_card_put(struct fw_card *card) +{ + kref_put(&card->kref, fw_card_release); +} + /* -cdev */ diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 192d1e43c43..7e1d4dec83e 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -134,20 +134,6 @@ struct fw_card { u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; }; -static inline struct fw_card *fw_card_get(struct fw_card *card) -{ - kref_get(&card->kref); - - return card; -} - -void fw_card_release(struct kref *kref); - -static inline void fw_card_put(struct fw_card *card) -{ - kref_put(&card->kref, fw_card_release); -} - struct fw_attribute_group { struct attribute_group *groups[2]; struct attribute_group group; -- cgit v1.2.3-70-g09d2 From f977bb4937857994312fff4f9c2cad336a36a932 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 13 Sep 2009 18:15:54 +0200 Subject: perf_counter, sched: Add sched_stat_runtime tracepoint This allows more precise tracking of how the scheduler accounts (and acts upon) a task having spent N nanoseconds of CPU time. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- include/trace/events/sched.h | 33 +++++++++++++++++++++++++++++++++ kernel/sched_fair.c | 1 + 2 files changed, 34 insertions(+) (limited to 'include') diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index b48f1ad7c94..4069c43f418 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -379,6 +379,39 @@ TRACE_EVENT(sched_stat_wait, (unsigned long long)__entry->delay) ); +/* + * Tracepoint for accounting runtime (time the task is executing + * on a CPU). + */ +TRACE_EVENT(sched_stat_runtime, + + TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime), + + TP_ARGS(tsk, runtime, vruntime), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( u64, runtime ) + __field( u64, vruntime ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->runtime = runtime; + __entry->vruntime = vruntime; + ) + TP_perf_assign( + __perf_count(runtime); + ), + + TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]", + __entry->comm, __entry->pid, + (unsigned long long)__entry->runtime, + (unsigned long long)__entry->vruntime) +); + /* * Tracepoint for accounting sleep time (time the task is not runnable, * including iowait, see below). 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index aa7f8412101..a097e909e80 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -513,6 +513,7 @@ static void update_curr(struct cfs_rq *cfs_rq) if (entity_is_task(curr)) { struct task_struct *curtask = task_of(curr); + trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); cpuacct_charge(curtask, delta_exec); account_group_exec_runtime(curtask, delta_exec); } -- cgit v1.2.3-70-g09d2 From f300baba5a1536070d6d77bf0c8c4ca999bb4f0f Mon Sep 17 00:00:00 2001 From: Alexandros Batsakis Date: Thu, 10 Sep 2009 17:33:30 +0300 Subject: nfsd41: sunrpc: add new xprt class for nfsv4.1 backchannel [sunrpc: change idle timeout value for the backchannel] Signed-off-by: Alexandros Batsakis Signed-off-by: Benny Halevy Acked-by: Trond Myklebust Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/clnt.h | 1 + include/linux/sunrpc/xprt.h | 18 ++++++++ include/linux/sunrpc/xprtrdma.h | 5 --- include/linux/sunrpc/xprtsock.h | 11 ----- net/sunrpc/clnt.c | 1 + net/sunrpc/xprtsock.c | 96 ++++++++++++++++++++++++++++++++++++++++- 6 files changed, 114 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 3d025588e56..8ed9642a5a7 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -114,6 +114,7 @@ struct rpc_create_args { rpc_authflavor_t authflavor; unsigned long flags; char *client_name; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ }; /* Values for "flags" field */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 228d694dbb9..6f9457a75b8 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -124,6 +124,23 @@ struct rpc_xprt_ops { void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); }; +/* + * RPC transport identifiers + * + * To preserve compatibility with the historical use of raw IP protocol + * id's for transport selection, UDP and TCP identifiers are specified + * with the previous values. No such restriction exists for new transports, + * except that they may not collide with these values (17 and 6, + * respectively). + */ +#define XPRT_TRANSPORT_BC (1 << 31) +enum xprt_transports { + XPRT_TRANSPORT_UDP = IPPROTO_UDP, + XPRT_TRANSPORT_TCP = IPPROTO_TCP, + XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, + XPRT_TRANSPORT_RDMA = 256 +}; + struct rpc_xprt { struct kref kref; /* Reference count */ struct rpc_xprt_ops * ops; /* transport methods */ @@ -232,6 +249,7 @@ struct xprt_create { struct sockaddr * srcaddr; /* optional local address */ struct sockaddr * dstaddr; /* remote peer address */ size_t addrlen; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ }; struct xprt_class { diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 54a379c9e8e..c2f04e1ae15 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -40,11 +40,6 @@ #ifndef _LINUX_SUNRPC_XPRTRDMA_H #define _LINUX_SUNRPC_XPRTRDMA_H -/* - * RPC transport identifier for RDMA - */ -#define XPRT_TRANSPORT_RDMA 256 - /* * rpcbind (v3+) RDMA netid. 
*/ diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index c2a46c45c8f..3f14a02e9cc 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -12,17 +12,6 @@ int init_socket_xprt(void); void cleanup_socket_xprt(void); -/* - * RPC transport identifiers for UDP, TCP - * - * To preserve compatibility with the historical use of raw IP protocol - * id's for transport selection, these are specified with the previous - * values. No such restriction exists for new transports, except that - * they may not collide with these values (17 and 6, respectively). - */ -#define XPRT_TRANSPORT_UDP IPPROTO_UDP -#define XPRT_TRANSPORT_TCP IPPROTO_TCP - /* * RPC slot table sizes for UDP, TCP transports */ diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index c1e467e1b07..7389804e3bb 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -288,6 +288,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) .srcaddr = args->saddress, .dstaddr = args->address, .addrlen = args->addrsize, + .bc_xprt = args->bc_xprt, }; char servername[48]; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d9a2b815714..bee41546575 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2468,11 +2468,93 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) return ERR_PTR(-EINVAL); } +/** + * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket + * @args: rpc transport creation arguments + * + */ +static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) +{ + struct sockaddr *addr = args->dstaddr; + struct rpc_xprt *xprt; + struct sock_xprt *transport; + struct svc_sock *bc_sock; + + if (!args->bc_xprt) + ERR_PTR(-EINVAL); + + xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); + if (IS_ERR(xprt)) + return xprt; + transport = container_of(xprt, struct sock_xprt, xprt); + + xprt->prot = IPPROTO_TCP; + xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); + xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; + xprt->timeout = &xs_tcp_default_timeout; + + /* backchannel */ + xprt_set_bound(xprt); + xprt->bind_timeout = 0; + xprt->connect_timeout = 0; + xprt->reestablish_timeout = 0; + xprt->idle_timeout = 0; + + /* + * The backchannel uses the same socket connection as the + * forechannel + */ + xprt->bc_xprt = args->bc_xprt; + bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); + bc_sock->sk_bc_xprt = xprt; + transport->sock = bc_sock->sk_sock; + transport->inet = bc_sock->sk_sk; + + xprt->ops = &bc_tcp_ops; + + switch (addr->sa_family) { + case AF_INET: + xs_format_peer_addresses(xprt, "tcp", + RPCBIND_NETID_TCP); + break; + case AF_INET6: + xs_format_peer_addresses(xprt, "tcp", + RPCBIND_NETID_TCP6); + break; + default: + kfree(xprt); + return ERR_PTR(-EAFNOSUPPORT); + } + + if (xprt_bound(xprt)) + dprintk("RPC: set up xprt to %s (port %s) via %s\n", + xprt->address_strings[RPC_DISPLAY_ADDR], + xprt->address_strings[RPC_DISPLAY_PORT], + xprt->address_strings[RPC_DISPLAY_PROTO]); + else + dprintk("RPC: set up xprt to %s (autobind) via %s\n", + xprt->address_strings[RPC_DISPLAY_ADDR], + xprt->address_strings[RPC_DISPLAY_PROTO]); + + /* + * Since we don't want connections for the backchannel, we set + * the xprt status to connected + */ + xprt_set_connected(xprt); + + + if (try_module_get(THIS_MODULE)) + return xprt; + kfree(xprt->slot); + kfree(xprt); + return ERR_PTR(-EINVAL); +} + static struct xprt_class xs_udp_transport = { .list = LIST_HEAD_INIT(xs_udp_transport.list), .name = "udp", .owner = 
THIS_MODULE, - .ident = IPPROTO_UDP, + .ident = XPRT_TRANSPORT_UDP, .setup = xs_setup_udp, }; @@ -2480,10 +2562,18 @@ static struct xprt_class xs_tcp_transport = { .list = LIST_HEAD_INIT(xs_tcp_transport.list), .name = "tcp", .owner = THIS_MODULE, - .ident = IPPROTO_TCP, + .ident = XPRT_TRANSPORT_TCP, .setup = xs_setup_tcp, }; +static struct xprt_class xs_bc_tcp_transport = { + .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list), + .name = "tcp NFSv4.1 backchannel", + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_BC_TCP, + .setup = xs_setup_bc_tcp, +}; + /** * init_socket_xprt - set up xprtsock's sysctls, register with RPC client * @@ -2497,6 +2587,7 @@ int init_socket_xprt(void) xprt_register_transport(&xs_udp_transport); xprt_register_transport(&xs_tcp_transport); + xprt_register_transport(&xs_bc_tcp_transport); return 0; } @@ -2516,6 +2607,7 @@ void cleanup_socket_xprt(void) xprt_unregister_transport(&xs_udp_transport); xprt_unregister_transport(&xs_tcp_transport); + xprt_unregister_transport(&xs_bc_tcp_transport); } static int param_set_uint_minmax(const char *val, struct kernel_param *kp, -- cgit v1.2.3-70-g09d2 From 3661d28615ea580c1db02a972fd4d3898df1cb01 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 14 Sep 2009 22:59:50 -0400 Subject: ext4: Fix include/trace/events/ext4.h to work with Systemtap Using relative pathnames in #include statements interacts badly with SystemTap, since the fs/ext4/*.h header files are not packaged up as part of a distribution kernel's header files. Since systemtap doesn't use TP_fast_assign(), we can use a blind structure definition and then make sure the needed header files are defined before the ext4 source files #include the trace/events/ext4.h header file. https://bugzilla.redhat.com/show_bug.cgi?id=512478 Signed-off-by: "Theodore Ts'o" --- fs/ext4/super.c | 1 + include/trace/events/ext4.h | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 04c69335733..af95dd8ba54 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -45,6 +45,7 @@ #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" +#include "mballoc.h" #define CREATE_TRACE_POINTS #include diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 68b53c7ef8a..6fe6ce9ee07 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -5,10 +5,12 @@ #define _TRACE_EXT4_H #include -#include "../../../fs/ext4/ext4.h" -#include "../../../fs/ext4/mballoc.h" #include +struct ext4_allocation_context; +struct ext4_allocation_request; +struct ext4_prealloc_space; + TRACE_EVENT(ext4_free_inode, TP_PROTO(struct inode *inode), -- cgit v1.2.3-70-g09d2 From 12e09337fe238981cb0c87543306e23775d1a143 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 14 Sep 2009 23:37:40 +0200 Subject: time: Prevent 32 bit overflow with set_normalized_timespec() set_normalized_timespec() nsec argument is of type long. The recent timekeeping changes of ktime_get_ts() feed ts->tv_nsec + tomono.tv_nsec + nsecs to set_normalized_timespec(). On 32 bit machines that sum can be larger than (1 << 31) and therefor result in a negative value which screws up the result completely. Make the nsec argument of set_normalized_timespec() s64 to fix the problem at hand. This also prevents similar problems for future users of set_normalized_timespec(). 
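To make the overflow concrete, a small self-contained userspace sketch (not part of the patch; the 500 ms delta is a hypothetical but representative value) showing that the three nanosecond terms ktime_get_ts() now sums no longer fit in a signed 32-bit long:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    int main(void)
    {
            /* both tv_nsec fields just below one second, plus a large delta */
            int64_t sum = (NSEC_PER_SEC - 1) + (NSEC_PER_SEC - 1) + 500000000LL;

            printf("sum = %lld, INT32_MAX = %d\n", (long long)sum, INT32_MAX);
            /* sum (~2.5e9) exceeds INT32_MAX (~2.147e9), so a 32-bit 'long'
             * nsec argument would end up negative */
            return 0;
    }
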
Signed-off-by: Thomas Gleixner Tested-by: Carsten Emde LKML-Reference: Cc: Martin Schwidefsky Cc: John Stultz --- include/linux/time.h | 2 +- kernel/time.c | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/time.h b/include/linux/time.h index 256232f7e5e..56787c09334 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -75,7 +75,7 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec); -extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); +extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); extern struct timespec timespec_add_safe(const struct timespec lhs, const struct timespec rhs); diff --git a/kernel/time.c b/kernel/time.c index 29511943871..2e2e469a7fe 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -370,13 +370,20 @@ EXPORT_SYMBOL(mktime); * 0 <= tv_nsec < NSEC_PER_SEC * For negative values only the tv_sec field is negative ! */ -void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec) +void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec) { while (nsec >= NSEC_PER_SEC) { + /* + * The following asm() prevents the compiler from + * optimising this loop into a modulo operation. See + * also __iter_div_u64_rem() in include/linux/time.h + */ + asm("" : "+rm"(nsec)); nsec -= NSEC_PER_SEC; ++sec; } while (nsec < 0) { + asm("" : "+rm"(nsec)); nsec += NSEC_PER_SEC; --sec; } -- cgit v1.2.3-70-g09d2 From 5d351754fcf58d1a604aa7cf95c2805e8a098ad9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 15 Sep 2009 13:32:13 -0400 Subject: SUNRPC: Defer the auth_gss upcall when the RPC call is asynchronous Otherwise, the upcall is going to be synchronous, which may not be what the caller wants... Signed-off-by: Trond Myklebust Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/auth.h | 4 ++-- net/sunrpc/auth.c | 20 ++++++++++++-------- net/sunrpc/auth_generic.c | 4 ++-- 3 files changed, 16 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 3f632182d8e..996df4dac7d 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -111,7 +111,7 @@ struct rpc_credops { void (*crdestroy)(struct rpc_cred *); int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); - void (*crbind)(struct rpc_task *, struct rpc_cred *); + void (*crbind)(struct rpc_task *, struct rpc_cred *, int); __be32 * (*crmarshal)(struct rpc_task *, __be32 *); int (*crrefresh)(struct rpc_task *); __be32 * (*crvalidate)(struct rpc_task *, __be32 *); @@ -140,7 +140,7 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred * void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); void rpcauth_bindcred(struct rpc_task *, struct rpc_cred *, int); -void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *); +void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); void put_rpccred(struct rpc_cred *); void rpcauth_unbindcred(struct rpc_task *); __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 0c431c277af..54a4e042f10 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -385,7 +385,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, EXPORT_SYMBOL_GPL(rpcauth_init_cred); void -rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred) +rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags) { task->tk_msg.rpc_cred = get_rpccred(cred); dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, @@ -394,7 +394,7 @@ rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred) EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred); static void -rpcauth_bind_root_cred(struct rpc_task *task) +rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) { struct rpc_auth *auth = task->tk_client->cl_auth; struct auth_cred acred = { @@ -405,7 +405,7 @@ rpcauth_bind_root_cred(struct rpc_task *task) dprintk("RPC: %5u looking up %s cred\n", task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); - ret = auth->au_ops->lookup_cred(auth, &acred, 0); + ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags); if (!IS_ERR(ret)) task->tk_msg.rpc_cred = ret; else @@ -413,14 +413,14 @@ rpcauth_bind_root_cred(struct rpc_task *task) } static void -rpcauth_bind_new_cred(struct rpc_task *task) +rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags) { struct rpc_auth *auth = task->tk_client->cl_auth; struct rpc_cred *ret; dprintk("RPC: %5u looking up %s cred\n", task->tk_pid, auth->au_ops->au_name); - ret = rpcauth_lookupcred(auth, 0); + ret = rpcauth_lookupcred(auth, lookupflags); if (!IS_ERR(ret)) task->tk_msg.rpc_cred = ret; else @@ -430,12 +430,16 @@ rpcauth_bind_new_cred(struct rpc_task *task) void rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags) { + int lookupflags = 0; + + if (flags & RPC_TASK_ASYNC) + lookupflags |= RPCAUTH_LOOKUP_NEW; if (cred != NULL) - cred->cr_ops->crbind(task, cred); + cred->cr_ops->crbind(task, cred, lookupflags); else if (flags & RPC_TASK_ROOTCREDS) - rpcauth_bind_root_cred(task); + rpcauth_bind_root_cred(task, lookupflags); 
else - rpcauth_bind_new_cred(task); + rpcauth_bind_new_cred(task, lookupflags); } void diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index 4028502f052..bf88bf8e936 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c @@ -55,13 +55,13 @@ struct rpc_cred *rpc_lookup_machine_cred(void) EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred); static void -generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred) +generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags) { struct rpc_auth *auth = task->tk_client->cl_auth; struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred; struct rpc_cred *ret; - ret = auth->au_ops->lookup_cred(auth, acred, 0); + ret = auth->au_ops->lookup_cred(auth, acred, lookupflags); if (!IS_ERR(ret)) task->tk_msg.rpc_cred = ret; else -- cgit v1.2.3-70-g09d2 From 29ab23cc5d351658d01a4327d55e9106a73fd04f Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 15 Sep 2009 15:56:50 -0400 Subject: nfsd4: allow nfs4 state startup to fail The failure here is pretty unlikely, but we should handle it anyway. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 17 ++++++++++++----- fs/nfsd/nfssvc.c | 4 +++- include/linux/nfsd/nfsd.h | 4 ++-- 3 files changed, 17 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 46e9ac52687..11db40cb2f2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4004,7 +4004,7 @@ set_max_delegations(void) /* initialization to perform when the nfsd service is started: */ -static void +static int __nfs4_state_start(void) { unsigned long grace_time; @@ -4016,19 +4016,26 @@ __nfs4_state_start(void) printk(KERN_INFO "NFSD: starting %ld-second grace period\n", grace_time/HZ); laundry_wq = create_singlethread_workqueue("nfsd4"); + if (laundry_wq == NULL) + return -ENOMEM; queue_delayed_work(laundry_wq, &laundromat_work, grace_time); set_max_delegations(); + return 0; } -void +int nfs4_state_start(void) { + int ret; + if (nfs4_init) - return; + return 0; nfsd4_load_reboot_recovery_data(); - __nfs4_state_start(); + ret = __nfs4_state_start(); + if (ret) + return ret; nfs4_init = 1; - return; + return 0; } time_t diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 4472449c093..fcc00108826 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -411,7 +411,9 @@ nfsd_svc(unsigned short port, int nrservs) error = nfsd_racache_init(2*nrservs); if (error<0) goto out; - nfs4_state_start(); + error = nfs4_state_start(); + if (error) + goto out; nfsd_reset_versions(); diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 2812ed52669..24fdf89cea8 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -166,7 +166,7 @@ extern int nfsd_max_blksize; extern unsigned int max_delegations; int nfs4_state_init(void); void nfsd4_free_slabs(void); -void nfs4_state_start(void); +int nfs4_state_start(void); void nfs4_state_shutdown(void); time_t nfs4_lease_time(void); void nfs4_reset_lease(time_t leasetime); @@ -174,7 +174,7 @@ int nfs4_reset_recoverydir(char *recdir); #else static inline int nfs4_state_init(void) { return 0; } static inline void nfsd4_free_slabs(void) { } -static inline void nfs4_state_start(void) { } +static inline int nfs4_state_start(void) { } static inline void nfs4_state_shutdown(void) { } static inline time_t nfs4_lease_time(void) { return 0; } static inline void nfs4_reset_lease(time_t leasetime) { } -- cgit v1.2.3-70-g09d2 From 
80fc015bdfe1f5b870c1e1ee02d78e709523fee7 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 15 Sep 2009 18:07:35 -0400 Subject: nfsd4: use common rpc_cred for all callbacks Callbacks are always made using the machine's identity, so we can use a single auth_generic credential shared among callbacks to all clients and let the rpc code take care of the rest. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 33 ++++++++++----------------------- fs/nfsd/nfs4state.c | 6 +----- include/linux/nfsd/state.h | 2 +- 3 files changed, 12 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 4abb88264c7..128519769ea 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -432,42 +432,29 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = { .rpc_call_done = nfsd4_cb_probe_done, }; -static struct rpc_cred *lookup_cb_cred(struct nfs4_cb_conn *cb) +static struct rpc_cred *callback_cred; + +int set_callback_cred(void) { - struct auth_cred acred = { - .machine_cred = 1 - }; - struct rpc_auth *auth = cb->cb_client->cl_auth; - - /* - * Note in the gss case this doesn't actually have to wait for a - * gss upcall (or any calls to the client); this just creates a - * non-uptodate cred which the rpc state machine will fill in with - * a refresh_upcall later. - */ - return auth->au_ops->lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW); + callback_cred = rpc_lookup_machine_cred(); + if (!callback_cred) + return -ENOMEM; + return 0; } + void do_probe_callback(struct nfs4_client *clp) { struct nfs4_cb_conn *cb = &clp->cl_cb_conn; struct rpc_message msg = { .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], .rpc_argp = clp, + .rpc_cred = callback_cred }; - struct rpc_cred *cred; int status; - cred = lookup_cb_cred(cb); - if (IS_ERR(cred)) { - status = PTR_ERR(cred); - goto out; - } - cb->cb_cred = cred; - msg.rpc_cred = cb->cb_cred; status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT, &nfsd4_cb_probe_ops, (void *)clp); -out: if (status) { warn_no_callback_path(clp, status); put_nfs4_client(clp); @@ -550,7 +537,7 @@ nfsd4_cb_recall(struct nfs4_delegation *dp) struct rpc_message msg = { .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL], .rpc_argp = dp, - .rpc_cred = clp->cl_cb_conn.cb_cred + .rpc_cred = callback_cred }; int status; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 11db40cb2f2..0445192d660 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -696,10 +696,6 @@ shutdown_callback_client(struct nfs4_client *clp) clp->cl_cb_conn.cb_client = NULL; rpc_shutdown_client(clnt); } - if (clp->cl_cb_conn.cb_cred) { - put_rpccred(clp->cl_cb_conn.cb_cred); - clp->cl_cb_conn.cb_cred = NULL; - } } static inline void @@ -4020,7 +4016,7 @@ __nfs4_state_start(void) return -ENOMEM; queue_delayed_work(laundry_wq, &laundromat_work, grace_time); set_max_delegations(); - return 0; + return set_callback_cred(); } int diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 70ef5f4abbb..9bf3aa8c5ae 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -89,7 +89,6 @@ struct nfs4_cb_conn { /* RPC client info */ atomic_t cb_set; /* successful CB_NULL call */ struct rpc_clnt * cb_client; - struct rpc_cred * cb_cred; }; /* Maximum number of slots per session. 
160 is useful for long haul TCP */ @@ -362,6 +361,7 @@ extern int nfs4_in_grace(void); extern __be32 nfs4_check_open_reclaim(clientid_t *clid); extern void put_nfs4_client(struct nfs4_client *clp); extern void nfs4_free_stateowner(struct kref *kref); +extern int set_callback_cred(void); extern void nfsd4_probe_callback(struct nfs4_client *clp); extern void nfsd4_cb_recall(struct nfs4_delegation *dp); extern void nfs4_put_delegation(struct nfs4_delegation *dp); -- cgit v1.2.3-70-g09d2 From 38524ab38f2752beee262a97403d871665838172 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 10 Sep 2009 12:25:59 +0300 Subject: nfsd41: Backchannel: callback infrastructure Keep the xprt used for create_session in cl_cb_xprt. Mark cl_callback.cb_minorversion = 1 and remember the client provided cl_callback.cb_prog rpc program number. Use it to probe the callback path. Use the client's network address to initialize as the callback's address as expected by the xprt creation routines. Define xdr sizes and code nfs4_cb_compound header to be able to send a null callback rpc. Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy Signed-off-by: Ricardo Labiaga [get callback minorversion from fore channel's] Signed-off-by: Benny Halevy [nfsd41: change bc_sock to bc_xprt] Signed-off-by: Benny Halevy [pulled definition for cl_cb_xprt] Signed-off-by: Benny Halevy [nfsd41: set up backchannel's cb_addr] [moved rpc_create_args init to "nfsd: modify nfsd4.1 backchannel to use new xprt class"] Signed-off-by: Benny Halevy Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 17 +++++++++++++++-- fs/nfsd/nfs4state.c | 14 ++++++++++++++ include/linux/nfsd/state.h | 3 +++ 3 files changed, 32 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 128519769ea..db4188ce9b0 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -52,16 +53,19 @@ #define NFSPROC4_CB_NULL 0 #define NFSPROC4_CB_COMPOUND 1 +#define NFS4_STATEID_SIZE 16 /* Index of predefined Linux callback client operations */ enum { NFSPROC4_CLNT_CB_NULL = 0, NFSPROC4_CLNT_CB_RECALL, + NFSPROC4_CLNT_CB_SEQUENCE, }; enum nfs_cb_opnum4 { OP_CB_RECALL = 4, + OP_CB_SEQUENCE = 11, }; #define NFS4_MAXTAGLEN 20 @@ -70,15 +74,22 @@ enum nfs_cb_opnum4 { #define NFS4_dec_cb_null_sz 0 #define cb_compound_enc_hdr_sz 4 #define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2)) +#define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2) +#define cb_sequence_enc_sz (sessionid_sz + 4 + \ + 1 /* no referring calls list yet */) +#define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4) + #define op_enc_sz 1 #define op_dec_sz 2 #define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2)) #define enc_stateid_sz (NFS4_STATEID_SIZE >> 2) #define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \ + cb_sequence_enc_sz + \ 1 + enc_stateid_sz + \ enc_nfs4_fh_sz) #define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \ + cb_sequence_dec_sz + \ op_dec_sz) /* @@ -137,11 +148,13 @@ xdr_error: \ } while (0) struct nfs4_cb_compound_hdr { - int status; - u32 ident; + /* args */ + u32 ident; /* minorversion 0 only */ u32 nops; __be32 *nops_p; u32 minorversion; + /* res */ + int status; u32 taglen; char *tag; }; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 0445192d660..d8196b453f6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -702,6 +702,8 @@ static inline void free_client(struct nfs4_client *clp) { 
shutdown_callback_client(clp); + if (clp->cl_cb_xprt) + svc_xprt_put(clp->cl_cb_xprt); if (clp->cl_cred.cr_group_info) put_group_info(clp->cl_cred.cr_group_info); kfree(clp->cl_principal); @@ -1317,6 +1319,18 @@ nfsd4_create_session(struct svc_rqst *rqstp, cr_ses->flags &= ~SESSION4_PERSIST; cr_ses->flags &= ~SESSION4_RDMA; + if (cr_ses->flags & SESSION4_BACK_CHAN) { + unconf->cl_cb_xprt = rqstp->rq_xprt; + svc_xprt_get(unconf->cl_cb_xprt); + rpc_copy_addr( + (struct sockaddr *)&unconf->cl_cb_conn.cb_addr, + sa); + unconf->cl_cb_conn.cb_addrlen = svc_addr_len(sa); + unconf->cl_cb_conn.cb_minorversion = + cstate->minorversion; + unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog; + nfsd4_probe_callback(unconf); + } conf = unconf; } else { status = nfserr_stale_clientid; diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 9bf3aa8c5ae..c916032570c 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -211,6 +211,9 @@ struct nfs4_client { struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ u32 cl_exchange_flags; struct nfs4_sessionid cl_sessionid; + + /* for nfs41 callbacks */ + struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ }; /* struct nfs4_client_reset -- cgit v1.2.3-70-g09d2 From 132f97715c098393fb8de3c26b07b9fdbd2334f1 Mon Sep 17 00:00:00 2001 From: Ricardo Labiaga Date: Thu, 10 Sep 2009 12:26:12 +0300 Subject: nfsd41: Backchannel: Add sequence arguments to callback RPC arguments Follow the model we use in the client. Make the sequence arguments part of the regular RPC arguments. None of the callbacks that are soon to be implemented expect results that need to be passed back to the caller, so we don't define a separate RPC results structure. For session validation, the cb_sequence decoding will use a pointer to the sequence arguments that are part of the RPC argument. Signed-off-by: Ricardo Labiaga [define struct nfsd4_cb_sequence here] Signed-off-by: Benny Halevy Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 5 +++++ include/linux/nfsd/state.h | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index db4188ce9b0..f31175717c1 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -92,6 +92,11 @@ enum nfs_cb_opnum4 { cb_sequence_dec_sz + \ op_dec_sz) +struct nfs4_rpc_args { + void *args_op; + struct nfsd4_cb_sequence args_seq; +}; + /* * Generic encode routines from fs/nfs/nfs4xdr.c */ diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index c916032570c..0e5b5aecde0 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -60,6 +60,12 @@ typedef struct { #define si_stateownerid si_opaque.so_stateownerid #define si_fileid si_opaque.so_fileid +struct nfsd4_cb_sequence { + /* args/res */ + u32 cbs_minorversion; + struct nfs4_client *cbs_clp; +}; + struct nfs4_delegation { struct list_head dl_perfile; struct list_head dl_perclnt; -- cgit v1.2.3-70-g09d2 From 199ff35e1c8724871e157c2e48556c2794946e82 Mon Sep 17 00:00:00 2001 From: Ricardo Labiaga Date: Thu, 10 Sep 2009 12:26:25 +0300 Subject: nfsd41: Backchannel: Server backchannel RPC wait queue RPC callback requests will wait on this wait queue if the backchannel is out of slots. Signed-off-by: Ricardo Labiaga Signed-off-by: Benny Halevy Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 2 ++ include/linux/nfsd/state.h | 4 ++++ 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d8196b453f6..f4cebd9016b 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -775,6 +775,8 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir) INIT_LIST_HEAD(&clp->cl_delegations); INIT_LIST_HEAD(&clp->cl_sessions); INIT_LIST_HEAD(&clp->cl_lru); + clear_bit(0, &clp->cl_cb_slot_busy); + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); return clp; } diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 0e5b5aecde0..9cc40a137c3 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -219,7 +219,11 @@ struct nfs4_client { struct nfs4_sessionid cl_sessionid; /* for nfs41 callbacks */ + /* We currently support a single back channel with a single slot */ + unsigned long cl_cb_slot_busy; struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ + struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ + /* wait here for slots */ }; /* struct nfs4_client_reset -- cgit v1.2.3-70-g09d2 From 2a1d1b593803d7c18a369bf148f3b48c5a3260fc Mon Sep 17 00:00:00 2001 From: Ricardo Labiaga Date: Thu, 10 Sep 2009 12:26:38 +0300 Subject: nfsd41: Backchannel: Setup sequence information Follows the model used by the NFS client. Setup the RPC prepare and done function pointers so that we can populate the sequence information if minorversion == 1. rpc_run_task() is then invoked directly just like existing NFS client operations do. nfsd4_cb_prepare() determines if the sequence information needs to be setup. If the slot is in use, it adds itself to the wait queue. nfsd4_cb_done() wakes anyone sleeping on the callback channel wait queue after our RPC reply has been received. It also sets the task message result pointer to NULL to clearly indicate we're done using it. Signed-off-by: Ricardo Labiaga [define and initialize cl_cb_seq_nr here] [pulled out unused defintion of nfsd4_cb_done] Signed-off-by: Benny Halevy Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++ fs/nfsd/nfs4state.c | 1 + include/linux/nfsd/state.h | 1 + 3 files changed, 64 insertions(+) (limited to 'include') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index f31175717c1..25a09069e45 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -501,6 +501,67 @@ nfsd4_probe_callback(struct nfs4_client *clp) do_probe_callback(clp); } +/* + * There's currently a single callback channel slot. + * If the slot is available, then mark it busy. Otherwise, set the + * thread for sleeping on the callback RPC wait queue. 
+ */ +static int nfsd41_cb_setup_sequence(struct nfs4_client *clp, + struct rpc_task *task) +{ + struct nfs4_rpc_args *args = task->tk_msg.rpc_argp; + u32 *ptr = (u32 *)clp->cl_sessionid.data; + int status = 0; + + dprintk("%s: %u:%u:%u:%u\n", __func__, + ptr[0], ptr[1], ptr[2], ptr[3]); + + if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { + rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); + dprintk("%s slot is busy\n", __func__); + status = -EAGAIN; + goto out; + } + + /* + * We'll need the clp during XDR encoding and decoding, + * and the sequence during decoding to verify the reply + */ + args->args_seq.cbs_clp = clp; + task->tk_msg.rpc_resp = &args->args_seq; + +out: + dprintk("%s status=%d\n", __func__, status); + return status; +} + +/* + * TODO: cb_sequence should support referring call lists, cachethis, multiple + * slots, and mark callback channel down on communication errors. + */ +static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) +{ + struct nfs4_delegation *dp = calldata; + struct nfs4_client *clp = dp->dl_client; + struct nfs4_rpc_args *args = task->tk_msg.rpc_argp; + u32 minorversion = clp->cl_cb_conn.cb_minorversion; + int status = 0; + + args->args_seq.cbs_minorversion = minorversion; + if (minorversion) { + status = nfsd41_cb_setup_sequence(clp, task); + if (status) { + if (status != -EAGAIN) { + /* terminate rpc task */ + task->tk_status = status; + task->tk_action = NULL; + } + return; + } + } + rpc_call_start(task); +} + static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) { struct nfs4_delegation *dp = calldata; @@ -540,6 +601,7 @@ static void nfsd4_cb_recall_release(void *calldata) } static const struct rpc_call_ops nfsd4_cb_recall_ops = { + .rpc_call_prepare = nfsd4_cb_prepare, .rpc_call_done = nfsd4_cb_recall_done, .rpc_release = nfsd4_cb_recall_release, }; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f4cebd9016b..76b7bcbb3f2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1331,6 +1331,7 @@ nfsd4_create_session(struct svc_rqst *rqstp, unconf->cl_cb_conn.cb_minorversion = cstate->minorversion; unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog; + unconf->cl_cb_seq_nr = 1; nfsd4_probe_callback(unconf); } conf = unconf; diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 9cc40a137c3..b38d1132418 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -221,6 +221,7 @@ struct nfs4_client { /* for nfs41 callbacks */ /* We currently support a single back channel with a single slot */ unsigned long cl_cb_slot_busy; + u32 cl_cb_seq_nr; struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ /* wait here for slots */ -- cgit v1.2.3-70-g09d2 From d466f2fcb32cd97fd586bfa33f5dba3ac78aadb0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:03 +0200 Subject: HWPOISON: Add page flag for poisoned pages Hardware poisoned pages need special handling in the VM and shouldn't be touched again. This requires a new page flag. Define it here. The page flags wars seem to be over, so it shouldn't be a problem to get a new one. 
v2: Add TestSetHWPoison (suggested by Johannes Weiner) Acked-by: Christoph Lameter Signed-off-by: Andi Kleen --- include/linux/page-flags.h | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 2b87acfc5f8..9bc5fd9fdbf 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -51,6 +51,9 @@ * PG_buddy is set to indicate that the page is free and in the buddy system * (see mm/page_alloc.c). * + * PG_hwpoison indicates that a page got corrupted in hardware and contains + * data with incorrect ECC bits that triggered a machine check. Accessing is + * not safe since it may cause another machine check. Don't touch! */ /* @@ -101,6 +104,9 @@ enum pageflags { #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED PG_uncached, /* Page has been mapped as uncached */ +#endif +#ifdef CONFIG_MEMORY_FAILURE + PG_hwpoison, /* hardware poisoned page. Don't touch */ #endif __NR_PAGEFLAGS, @@ -263,6 +269,15 @@ PAGEFLAG(Uncached, uncached) PAGEFLAG_FALSE(Uncached) #endif +#ifdef CONFIG_MEMORY_FAILURE +PAGEFLAG(HWPoison, hwpoison) +TESTSETFLAG(HWPoison, hwpoison) +#define __PG_HWPOISON (1UL << PG_hwpoison) +#else +PAGEFLAG_FALSE(HWPoison) +#define __PG_HWPOISON 0 +#endif + static inline int PageUptodate(struct page *page) { int ret = test_bit(PG_uptodate, &(page)->flags); @@ -387,7 +402,7 @@ static inline void __ClearPageTail(struct page *page) 1 << PG_private | 1 << PG_private_2 | \ 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED) + 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON) /* * Flags checked when a page is prepped for return by the page allocator. -- cgit v1.2.3-70-g09d2 From 10be22dfe1e6ad978269dc275147e0ed049187bb Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:04 +0200 Subject: HWPOISON: Export some rmap vma locking to outside world Needed for later patch that walks rmap entries on its own. This used to be very frowned upon, but memory-failure.c does some rather specialized rmap walking and rmap has been stable for quite some time, so I think it's ok now to export it. Signed-off-by: Andi Kleen --- include/linux/rmap.h | 6 ++++++ mm/rmap.c | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bf116d0dbf2..8dff2ffab82 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -112,6 +112,12 @@ int page_mkclean(struct page *); */ int try_to_munlock(struct page *); +/* + * Called by memory-failure.c to kill processes. + */ +struct anon_vma *page_lock_anon_vma(struct page *page); +void page_unlock_anon_vma(struct anon_vma *anon_vma); + #else /* !CONFIG_MMU */ #define anon_vma_init() do {} while (0) diff --git a/mm/rmap.c b/mm/rmap.c index 0895b5c7cbf..5a35c030e77 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -191,7 +191,7 @@ void __init anon_vma_init(void) * Getting a lock on a stable anon_vma from a page off the LRU is * tricky: page_lock_anon_vma rely on RCU to guard against the races. 
*/ -static struct anon_vma *page_lock_anon_vma(struct page *page) +struct anon_vma *page_lock_anon_vma(struct page *page) { struct anon_vma *anon_vma; unsigned long anon_mapping; @@ -211,7 +211,7 @@ out: return NULL; } -static void page_unlock_anon_vma(struct anon_vma *anon_vma) +void page_unlock_anon_vma(struct anon_vma *anon_vma) { spin_unlock(&anon_vma->lock); rcu_read_unlock(); -- cgit v1.2.3-70-g09d2 From a7420aa54dbf699a5a05feba3c859b6baaa3938c Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:05 +0200 Subject: HWPOISON: Add support for poison swap entries v2 Memory migration uses special swap entry types to trigger special actions on page faults. Extend this mechanism to also support poisoned swap entries, to trigger poison handling on page faults. This allows follow-on patches to prevent processes from faulting in poisoned pages again. v2: Fix overflow in MAX_SWAPFILES (Fengguang Wu) v3: Better overflow fix (Hidehiro Kawai) Signed-off-by: Andi Kleen --- include/linux/swap.h | 34 ++++++++++++++++++++++++++++------ include/linux/swapops.h | 38 ++++++++++++++++++++++++++++++++++++++ mm/swapfile.c | 4 ++-- 3 files changed, 68 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 7c15334f3ff..f077e454c65 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -34,15 +34,37 @@ static inline int current_is_kswapd(void) * the type/offset into the pte as 5/27 as well. */ #define MAX_SWAPFILES_SHIFT 5 -#ifndef CONFIG_MIGRATION -#define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT) + +/* + * Use some of the swap files numbers for other purposes. This + * is a convenient way to hook into the VM to trigger special + * actions on faults. + */ + +/* + * NUMA node memory migration support + */ +#ifdef CONFIG_MIGRATION +#define SWP_MIGRATION_NUM 2 +#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM) +#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1) #else -/* Use last two entries for page migration swap entries */ -#define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2) -#define SWP_MIGRATION_READ MAX_SWAPFILES -#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1) +#define SWP_MIGRATION_NUM 0 #endif +/* + * Handling of hardware poisoned pages with memory corruption. + */ +#ifdef CONFIG_MEMORY_FAILURE +#define SWP_HWPOISON_NUM 1 +#define SWP_HWPOISON MAX_SWAPFILES +#else +#define SWP_HWPOISON_NUM 0 +#endif + +#define MAX_SWAPFILES \ + ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM) + /* * Magic header for a swap area. 
The first part of the union is * what the swap magic looks like for the old (limited to 128MB) diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 6ec39ab27b4..cd42e30b7c6 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -131,3 +131,41 @@ static inline int is_write_migration_entry(swp_entry_t entry) #endif +#ifdef CONFIG_MEMORY_FAILURE +/* + * Support for hardware poisoned pages + */ +static inline swp_entry_t make_hwpoison_entry(struct page *page) +{ + BUG_ON(!PageLocked(page)); + return swp_entry(SWP_HWPOISON, page_to_pfn(page)); +} + +static inline int is_hwpoison_entry(swp_entry_t entry) +{ + return swp_type(entry) == SWP_HWPOISON; +} +#else + +static inline swp_entry_t make_hwpoison_entry(struct page *page) +{ + return swp_entry(0, 0); +} + +static inline int is_hwpoison_entry(swp_entry_t swp) +{ + return 0; +} +#endif + +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) +static inline int non_swap_entry(swp_entry_t entry) +{ + return swp_type(entry) >= MAX_SWAPFILES; +} +#else +static inline int non_swap_entry(swp_entry_t entry) +{ + return 0; +} +#endif diff --git a/mm/swapfile.c b/mm/swapfile.c index 74f1102e874..ce5dda6d604 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -699,7 +699,7 @@ int free_swap_and_cache(swp_entry_t entry) struct swap_info_struct *p; struct page *page = NULL; - if (is_migration_entry(entry)) + if (non_swap_entry(entry)) return 1; p = swap_info_get(entry); @@ -2085,7 +2085,7 @@ static int __swap_duplicate(swp_entry_t entry, bool cache) int count; bool has_cache; - if (is_migration_entry(entry)) + if (non_swap_entry(entry)) return -EINVAL; type = swp_type(entry); -- cgit v1.2.3-70-g09d2 From ad5fa913991e9e0f122b021e882b0d50051fbdbc Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:06 +0200 Subject: HWPOISON: Add new SIGBUS error codes for hardware poison signals Add new SIGBUS codes for reporting machine checks as signals. When the hardware detects an uncorrected ECC error it can trigger these signals. This is needed for telling KVM's qemu about machine checks that happen to guests, so that it can inject them, but might be also useful for other programs. I find it useful in my test programs. This patch merely defines the new types. - Define two new si_codes for SIGBUS. BUS_MCEERR_AO and BUS_MCEERR_AR * BUS_MCEERR_AO is for "Action Optional" machine checks, which means that some corruption has been detected in the background, but nothing has been consumed so far. The program can ignore those if it wants (but most programs would already get killed) * BUS_MCEERR_AR is for "Action Required" machine checks. This happens when corrupted data is consumed or the application ran into an area which has been known to be corrupted earlier. These require immediate action and cannot just returned to. Most programs would kill themselves. - They report the address of the corruption in the user address space in si_addr. - Define a new si_addr_lsb field that reports the extent of the corruption to user space. That's currently always a (small) page. The user application cannot tell where in this page the corruption happened. AK: I plan to write a man page update before anyone asks. 
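For illustration only (not from the patch), a minimal userspace sketch of how a test program might consume the new codes; the fallback #defines assume libc headers that do not yet carry them, and the handler simply records the event and exits:

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    #ifndef BUS_MCEERR_AR                   /* not yet in older libc headers */
    #define BUS_MCEERR_AR 4                 /* action required: corrupted data consumed */
    #define BUS_MCEERR_AO 5                 /* action optional: corruption found in background */
    #endif

    static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
    {
            (void)sig; (void)ctx;
            if (si->si_code == BUS_MCEERR_AO || si->si_code == BUS_MCEERR_AR) {
                    /* si->si_addr is the poisoned user address; si->si_addr_lsb
                     * reports the extent of the corruption (currently a page). */
                    static const char msg[] = "hardware memory error reported via SIGBUS\n";
                    (void)write(STDERR_FILENO, msg, sizeof(msg) - 1);
            }
            _exit(1);
    }

    int main(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = sigbus_handler;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGBUS, &sa, NULL);
            /* ... run the workload that might touch poisoned memory ... */
            return 0;
    }
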
Signed-off-by: Andi Kleen --- include/asm-generic/siginfo.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index c840719a8c5..942d30b5aab 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h @@ -82,6 +82,7 @@ typedef struct siginfo { #ifdef __ARCH_SI_TRAPNO int _trapno; /* TRAP # which caused the signal */ #endif + short _addr_lsb; /* LSB of the reported address */ } _sigfault; /* SIGPOLL */ @@ -112,6 +113,7 @@ typedef struct siginfo { #ifdef __ARCH_SI_TRAPNO #define si_trapno _sifields._sigfault._trapno #endif +#define si_addr_lsb _sifields._sigfault._addr_lsb #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd @@ -192,7 +194,11 @@ typedef struct siginfo { #define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */ #define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */ #define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */ -#define NSIGBUS 3 +/* hardware memory error consumed on a machine check: action required */ +#define BUS_MCEERR_AR (__SI_FAULT|4) +/* hardware memory error detected in process but not consumed: action optional*/ +#define BUS_MCEERR_AO (__SI_FAULT|5) +#define NSIGBUS 5 /* * SIGTRAP si_codes -- cgit v1.2.3-70-g09d2 From d1737fdbec7f90edc52dd0c5c3767457f28e78d8 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:06 +0200 Subject: HWPOISON: Add basic support for poisoned pages in fault handler v3 - Add a new VM_FAULT_HWPOISON error code to handle_mm_fault. Right now architectures have to explicitely enable poison page support, so this is forward compatible to all architectures. They only need to add it when they enable poison page support. - Add poison page handling in swap in fault code v2: Add missing delayacct_clear_flag (Hidehiro Kawai) v3: Really use delayacct_clear_flag (Hidehiro Kawai) Signed-off-by: Andi Kleen --- include/linux/mm.h | 3 ++- mm/memory.c | 18 +++++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 9a72cc78e6b..082b68cb5ff 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -685,11 +685,12 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_SIGBUS 0x0002 #define VM_FAULT_MAJOR 0x0004 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ +#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */ #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ -#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON) /* * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. diff --git a/mm/memory.c b/mm/memory.c index aede2ce3aba..02bae2d540d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1319,7 +1319,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) return i ? i : -ENOMEM; - else if (ret & VM_FAULT_SIGBUS) + if (ret & + (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) return i ? 
i : -EFAULT; BUG(); } @@ -2511,8 +2512,15 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, goto out; entry = pte_to_swp_entry(orig_pte); - if (is_migration_entry(entry)) { - migration_entry_wait(mm, pmd, address); + if (unlikely(non_swap_entry(entry))) { + if (is_migration_entry(entry)) { + migration_entry_wait(mm, pmd, address); + } else if (is_hwpoison_entry(entry)) { + ret = VM_FAULT_HWPOISON; + } else { + print_bad_pte(vma, address, orig_pte, NULL); + ret = VM_FAULT_OOM; + } goto out; } delayacct_set_flag(DELAYACCT_PF_SWAPIN); @@ -2536,6 +2544,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Had to read the page from swap area: Major fault */ ret = VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); + } else if (PageHWPoison(page)) { + ret = VM_FAULT_HWPOISON; + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + goto out; } lock_page(page); -- cgit v1.2.3-70-g09d2 From 14fa31b89c5ae79e4131da41761378a6df674352 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:10 +0200 Subject: HWPOISON: Use bitmask/action code for try_to_unmap behaviour try_to_unmap currently has multiple modi (migration, munlock, normal unmap) which are selected by magic flag variables. The logic is not very straight forward, because each of these flag change multiple behaviours (e.g. migration turns off aging, not only sets up migration ptes etc.) Also the different flags interact in magic ways. A later patch in this series adds another mode to try_to_unmap, so this becomes quickly unmanageable. Replace the different flags with a action code (migration, munlock, munmap) and some additional flags as modifiers (ignore mlock, ignore aging). This makes the logic more straight forward and allows easier extension to new behaviours. Change all the caller to declare what they want to do. This patch is supposed to be a nop in behaviour. If anyone can prove it is not that would be a bug. 
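To make the new convention concrete, here is a small stand-alone sketch (an illustration, not part of the patch) that mirrors the flag layout introduced below: exactly one action code lives in the low byte, and the independent modifiers live in the bits above it.

/*
 * Stand-alone illustration of the action-code + modifier convention.
 * The enum values mirror the ones added to include/linux/rmap.h below.
 */
#include <stdio.h>

enum ttu_flags {
	TTU_UNMAP	  = 0,		/* action codes: low byte, one at a time */
	TTU_MIGRATION	  = 1,
	TTU_MUNLOCK	  = 2,
	TTU_ACTION_MASK	  = 0xff,

	TTU_IGNORE_MLOCK  = (1 << 8),	/* modifiers: independent high bits */
	TTU_IGNORE_ACCESS = (1 << 9),
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

static void describe(int flags)
{
	printf("action %d, ignore_mlock=%d, ignore_access=%d\n",
	       TTU_ACTION(flags),
	       !!(flags & TTU_IGNORE_MLOCK),
	       !!(flags & TTU_IGNORE_ACCESS));
}

int main(void)
{
	/* what mm/migrate.c passes after this patch */
	describe(TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
	/* what the normal reclaim path in mm/vmscan.c passes */
	describe(TTU_UNMAP);
	return 0;
}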
Cc: Lee.Schermerhorn@hp.com Cc: npiggin@suse.de Signed-off-by: Andi Kleen --- include/linux/rmap.h | 13 ++++++++++++- mm/migrate.c | 2 +- mm/rmap.c | 40 ++++++++++++++++++++++------------------ mm/vmscan.c | 2 +- 4 files changed, 36 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 8dff2ffab82..4c4a2d4d289 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -85,7 +85,18 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, */ int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt, unsigned long *vm_flags); -int try_to_unmap(struct page *, int ignore_refs); +enum ttu_flags { + TTU_UNMAP = 0, /* unmap mode */ + TTU_MIGRATION = 1, /* migration mode */ + TTU_MUNLOCK = 2, /* munlock mode */ + TTU_ACTION_MASK = 0xff, + + TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ + TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ +}; +#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) + +int try_to_unmap(struct page *, enum ttu_flags flags); /* * Called from mm/filemap_xip.c to unmap empty zero page diff --git a/mm/migrate.c b/mm/migrate.c index 939888f9dda..e3a0cd3859a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -669,7 +669,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, } /* Establish migration ptes or remove ptes */ - try_to_unmap(page, 1); + try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); if (!page_mapped(page)) rc = move_to_new_page(newpage, page); diff --git a/mm/rmap.c b/mm/rmap.c index 5a35c030e77..08c112a776a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -774,7 +774,7 @@ void page_remove_rmap(struct page *page) * repeatedly from either try_to_unmap_anon or try_to_unmap_file. */ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, - int migration) + enum ttu_flags flags) { struct mm_struct *mm = vma->vm_mm; unsigned long address; @@ -796,11 +796,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * If it's recently referenced (perhaps page_referenced * skipped over this mm) then we should reactivate it. */ - if (!migration) { + if (!(flags & TTU_IGNORE_MLOCK)) { if (vma->vm_flags & VM_LOCKED) { ret = SWAP_MLOCK; goto out_unmap; } + } + if (!(flags & TTU_IGNORE_ACCESS)) { if (ptep_clear_flush_young_notify(vma, address, pte)) { ret = SWAP_FAIL; goto out_unmap; @@ -840,12 +842,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ - BUG_ON(!migration); + BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); entry = make_migration_entry(page, pte_write(pteval)); } set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); BUG_ON(pte_file(*pte)); - } else if (PAGE_MIGRATION && migration) { + } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) { /* Establish migration entry for a file page */ swp_entry_t entry; entry = make_migration_entry(page, pte_write(pteval)); @@ -1014,12 +1016,13 @@ static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma) * vm_flags for that VMA. That should be OK, because that vma shouldn't be * 'LOCKED. 
*/ -static int try_to_unmap_anon(struct page *page, int unlock, int migration) +static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) { struct anon_vma *anon_vma; struct vm_area_struct *vma; unsigned int mlocked = 0; int ret = SWAP_AGAIN; + int unlock = TTU_ACTION(flags) == TTU_MUNLOCK; if (MLOCK_PAGES && unlikely(unlock)) ret = SWAP_SUCCESS; /* default for try_to_munlock() */ @@ -1035,7 +1038,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration) continue; /* must visit all unlocked vmas */ ret = SWAP_MLOCK; /* saw at least one mlocked vma */ } else { - ret = try_to_unmap_one(page, vma, migration); + ret = try_to_unmap_one(page, vma, flags); if (ret == SWAP_FAIL || !page_mapped(page)) break; } @@ -1059,8 +1062,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration) /** * try_to_unmap_file - unmap/unlock file page using the object-based rmap method * @page: the page to unmap/unlock - * @unlock: request for unlock rather than unmap [unlikely] - * @migration: unmapping for migration - ignored if @unlock + * @flags: action and flags * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the address_space struct it points to. @@ -1072,7 +1074,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration) * vm_flags for that VMA. That should be OK, because that vma shouldn't be * 'LOCKED. */ -static int try_to_unmap_file(struct page *page, int unlock, int migration) +static int try_to_unmap_file(struct page *page, enum ttu_flags flags) { struct address_space *mapping = page->mapping; pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); @@ -1084,6 +1086,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration) unsigned long max_nl_size = 0; unsigned int mapcount; unsigned int mlocked = 0; + int unlock = TTU_ACTION(flags) == TTU_MUNLOCK; if (MLOCK_PAGES && unlikely(unlock)) ret = SWAP_SUCCESS; /* default for try_to_munlock() */ @@ -1096,7 +1099,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration) continue; /* must visit all vmas */ ret = SWAP_MLOCK; } else { - ret = try_to_unmap_one(page, vma, migration); + ret = try_to_unmap_one(page, vma, flags); if (ret == SWAP_FAIL || !page_mapped(page)) goto out; } @@ -1121,7 +1124,8 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration) ret = SWAP_MLOCK; /* leave mlocked == 0 */ goto out; /* no need to look further */ } - if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED)) + if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && + (vma->vm_flags & VM_LOCKED)) continue; cursor = (unsigned long) vma->vm_private_data; if (cursor > max_nl_cursor) @@ -1155,7 +1159,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration) do { list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) { - if (!MLOCK_PAGES && !migration && + if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) continue; cursor = (unsigned long) vma->vm_private_data; @@ -1195,7 +1199,7 @@ out: /** * try_to_unmap - try to remove all page table mappings to a page * @page: the page to get unmapped - * @migration: migration flag + * @flags: action and flags * * Tries to remove all the page table entries which are mapping this * page, used in the pageout path. Caller must hold the page lock. @@ -1206,16 +1210,16 @@ out: * SWAP_FAIL - the page is unswappable * SWAP_MLOCK - page is mlocked. 
*/ -int try_to_unmap(struct page *page, int migration) +int try_to_unmap(struct page *page, enum ttu_flags flags) { int ret; BUG_ON(!PageLocked(page)); if (PageAnon(page)) - ret = try_to_unmap_anon(page, 0, migration); + ret = try_to_unmap_anon(page, flags); else - ret = try_to_unmap_file(page, 0, migration); + ret = try_to_unmap_file(page, flags); if (ret != SWAP_MLOCK && !page_mapped(page)) ret = SWAP_SUCCESS; return ret; @@ -1240,8 +1244,8 @@ int try_to_munlock(struct page *page) VM_BUG_ON(!PageLocked(page) || PageLRU(page)); if (PageAnon(page)) - return try_to_unmap_anon(page, 1, 0); + return try_to_unmap_anon(page, TTU_MUNLOCK); else - return try_to_unmap_file(page, 1, 0); + return try_to_unmap_file(page, TTU_MUNLOCK); } diff --git a/mm/vmscan.c b/mm/vmscan.c index ba8228e0a80..ab3b0ad3ce5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -659,7 +659,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { - switch (try_to_unmap(page, 0)) { + switch (try_to_unmap(page, TTU_UNMAP)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: -- cgit v1.2.3-70-g09d2 From 888b9f7c58ebe8303bad817cd554df887a683957 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:11 +0200 Subject: HWPOISON: Handle hardware poisoned pages in try_to_unmap When a page has the poison bit set replace the PTE with a poison entry. This causes the right error handling to be done later when a process runs into it. v2: add a new flag to not do that (needed for the memory-failure handler later) (Fengguang) v3: remove unnecessary is_migration_entry() test (Fengguang, Minchan) Reviewed-by: Minchan Kim Reviewed-by: Wu Fengguang Signed-off-by: Andi Kleen --- include/linux/rmap.h | 1 + mm/rmap.c | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 4c4a2d4d289..ce989f1fc2e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -93,6 +93,7 @@ enum ttu_flags { TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ + TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ }; #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) diff --git a/mm/rmap.c b/mm/rmap.c index 08c112a776a..7e72ca19d68 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -820,7 +820,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, /* Update high watermark before we lower rss */ update_hiwater_rss(mm); - if (PageAnon(page)) { + if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { + if (PageAnon(page)) + dec_mm_counter(mm, anon_rss); + else + dec_mm_counter(mm, file_rss); + set_pte_at(mm, address, pte, + swp_entry_to_pte(make_hwpoison_entry(page))); + } else if (PageAnon(page)) { swp_entry_t entry = { .val = page_private(page) }; if (PageSwapCache(page)) { -- cgit v1.2.3-70-g09d2 From 750b4987b0cd4d408e54cb83a80a067cbe690feb Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 16 Sep 2009 11:50:12 +0200 Subject: HWPOISON: Refactor truncate to allow direct truncating of page v2 Extract out truncate_inode_page() out of the truncate path so that it can be used by memory-failure.c [AK: description, headers, fix typos] v2: Some white space changes from Fengguang Wu Signed-off-by: Andi Kleen --- include/linux/mm.h | 2 ++ mm/truncate.c | 29 +++++++++++++++-------------- 2 files changed, 17 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h 
b/include/linux/mm.h index 082b68cb5ff..8cbc0aafd5b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -794,6 +794,8 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, extern int vmtruncate(struct inode * inode, loff_t offset); extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); +int truncate_inode_page(struct address_space *mapping, struct page *page); + #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); diff --git a/mm/truncate.c b/mm/truncate.c index ccc3ecf7cb9..2519a7c9287 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page); * its lock, b) when a concurrent invalidate_mapping_pages got there first and * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space. */ -static void +static int truncate_complete_page(struct address_space *mapping, struct page *page) { if (page->mapping != mapping) - return; + return -EIO; if (page_has_private(page)) do_invalidatepage(page, 0); @@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) remove_from_page_cache(page); ClearPageMappedToDisk(page); page_cache_release(page); /* pagecache ref */ + return 0; } /* @@ -135,6 +136,16 @@ invalidate_complete_page(struct address_space *mapping, struct page *page) return ret; } +int truncate_inode_page(struct address_space *mapping, struct page *page) +{ + if (page_mapped(page)) { + unmap_mapping_range(mapping, + (loff_t)page->index << PAGE_CACHE_SHIFT, + PAGE_CACHE_SIZE, 0); + } + return truncate_complete_page(mapping, page); +} + /** * truncate_inode_pages - truncate range of pages specified by start & end byte offsets * @mapping: mapping to truncate @@ -196,12 +207,7 @@ void truncate_inode_pages_range(struct address_space *mapping, unlock_page(page); continue; } - if (page_mapped(page)) { - unmap_mapping_range(mapping, - (loff_t)page_index<index<index > next) next = page->index; next++; - truncate_complete_page(mapping, page); unlock_page(page); } pagevec_release(&pvec); -- cgit v1.2.3-70-g09d2 From 83f786680aec8d030184f7ced1a0a3dd8ac81764 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 16 Sep 2009 11:50:13 +0200 Subject: HWPOISON: Add invalidate_inode_page Add a simple way to invalidate a single page This is just a refactoring of the truncate.c code. Originally from Fengguang, modified by Andi Kleen. Signed-off-by: Andi Kleen --- include/linux/mm.h | 2 ++ mm/truncate.c | 26 ++++++++++++++++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 8cbc0aafd5b..b05bbde0296 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -796,6 +796,8 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); int truncate_inode_page(struct address_space *mapping, struct page *page); +int invalidate_inode_page(struct page *page); + #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); diff --git a/mm/truncate.c b/mm/truncate.c index 2519a7c9287..ea132f7ea2d 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -146,6 +146,24 @@ int truncate_inode_page(struct address_space *mapping, struct page *page) return truncate_complete_page(mapping, page); } +/* + * Safely invalidate one page from its pagecache mapping. + * It only drops clean, unused pages. The page must be locked. 
+ * + * Returns 1 if the page is successfully invalidated, otherwise 0. + */ +int invalidate_inode_page(struct page *page) +{ + struct address_space *mapping = page_mapping(page); + if (!mapping) + return 0; + if (PageDirty(page) || PageWriteback(page)) + return 0; + if (page_mapped(page)) + return 0; + return invalidate_complete_page(mapping, page); +} + /** * truncate_inode_pages - truncate range of pages specified by start & end byte offsets * @mapping: mapping to truncate @@ -312,12 +330,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, if (lock_failed) continue; - if (PageDirty(page) || PageWriteback(page)) - goto unlock; - if (page_mapped(page)) - goto unlock; - ret += invalidate_complete_page(mapping, page); -unlock: + ret += invalidate_inode_page(page); + unlock_page(page); if (next > end) break; -- cgit v1.2.3-70-g09d2 From 257187362123f15d9d1e09918cf87cebbea4e786 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:13 +0200 Subject: HWPOISON: Define a new error_remove_page address space op for async truncation Truncating metadata pages is not safe right now before we haven't audited all file systems. To enable truncation only for data address space define a new address_space callback error_remove_page. This is used for memory_failure.c memory error handling. This can be then set to truncate_inode_page() This patch just defines the new operation and adds documentation. Callers and users come in followon patches. Signed-off-by: Andi Kleen --- Documentation/filesystems/vfs.txt | 7 +++++++ include/linux/fs.h | 1 + include/linux/mm.h | 1 + mm/truncate.c | 17 +++++++++++++++++ 4 files changed, 26 insertions(+) (limited to 'include') diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index f49eecf2e57..623f094c9d8 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -536,6 +536,7 @@ struct address_space_operations { /* migrate the contents of a page to the specified target */ int (*migratepage) (struct page *, struct page *); int (*launder_page) (struct page *); + int (*error_remove_page) (struct mapping *mapping, struct page *page); }; writepage: called by the VM to write a dirty page to backing store. @@ -694,6 +695,12 @@ struct address_space_operations { prevent redirtying the page, it is kept locked during the whole operation. + error_remove_page: normally set to generic_error_remove_page if truncation + is ok for this address space. Used for memory failure handling. + Setting this implies you deal with pages going away under you, + unless you have them locked or reference counts increased. 
+ + The File Object =============== diff --git a/include/linux/fs.h b/include/linux/fs.h index b21cf6b9c80..4f47afd3764 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -595,6 +595,7 @@ struct address_space_operations { int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, read_descriptor_t *, unsigned long); + int (*error_remove_page)(struct address_space *, struct page *); }; /* diff --git a/include/linux/mm.h b/include/linux/mm.h index b05bbde0296..a16018f7d61 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -795,6 +795,7 @@ extern int vmtruncate(struct inode * inode, loff_t offset); extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); int truncate_inode_page(struct address_space *mapping, struct page *page); +int generic_error_remove_page(struct address_space *mapping, struct page *page); int invalidate_inode_page(struct page *page); diff --git a/mm/truncate.c b/mm/truncate.c index ea132f7ea2d..a17b3977cfd 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -146,6 +146,23 @@ int truncate_inode_page(struct address_space *mapping, struct page *page) return truncate_complete_page(mapping, page); } +/* + * Used to get rid of pages on hardware memory corruption. + */ +int generic_error_remove_page(struct address_space *mapping, struct page *page) +{ + if (!mapping) + return -EINVAL; + /* + * Only punch for normal data pages for now. + * Handling other types like directories would need more auditing. + */ + if (!S_ISREG(mapping->host->i_mode)) + return -EIO; + return truncate_inode_page(mapping, page); +} +EXPORT_SYMBOL(generic_error_remove_page); + /* * Safely invalidate one page from its pagecache mapping. * It only drops clean, unused pages. The page must be locked. -- cgit v1.2.3-70-g09d2 From 4db96cf077aa938b11fe7ac79ecc9b29ec00fbab Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:14 +0200 Subject: HWPOISON: Add PR_MCE_KILL prctl to control early kill behaviour per process This allows processes to override their early/late kill behaviour on hardware memory errors. Typically applications which are memory error aware is better of with early kill (see the error as soon as possible), all others with late kill (only see the error when the error is really impacting execution) There's a global sysctl, but this way an application can set its specific policy. We're using two bits, one to signify that the process stated its intention and that I also made the prctl future proof by enforcing the unused arguments are 0. The state is inherited to children. Note this makes us officially run out of process flags on 32bit, but the next patch can easily add another field. Manpage patch will be supplied separately. 
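As an illustration of the intended use (not part of the patch), a process can state its policy as shown below. PR_MCE_KILL is 33 as introduced here; userspace headers that predate this patch will not have the constant, so the fallback define is an assumption of this sketch.

/*
 * Illustration only: per-process machine-check kill policy via prctl().
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33		/* value introduced by this patch */
#endif

int main(void)
{
	/* arg2=1 declares a per-process policy, arg3!=0 selects early kill */
	if (prctl(PR_MCE_KILL, 1, 1, 0, 0))
		perror("prctl(PR_MCE_KILL, early)");

	/* ... memory-error-aware work runs here ... */

	/* arg2=0 (arg3 must be 0) reverts to the global sysctl policy */
	if (prctl(PR_MCE_KILL, 0, 0, 0, 0))
		perror("prctl(PR_MCE_KILL, default)");
	return 0;
}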
Signed-off-by: Andi Kleen --- include/linux/prctl.h | 2 ++ include/linux/sched.h | 2 ++ kernel/sys.c | 22 ++++++++++++++++++++++ 3 files changed, 26 insertions(+) (limited to 'include') diff --git a/include/linux/prctl.h b/include/linux/prctl.h index b00df4c79c6..3dc303197e6 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -88,4 +88,6 @@ #define PR_TASK_PERF_COUNTERS_DISABLE 31 #define PR_TASK_PERF_COUNTERS_ENABLE 32 +#define PR_MCE_KILL 33 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index f3d74bd04d1..29eae73c951 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1687,6 +1687,7 @@ extern cputime_t task_gtime(struct task_struct *p); #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ #define PF_DUMPCORE 0x00000200 /* dumped core */ #define PF_SIGNALED 0x00000400 /* killed by a signal */ @@ -1706,6 +1707,7 @@ extern cputime_t task_gtime(struct task_struct *p); #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ diff --git a/kernel/sys.c b/kernel/sys.c index b3f1097c76f..41e02eff339 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1528,6 +1528,28 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, current->timer_slack_ns = arg2; error = 0; break; + case PR_MCE_KILL: + if (arg4 | arg5) + return -EINVAL; + switch (arg2) { + case 0: + if (arg3 != 0) + return -EINVAL; + current->flags &= ~PF_MCE_PROCESS; + break; + case 1: + current->flags |= PF_MCE_PROCESS; + if (arg3 != 0) + current->flags |= PF_MCE_EARLY; + else + current->flags &= ~PF_MCE_EARLY; + break; + default: + return -EINVAL; + } + error = 0; + break; + default: error = -EINVAL; break; -- cgit v1.2.3-70-g09d2 From 6a46079cf57a7f7758e8b926980a4f852f89b34d Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:15 +0200 Subject: HWPOISON: The high level memory error handler in the VM v7 Add the high level memory handler that poisons pages that got corrupted by hardware (typically by a two bit flip in a DIMM or a cache) on the Linux level. The goal is to prevent everyone from accessing these pages in the future. This done at the VM level by marking a page hwpoisoned and doing the appropriate action based on the type of page it is. The code that does this is portable and lives in mm/memory-failure.c To quote the overview comment: High level machine check handler. Handles pages reported by the hardware as being corrupted usually due to a 2bit ECC memory or cache failure. This focuses on pages detected as corrupted in the background. When the current CPU tries to consume corruption the currently running process can just be killed directly instead. This implies that if the error cannot be handled for some reason it's safe to just ignore it because no corruption has been consumed yet. 
Instead when that happens another machine check will happen. Handles page cache pages in various states. The tricky part here is that we can access any page asynchronous to other VM users, because memory failures could happen anytime and anywhere, possibly violating some of their assumptions. This is why this code has to be extremely careful. Generally it tries to use normal locking rules, as in get the standard locks, even if that means the error handling takes potentially a long time. Some of the operations here are somewhat inefficient and have non linear algorithmic complexity, because the data structures have not been optimized for this case. This is in particular the case for the mapping from a vma to a process. Since this case is expected to be rare we hope we can get away with this. There are in principle two strategies to kill processes on poison: - just unmap the data and wait for an actual reference before killing - kill as soon as corruption is detected. Both have advantages and disadvantages and should be used in different situations. Right now both are implemented and can be switched with a new sysctl vm.memory_failure_early_kill The default is early kill. The patch does some rmap data structure walking on its own to collect processes to kill. This is unusual because normally all rmap data structure knowledge is in rmap.c only. I put it here for now to keep everything together and rmap knowledge has been seeping out anyways Includes contributions from Johannes Weiner, Chris Mason, Fengguang Wu, Nick Piggin (who did a lot of great work) and others. Cc: npiggin@suse.de Cc: riel@redhat.com Signed-off-by: Andi Kleen Acked-by: Rik van Riel Reviewed-by: Hidehiro Kawai --- Documentation/sysctl/vm.txt | 41 ++- fs/proc/meminfo.c | 9 +- include/linux/mm.h | 7 + include/linux/rmap.h | 1 + kernel/sysctl.c | 25 ++ mm/Kconfig | 10 + mm/Makefile | 1 + mm/filemap.c | 4 + mm/memory-failure.c | 832 ++++++++++++++++++++++++++++++++++++++++++++ mm/rmap.c | 7 +- 10 files changed, 934 insertions(+), 3 deletions(-) create mode 100644 mm/memory-failure.c (limited to 'include') diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index c4de6359d44..faf62740aa2 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/vm: - legacy_va_layout - lowmem_reserve_ratio - max_map_count +- memory_failure_early_kill +- memory_failure_recovery - min_free_kbytes - min_slab_ratio - min_unmapped_ratio @@ -53,7 +55,6 @@ Currently, these files are in /proc/sys/vm: - vfs_cache_pressure - zone_reclaim_mode - ============================================================== block_dump @@ -275,6 +276,44 @@ e.g., up to one or two maps per allocation. The default value is 65536. +============================================================= + +memory_failure_early_kill: + +Control how to kill processes when uncorrected memory error (typically +a 2bit error in a memory module) is detected in the background by hardware +that cannot be handled by the kernel. In some cases (like the page +still having a valid copy on disk) the kernel will handle the failure +transparently without affecting any applications. But if there is +no other uptodate copy of the data it will kill to prevent any data +corruptions from propagating. + +1: Kill all processes that have the corrupted and not reloadable page mapped +as soon as the corruption is detected. 
Note this is not supported +for a few types of pages, like kernel internally allocated data or +the swap cache, but works for the majority of user pages. + +0: Only unmap the corrupted page from all processes and only kill a process +who tries to access it. + +The kill is done using a catchable SIGBUS with BUS_MCEERR_AO, so processes can +handle this if they want to. + +This is only active on architectures/platforms with advanced machine +check handling and depends on the hardware capabilities. + +Applications can override this setting individually with the PR_MCE_KILL prctl + +============================================================== + +memory_failure_recovery + +Enable memory failure recovery (when supported by the platform) + +1: Attempt recovery. + +0: Always panic on a memory failure. + ============================================================== min_free_kbytes: diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index d5c410d47fa..78faedcb0a8 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -95,7 +95,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v) "Committed_AS: %8lu kB\n" "VmallocTotal: %8lu kB\n" "VmallocUsed: %8lu kB\n" - "VmallocChunk: %8lu kB\n", + "VmallocChunk: %8lu kB\n" +#ifdef CONFIG_MEMORY_FAILURE + "HardwareCorrupted: %8lu kB\n" +#endif + , K(i.totalram), K(i.freeram), K(i.bufferram), @@ -140,6 +144,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) (unsigned long)VMALLOC_TOTAL >> 10, vmi.used >> 10, vmi.largest_chunk >> 10 +#ifdef CONFIG_MEMORY_FAILURE + ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) +#endif ); hugetlb_report_meminfo(m); diff --git a/include/linux/mm.h b/include/linux/mm.h index a16018f7d61..1ffca03f34b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1309,5 +1309,12 @@ void vmemmap_populate_print_last(void); extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, size_t size); extern void refund_locked_memory(struct mm_struct *mm, size_t size); + +extern void memory_failure(unsigned long pfn, int trapno); +extern int __memory_failure(unsigned long pfn, int trapno, int ref); +extern int sysctl_memory_failure_early_kill; +extern int sysctl_memory_failure_recovery; +extern atomic_long_t mce_bad_pages; + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index ce989f1fc2e..3c1004e5074 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -129,6 +129,7 @@ int try_to_munlock(struct page *); */ struct anon_vma *page_lock_anon_vma(struct page *page); void page_unlock_anon_vma(struct anon_vma *anon_vma); +int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); #else /* !CONFIG_MMU */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6bb59f70740..eacae77ac9f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1372,6 +1372,31 @@ static struct ctl_table vm_table[] = { .mode = 0644, .proc_handler = &scan_unevictable_handler, }, +#ifdef CONFIG_MEMORY_FAILURE + { + .ctl_name = CTL_UNNUMBERED, + .procname = "memory_failure_early_kill", + .data = &sysctl_memory_failure_early_kill, + .maxlen = sizeof(sysctl_memory_failure_early_kill), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &one, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "memory_failure_recovery", + .data = &sysctl_memory_failure_recovery, + .maxlen = sizeof(sysctl_memory_failure_recovery), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = 
&sysctl_intvec, + .extra1 = &zero, + .extra2 = &one, + }, +#endif + /* * NOTE: do not add new entries to this table unless you have read * Documentation/sysctl/ctl_unnumbered.txt diff --git a/mm/Kconfig b/mm/Kconfig index 3aa519f52e1..ea2d8b61c63 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -233,6 +233,16 @@ config DEFAULT_MMAP_MIN_ADDR /proc/sys/vm/mmap_min_addr tunable. +config MEMORY_FAILURE + depends on MMU + depends on X86_MCE + bool "Enable recovery from hardware memory errors" + help + Enables code to recover from some memory failures on systems + with MCA recovery. This allows a system to continue running + even when some of its memory has uncorrected errors. This requires + special hardware support and typically ECC memory. + config NOMMU_INITIAL_TRIM_EXCESS int "Turn on mmap() excess space trimming before booting" depends on !MMU diff --git a/mm/Makefile b/mm/Makefile index ea4b18bd396..dc2551e7d00 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -40,5 +40,6 @@ obj-$(CONFIG_SMP) += allocpercpu.o endif obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o +obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o diff --git a/mm/filemap.c b/mm/filemap.c index dd51c68e2b8..75575c39216 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -104,6 +104,10 @@ * * ->task->proc_lock * ->dcache_lock (proc_pid_lookup) + * + * (code doesn't rely on that order, so you could switch it around) + * ->tasklist_lock (memory_failure, collect_procs_ao) + * ->i_mmap_lock */ /* diff --git a/mm/memory-failure.c b/mm/memory-failure.c new file mode 100644 index 00000000000..729d4b15b64 --- /dev/null +++ b/mm/memory-failure.c @@ -0,0 +1,832 @@ +/* + * Copyright (C) 2008, 2009 Intel Corporation + * Authors: Andi Kleen, Fengguang Wu + * + * This software may be redistributed and/or modified under the terms of + * the GNU General Public License ("GPL") version 2 only as published by the + * Free Software Foundation. + * + * High level machine check handler. Handles pages reported by the + * hardware as being corrupted usually due to a 2bit ECC memory or cache + * failure. + * + * Handles page cache pages in various states. The tricky part + * here is that we can access any page asynchronous to other VM + * users, because memory failures could happen anytime and anywhere, + * possibly violating some of their assumptions. This is why this code + * has to be extremely careful. Generally it tries to use normal locking + * rules, as in get the standard locks, even if that means the + * error handling takes potentially a long time. + * + * The operation to map back from RMAP chains to processes has to walk + * the complete process list and has non linear complexity with the number + * mappings. In short it can be quite slow. But since memory corruptions + * are rare we hope to get away with this. + */ + +/* + * Notebook: + * - hugetlb needs more code + * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages + * - pass bad pages to kdump next kernel + */ +#define DEBUG 1 /* remove me in 2.6.34 */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +int sysctl_memory_failure_early_kill __read_mostly = 0; + +int sysctl_memory_failure_recovery __read_mostly = 1; + +atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); + +/* + * Send all the processes who have the page mapped an ``action optional'' + * signal. 
+ */ +static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, + unsigned long pfn) +{ + struct siginfo si; + int ret; + + printk(KERN_ERR + "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n", + pfn, t->comm, t->pid); + si.si_signo = SIGBUS; + si.si_errno = 0; + si.si_code = BUS_MCEERR_AO; + si.si_addr = (void *)addr; +#ifdef __ARCH_SI_TRAPNO + si.si_trapno = trapno; +#endif + si.si_addr_lsb = PAGE_SHIFT; + /* + * Don't use force here, it's convenient if the signal + * can be temporarily blocked. + * This could cause a loop when the user sets SIGBUS + * to SIG_IGN, but hopefully noone will do that? + */ + ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ + if (ret < 0) + printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n", + t->comm, t->pid, ret); + return ret; +} + +/* + * Kill all processes that have a poisoned page mapped and then isolate + * the page. + * + * General strategy: + * Find all processes having the page mapped and kill them. + * But we keep a page reference around so that the page is not + * actually freed yet. + * Then stash the page away + * + * There's no convenient way to get back to mapped processes + * from the VMAs. So do a brute-force search over all + * running processes. + * + * Remember that machine checks are not common (or rather + * if they are common you have other problems), so this shouldn't + * be a performance issue. + * + * Also there are some races possible while we get from the + * error detection to actually handle it. + */ + +struct to_kill { + struct list_head nd; + struct task_struct *tsk; + unsigned long addr; + unsigned addr_valid:1; +}; + +/* + * Failure handling: if we can't find or can't kill a process there's + * not much we can do. We just print a message and ignore otherwise. + */ + +/* + * Schedule a process for later kill. + * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. + * TBD would GFP_NOIO be enough? + */ +static void add_to_kill(struct task_struct *tsk, struct page *p, + struct vm_area_struct *vma, + struct list_head *to_kill, + struct to_kill **tkc) +{ + struct to_kill *tk; + + if (*tkc) { + tk = *tkc; + *tkc = NULL; + } else { + tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); + if (!tk) { + printk(KERN_ERR + "MCE: Out of memory while machine check handling\n"); + return; + } + } + tk->addr = page_address_in_vma(p, vma); + tk->addr_valid = 1; + + /* + * In theory we don't have to kill when the page was + * munmaped. But it could be also a mremap. Since that's + * likely very rare kill anyways just out of paranoia, but use + * a SIGKILL because the error is not contained anymore. + */ + if (tk->addr == -EFAULT) { + pr_debug("MCE: Unable to find user space address %lx in %s\n", + page_to_pfn(p), tsk->comm); + tk->addr_valid = 0; + } + get_task_struct(tsk); + tk->tsk = tsk; + list_add_tail(&tk->nd, to_kill); +} + +/* + * Kill the processes that have been collected earlier. + * + * Only do anything when DOIT is set, otherwise just free the list + * (this is used for clean pages which do not need killing) + * Also when FAIL is set do a force kill because something went + * wrong earlier. + */ +static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, + int fail, unsigned long pfn) +{ + struct to_kill *tk, *next; + + list_for_each_entry_safe (tk, next, to_kill, nd) { + if (doit) { + /* + * In case something went wrong with munmaping + * make sure the process doesn't catch the + * signal and then access the memory. Just kill it. 
+ * the signal handlers + */ + if (fail || tk->addr_valid == 0) { + printk(KERN_ERR + "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", + pfn, tk->tsk->comm, tk->tsk->pid); + force_sig(SIGKILL, tk->tsk); + } + + /* + * In theory the process could have mapped + * something else on the address in-between. We could + * check for that, but we need to tell the + * process anyways. + */ + else if (kill_proc_ao(tk->tsk, tk->addr, trapno, + pfn) < 0) + printk(KERN_ERR + "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", + pfn, tk->tsk->comm, tk->tsk->pid); + } + put_task_struct(tk->tsk); + kfree(tk); + } +} + +static int task_early_kill(struct task_struct *tsk) +{ + if (!tsk->mm) + return 0; + if (tsk->flags & PF_MCE_PROCESS) + return !!(tsk->flags & PF_MCE_EARLY); + return sysctl_memory_failure_early_kill; +} + +/* + * Collect processes when the error hit an anonymous page. + */ +static void collect_procs_anon(struct page *page, struct list_head *to_kill, + struct to_kill **tkc) +{ + struct vm_area_struct *vma; + struct task_struct *tsk; + struct anon_vma *av; + + read_lock(&tasklist_lock); + av = page_lock_anon_vma(page); + if (av == NULL) /* Not actually mapped anymore */ + goto out; + for_each_process (tsk) { + if (!task_early_kill(tsk)) + continue; + list_for_each_entry (vma, &av->head, anon_vma_node) { + if (!page_mapped_in_vma(page, vma)) + continue; + if (vma->vm_mm == tsk->mm) + add_to_kill(tsk, page, vma, to_kill, tkc); + } + } + page_unlock_anon_vma(av); +out: + read_unlock(&tasklist_lock); +} + +/* + * Collect processes when the error hit a file mapped page. + */ +static void collect_procs_file(struct page *page, struct list_head *to_kill, + struct to_kill **tkc) +{ + struct vm_area_struct *vma; + struct task_struct *tsk; + struct prio_tree_iter iter; + struct address_space *mapping = page->mapping; + + /* + * A note on the locking order between the two locks. + * We don't rely on this particular order. + * If you have some other code that needs a different order + * feel free to switch them around. Or add a reverse link + * from mm_struct to task_struct, then this could be all + * done without taking tasklist_lock and looping over all tasks. + */ + + read_lock(&tasklist_lock); + spin_lock(&mapping->i_mmap_lock); + for_each_process(tsk) { + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + + if (!task_early_kill(tsk)) + continue; + + vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, + pgoff) { + /* + * Send early kill signal to tasks where a vma covers + * the page but the corrupted page is not necessarily + * mapped it in its pte. + * Assume applications who requested early kill want + * to be informed of all such data corruptions. + */ + if (vma->vm_mm == tsk->mm) + add_to_kill(tsk, page, vma, to_kill, tkc); + } + } + spin_unlock(&mapping->i_mmap_lock); + read_unlock(&tasklist_lock); +} + +/* + * Collect the processes who have the corrupted page mapped to kill. + * This is done in two steps for locking reasons. + * First preallocate one tokill structure outside the spin locks, + * so that we can kill at least one process reasonably reliable. 
+ */ +static void collect_procs(struct page *page, struct list_head *tokill) +{ + struct to_kill *tk; + + if (!page->mapping) + return; + + tk = kmalloc(sizeof(struct to_kill), GFP_NOIO); + if (!tk) + return; + if (PageAnon(page)) + collect_procs_anon(page, tokill, &tk); + else + collect_procs_file(page, tokill, &tk); + kfree(tk); +} + +/* + * Error handlers for various types of pages. + */ + +enum outcome { + FAILED, /* Error handling failed */ + DELAYED, /* Will be handled later */ + IGNORED, /* Error safely ignored */ + RECOVERED, /* Successfully recovered */ +}; + +static const char *action_name[] = { + [FAILED] = "Failed", + [DELAYED] = "Delayed", + [IGNORED] = "Ignored", + [RECOVERED] = "Recovered", +}; + +/* + * Error hit kernel page. + * Do nothing, try to be lucky and not touch this instead. For a few cases we + * could be more sophisticated. + */ +static int me_kernel(struct page *p, unsigned long pfn) +{ + return DELAYED; +} + +/* + * Already poisoned page. + */ +static int me_ignore(struct page *p, unsigned long pfn) +{ + return IGNORED; +} + +/* + * Page in unknown state. Do nothing. + */ +static int me_unknown(struct page *p, unsigned long pfn) +{ + printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); + return FAILED; +} + +/* + * Free memory + */ +static int me_free(struct page *p, unsigned long pfn) +{ + return DELAYED; +} + +/* + * Clean (or cleaned) page cache page. + */ +static int me_pagecache_clean(struct page *p, unsigned long pfn) +{ + int err; + int ret = FAILED; + struct address_space *mapping; + + if (!isolate_lru_page(p)) + page_cache_release(p); + + /* + * For anonymous pages we're done the only reference left + * should be the one m_f() holds. + */ + if (PageAnon(p)) + return RECOVERED; + + /* + * Now truncate the page in the page cache. This is really + * more like a "temporary hole punch" + * Don't do this for block devices when someone else + * has a reference, because it could be file system metadata + * and that's not safe to truncate. + */ + mapping = page_mapping(p); + if (!mapping) { + /* + * Page has been teared down in the meanwhile + */ + return FAILED; + } + + /* + * Truncation is a bit tricky. Enable it per file system for now. + * + * Open: to take i_mutex or not for this? Right now we don't. + */ + if (mapping->a_ops->error_remove_page) { + err = mapping->a_ops->error_remove_page(mapping, p); + if (err != 0) { + printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", + pfn, err); + } else if (page_has_private(p) && + !try_to_release_page(p, GFP_NOIO)) { + pr_debug("MCE %#lx: failed to release buffers\n", pfn); + } else { + ret = RECOVERED; + } + } else { + /* + * If the file system doesn't support it just invalidate + * This fails on dirty or anything with private pages + */ + if (invalidate_inode_page(p)) + ret = RECOVERED; + else + printk(KERN_INFO "MCE %#lx: Failed to invalidate\n", + pfn); + } + return ret; +} + +/* + * Dirty cache page page + * Issues: when the error hit a hole page the error is not properly + * propagated. + */ +static int me_pagecache_dirty(struct page *p, unsigned long pfn) +{ + struct address_space *mapping = page_mapping(p); + + SetPageError(p); + /* TBD: print more information about the file. */ + if (mapping) { + /* + * IO error will be reported by write(), fsync(), etc. + * who check the mapping. + * This way the application knows that something went + * wrong with its dirty file data. 
+ * + * There's one open issue: + * + * The EIO will be only reported on the next IO + * operation and then cleared through the IO map. + * Normally Linux has two mechanisms to pass IO error + * first through the AS_EIO flag in the address space + * and then through the PageError flag in the page. + * Since we drop pages on memory failure handling the + * only mechanism open to use is through AS_AIO. + * + * This has the disadvantage that it gets cleared on + * the first operation that returns an error, while + * the PageError bit is more sticky and only cleared + * when the page is reread or dropped. If an + * application assumes it will always get error on + * fsync, but does other operations on the fd before + * and the page is dropped inbetween then the error + * will not be properly reported. + * + * This can already happen even without hwpoisoned + * pages: first on metadata IO errors (which only + * report through AS_EIO) or when the page is dropped + * at the wrong time. + * + * So right now we assume that the application DTRT on + * the first EIO, but we're not worse than other parts + * of the kernel. + */ + mapping_set_error(mapping, EIO); + } + + return me_pagecache_clean(p, pfn); +} + +/* + * Clean and dirty swap cache. + * + * Dirty swap cache page is tricky to handle. The page could live both in page + * cache and swap cache(ie. page is freshly swapped in). So it could be + * referenced concurrently by 2 types of PTEs: + * normal PTEs and swap PTEs. We try to handle them consistently by calling + * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs, + * and then + * - clear dirty bit to prevent IO + * - remove from LRU + * - but keep in the swap cache, so that when we return to it on + * a later page fault, we know the application is accessing + * corrupted data and shall be killed (we installed simple + * interception code in do_swap_page to catch it). + * + * Clean swap cache pages can be directly isolated. A later page fault will + * bring in the known good data from disk. + */ +static int me_swapcache_dirty(struct page *p, unsigned long pfn) +{ + int ret = FAILED; + + ClearPageDirty(p); + /* Trigger EIO in shmem: */ + ClearPageUptodate(p); + + if (!isolate_lru_page(p)) { + page_cache_release(p); + ret = DELAYED; + } + + return ret; +} + +static int me_swapcache_clean(struct page *p, unsigned long pfn) +{ + int ret = FAILED; + + if (!isolate_lru_page(p)) { + page_cache_release(p); + ret = RECOVERED; + } + delete_from_swap_cache(p); + return ret; +} + +/* + * Huge pages. Needs work. + * Issues: + * No rmap support so we cannot find the original mapper. In theory could walk + * all MMs and look for the mappings, but that would be non atomic and racy. + * Need rmap for hugepages for this. Alternatively we could employ a heuristic, + * like just walking the current process and hoping it has it mapped (that + * should be usually true for the common "shared database cache" case) + * Should handle free huge pages and dequeue them too, but this needs to + * handle huge page accounting correctly. + */ +static int me_huge_page(struct page *p, unsigned long pfn) +{ + return FAILED; +} + +/* + * Various page states we can handle. + * + * A page state is defined by its current page->flags bits. + * The table matches them in order and calls the right handler. + * + * This is quite tricky because we can access page at any time + * in its live cycle, so all accesses have to be extremly careful. + * + * This is not complete. More states could be added. 
+ * For any missing state don't attempt recovery. + */ + +#define dirty (1UL << PG_dirty) +#define sc (1UL << PG_swapcache) +#define unevict (1UL << PG_unevictable) +#define mlock (1UL << PG_mlocked) +#define writeback (1UL << PG_writeback) +#define lru (1UL << PG_lru) +#define swapbacked (1UL << PG_swapbacked) +#define head (1UL << PG_head) +#define tail (1UL << PG_tail) +#define compound (1UL << PG_compound) +#define slab (1UL << PG_slab) +#define buddy (1UL << PG_buddy) +#define reserved (1UL << PG_reserved) + +static struct page_state { + unsigned long mask; + unsigned long res; + char *msg; + int (*action)(struct page *p, unsigned long pfn); +} error_states[] = { + { reserved, reserved, "reserved kernel", me_ignore }, + { buddy, buddy, "free kernel", me_free }, + + /* + * Could in theory check if slab page is free or if we can drop + * currently unused objects without touching them. But just + * treat it as standard kernel for now. + */ + { slab, slab, "kernel slab", me_kernel }, + +#ifdef CONFIG_PAGEFLAGS_EXTENDED + { head, head, "huge", me_huge_page }, + { tail, tail, "huge", me_huge_page }, +#else + { compound, compound, "huge", me_huge_page }, +#endif + + { sc|dirty, sc|dirty, "swapcache", me_swapcache_dirty }, + { sc|dirty, sc, "swapcache", me_swapcache_clean }, + + { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty}, + { unevict, unevict, "unevictable LRU", me_pagecache_clean}, + +#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT + { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty }, + { mlock, mlock, "mlocked LRU", me_pagecache_clean }, +#endif + + { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty }, + { lru|dirty, lru, "clean LRU", me_pagecache_clean }, + { swapbacked, swapbacked, "anonymous", me_pagecache_clean }, + + /* + * Catchall entry: must be at end. + */ + { 0, 0, "unknown page state", me_unknown }, +}; + +#undef lru + +static void action_result(unsigned long pfn, char *msg, int result) +{ + struct page *page = NULL; + if (pfn_valid(pfn)) + page = pfn_to_page(pfn); + + printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n", + pfn, + page && PageDirty(page) ? "dirty " : "", + msg, action_name[result]); +} + +static int page_action(struct page_state *ps, struct page *p, + unsigned long pfn, int ref) +{ + int result; + + result = ps->action(p, pfn); + action_result(pfn, ps->msg, result); + if (page_count(p) != 1 + ref) + printk(KERN_ERR + "MCE %#lx: %s page still referenced by %d users\n", + pfn, ps->msg, page_count(p) - 1); + + /* Could do more checks here if page looks ok */ + /* + * Could adjust zone counters here to correct for the missing page. + */ + + return result == RECOVERED ? 0 : -EBUSY; +} + +#define N_UNMAP_TRIES 5 + +/* + * Do all that is necessary to remove user space mappings. Unmap + * the pages and send SIGBUS to the processes if the data was dirty. + */ +static void hwpoison_user_mappings(struct page *p, unsigned long pfn, + int trapno) +{ + enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; + struct address_space *mapping; + LIST_HEAD(tokill); + int ret; + int i; + int kill = 1; + + if (PageReserved(p) || PageCompound(p) || PageSlab(p)) + return; + + if (!PageLRU(p)) + lru_add_drain_all(); + + /* + * This check implies we don't kill processes if their pages + * are in the swap cache early. Those are always late kills. 
+ */ + if (!page_mapped(p)) + return; + + if (PageSwapCache(p)) { + printk(KERN_ERR + "MCE %#lx: keeping poisoned page in swap cache\n", pfn); + ttu |= TTU_IGNORE_HWPOISON; + } + + /* + * Propagate the dirty bit from PTEs to struct page first, because we + * need this to decide if we should kill or just drop the page. + */ + mapping = page_mapping(p); + if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) { + if (page_mkclean(p)) { + SetPageDirty(p); + } else { + kill = 0; + ttu |= TTU_IGNORE_HWPOISON; + printk(KERN_INFO + "MCE %#lx: corrupted page was clean: dropped without side effects\n", + pfn); + } + } + + /* + * First collect all the processes that have the page + * mapped in dirty form. This has to be done before try_to_unmap, + * because ttu takes the rmap data structures down. + * + * Error handling: We ignore errors here because + * there's nothing that can be done. + */ + if (kill) + collect_procs(p, &tokill); + + /* + * try_to_unmap can fail temporarily due to races. + * Try a few times (RED-PEN better strategy?) + */ + for (i = 0; i < N_UNMAP_TRIES; i++) { + ret = try_to_unmap(p, ttu); + if (ret == SWAP_SUCCESS) + break; + pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret); + } + + if (ret != SWAP_SUCCESS) + printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", + pfn, page_mapcount(p)); + + /* + * Now that the dirty bit has been propagated to the + * struct page and all unmaps done we can decide if + * killing is needed or not. Only kill when the page + * was dirty, otherwise the tokill list is merely + * freed. When there was a problem unmapping earlier + * use a more force-full uncatchable kill to prevent + * any accesses to the poisoned memory. + */ + kill_procs_ao(&tokill, !!PageDirty(p), trapno, + ret != SWAP_SUCCESS, pfn); +} + +int __memory_failure(unsigned long pfn, int trapno, int ref) +{ + struct page_state *ps; + struct page *p; + int res; + + if (!sysctl_memory_failure_recovery) + panic("Memory failure from trap %d on page %lx", trapno, pfn); + + if (!pfn_valid(pfn)) { + action_result(pfn, "memory outside kernel control", IGNORED); + return -EIO; + } + + p = pfn_to_page(pfn); + if (TestSetPageHWPoison(p)) { + action_result(pfn, "already hardware poisoned", IGNORED); + return 0; + } + + atomic_long_add(1, &mce_bad_pages); + + /* + * We need/can do nothing about count=0 pages. + * 1) it's a free page, and therefore in safe hand: + * prep_new_page() will be the gate keeper. + * 2) it's part of a non-compound high order page. + * Implies some kernel user: cannot stop them from + * R/W the page; let's pray that the page has been + * used and will be freed some time later. + * In fact it's dangerous to directly bump up page count from 0, + * that may make page_freeze_refs()/page_unfreeze_refs() mismatch. + */ + if (!get_page_unless_zero(compound_head(p))) { + action_result(pfn, "free or high order kernel", IGNORED); + return PageBuddy(compound_head(p)) ? 0 : -EBUSY; + } + + /* + * Lock the page and wait for writeback to finish. + * It's very difficult to mess with pages currently under IO + * and in many cases impossible, so we just avoid it here. + */ + lock_page_nosync(p); + wait_on_page_writeback(p); + + /* + * Now take care of user space mappings. + */ + hwpoison_user_mappings(p, pfn, trapno); + + /* + * Torn down by someone else? 
+ */ + if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { + action_result(pfn, "already truncated LRU", IGNORED); + res = 0; + goto out; + } + + res = -EBUSY; + for (ps = error_states;; ps++) { + if ((p->flags & ps->mask) == ps->res) { + res = page_action(ps, p, pfn, ref); + break; + } + } +out: + unlock_page(p); + return res; +} +EXPORT_SYMBOL_GPL(__memory_failure); + +/** + * memory_failure - Handle memory failure of a page. + * @pfn: Page Number of the corrupted page + * @trapno: Trap number reported in the signal to user space. + * + * This function is called by the low level machine check code + * of an architecture when it detects hardware memory corruption + * of a page. It tries its best to recover, which includes + * dropping pages, killing processes etc. + * + * The function is primarily of use for corruptions that + * happen outside the current execution context (e.g. when + * detected by a background scrubber) + * + * Must run in process context (e.g. a work queue) with interrupts + * enabled and no spinlocks hold. + */ +void memory_failure(unsigned long pfn, int trapno) +{ + __memory_failure(pfn, trapno, 0); +} diff --git a/mm/rmap.c b/mm/rmap.c index 7e72ca19d68..09c3d0b9611 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -36,6 +36,11 @@ * mapping->tree_lock (widely used, in set_page_dirty, * in arch-dependent flush_dcache_mmap_lock, * within inode_lock in __sync_single_inode) + * + * (code doesn't rely on that order so it could be switched around) + * ->tasklist_lock + * anon_vma->lock (memory_failure, collect_procs_anon) + * pte map lock */ #include @@ -311,7 +316,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm, * if the page is not mapped into the page tables of this VMA. Only * valid for normal file or anonymous VMAs. */ -static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) +int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) { unsigned long address; pte_t *pte; -- cgit v1.2.3-70-g09d2 From 9893e49d64a4874ea67849ee2cfbf3f3d6817573 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Sep 2009 11:50:17 +0200 Subject: HWPOISON: Add madvise() based injector for hardware poisoned pages v4 Impact: optional, useful for debugging Add a new madvice sub command to inject poison for some pages in a process' address space. This is useful for testing the poison page handling. This patch can allow root to tie up large amounts of memory. I got feedback from container developers and they didn't see any problem. 
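For illustration only (not part of the patch): a minimal user-space sketch of how the injector could be exercised. It assumes the MADV_HWPOISON value of 100 added below, and the caller must have CAP_SYS_ADMIN; any later access to the poisoned page may be answered with SIGBUS.

    #include <sys/mman.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #ifndef MADV_HWPOISON
    #define MADV_HWPOISON 100          /* value added to mman-common.h below */
    #endif

    int main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            memset(p, 0xaa, psz);      /* make sure the page is instantiated */

            /* ask the kernel to treat this page as hardware poisoned */
            if (madvise(p, psz, MADV_HWPOISON) < 0)
                    perror("madvise(MADV_HWPOISON)");
            return 0;
    }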
v2: Use write flag for get_user_pages to make sure to always get a fresh page v3: Don't request write mapping (Fengguang Wu) v4: Move MADV_* number to avoid conflict with KSM (Hugh Dickins) Signed-off-by: Andi Kleen --- include/asm-generic/mman-common.h | 1 + mm/madvise.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) (limited to 'include') diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 3b69ad34189..c325d1ef42a 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h @@ -34,6 +34,7 @@ #define MADV_REMOVE 9 /* remove these pages & resources */ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_HWPOISON 100 /* poison a page for testing */ /* compatibility flags */ #define MAP_FILE 0 diff --git a/mm/madvise.c b/mm/madvise.c index 76eb4193acd..8dbd38b8e4a 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -207,6 +207,32 @@ static long madvise_remove(struct vm_area_struct *vma, return error; } +#ifdef CONFIG_MEMORY_FAILURE +/* + * Error injection support for memory error handling. + */ +static int madvise_hwpoison(unsigned long start, unsigned long end) +{ + int ret = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + for (; start < end; start += PAGE_SIZE) { + struct page *p; + int ret = get_user_pages(current, current->mm, start, 1, + 0, 0, &p, NULL); + if (ret != 1) + return ret; + printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n", + page_to_pfn(p), start); + /* Ignore return value for now */ + __memory_failure(page_to_pfn(p), 0, 1); + put_page(p); + } + return ret; +} +#endif + static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) @@ -307,6 +333,10 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) int write; size_t len; +#ifdef CONFIG_MEMORY_FAILURE + if (behavior == MADV_HWPOISON) + return madvise_hwpoison(start, start+len_in); +#endif if (!madvise_behavior_valid(behavior)) return error; -- cgit v1.2.3-70-g09d2 From 9c28cbccec66a5ca292c6659bf5a0fe0c8459fa7 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 3 Aug 2009 19:21:00 +0200 Subject: jbd: Journal block numbers can ever be only 32-bit use unsigned int for them It does not make sense to store block number for journal as unsigned long since they can be only 32-bit (because of on-disk format limitation). So change in-memory structures and variables to use unsigned int instead. 
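To make the size argument concrete, a small illustrative sketch (not part of the patch; the struct is a stand-in for the on-disk journal_block_tag_t seen later in the diff): journal block numbers enter memory through be32_to_cpu() of 32-bit on-disk fields, so an unsigned int is always large enough.

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* stand-in for the on-disk tag: the block number is a 32-bit field */
    struct demo_tag {
            __be32 t_blocknr;
    };

    static inline unsigned int demo_tag_blocknr(const struct demo_tag *tag)
    {
            /* fits an unsigned int by construction of the on-disk format */
            return be32_to_cpu(tag->t_blocknr);
    }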
Signed-off-by: Jan Kara --- fs/jbd/checkpoint.c | 6 +++--- fs/jbd/commit.c | 2 +- fs/jbd/journal.c | 30 +++++++++++++++--------------- fs/jbd/recovery.c | 18 +++++++++--------- fs/jbd/revoke.c | 16 ++++++++-------- include/linux/jbd.h | 26 +++++++++++++------------- 6 files changed, 49 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 61f32f3868c..b0435dd0654 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -456,7 +456,7 @@ int cleanup_journal_tail(journal_t *journal) { transaction_t * transaction; tid_t first_tid; - unsigned long blocknr, freed; + unsigned int blocknr, freed; if (is_journal_aborted(journal)) return 1; @@ -502,8 +502,8 @@ int cleanup_journal_tail(journal_t *journal) freed = freed + journal->j_last - journal->j_first; jbd_debug(1, - "Cleaning journal tail from %d to %d (offset %lu), " - "freeing %lu\n", + "Cleaning journal tail from %d to %d (offset %u), " + "freeing %u\n", journal->j_tail_sequence, first_tid, blocknr, freed); journal->j_free += freed; diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 618e21c0b7a..4bd882548c4 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -308,7 +308,7 @@ void journal_commit_transaction(journal_t *journal) int bufs; int flags; int err; - unsigned long blocknr; + unsigned int blocknr; ktime_t start_time; u64 commit_time; char *tagp = NULL; diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index f96f85092d1..bd3c073b485 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -276,7 +276,7 @@ static void journal_kill_thread(journal_t *journal) int journal_write_metadata_buffer(transaction_t *transaction, struct journal_head *jh_in, struct journal_head **jh_out, - unsigned long blocknr) + unsigned int blocknr) { int need_copy_out = 0; int done_copy_out = 0; @@ -567,9 +567,9 @@ int log_wait_commit(journal_t *journal, tid_t tid) * Log buffer allocation routines: */ -int journal_next_log_block(journal_t *journal, unsigned long *retp) +int journal_next_log_block(journal_t *journal, unsigned int *retp) { - unsigned long blocknr; + unsigned int blocknr; spin_lock(&journal->j_state_lock); J_ASSERT(journal->j_free > 1); @@ -590,11 +590,11 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp) * this is a no-op. If needed, we can use j_blk_offset - everything is * ready. 
*/ -int journal_bmap(journal_t *journal, unsigned long blocknr, - unsigned long *retp) +int journal_bmap(journal_t *journal, unsigned int blocknr, + unsigned int *retp) { int err = 0; - unsigned long ret; + unsigned int ret; if (journal->j_inode) { ret = bmap(journal->j_inode, blocknr); @@ -604,7 +604,7 @@ int journal_bmap(journal_t *journal, unsigned long blocknr, char b[BDEVNAME_SIZE]; printk(KERN_ALERT "%s: journal block not found " - "at offset %lu on %s\n", + "at offset %u on %s\n", __func__, blocknr, bdevname(journal->j_dev, b)); @@ -630,7 +630,7 @@ int journal_bmap(journal_t *journal, unsigned long blocknr, struct journal_head *journal_get_descriptor_buffer(journal_t *journal) { struct buffer_head *bh; - unsigned long blocknr; + unsigned int blocknr; int err; err = journal_next_log_block(journal, &blocknr); @@ -774,7 +774,7 @@ journal_t * journal_init_inode (struct inode *inode) journal_t *journal = journal_init_common(); int err; int n; - unsigned long blocknr; + unsigned int blocknr; if (!journal) return NULL; @@ -846,12 +846,12 @@ static void journal_fail_superblock (journal_t *journal) static int journal_reset(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; - unsigned long first, last; + unsigned int first, last; first = be32_to_cpu(sb->s_first); last = be32_to_cpu(sb->s_maxlen); if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) { - printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n", + printk(KERN_ERR "JBD: Journal too short (blocks %u-%u).\n", first, last); journal_fail_superblock(journal); return -EINVAL; @@ -885,7 +885,7 @@ static int journal_reset(journal_t *journal) **/ int journal_create(journal_t *journal) { - unsigned long blocknr; + unsigned int blocknr; struct buffer_head *bh; journal_superblock_t *sb; int i, err; @@ -969,14 +969,14 @@ void journal_update_superblock(journal_t *journal, int wait) if (sb->s_start == 0 && journal->j_tail_sequence == journal->j_transaction_sequence) { jbd_debug(1,"JBD: Skipping superblock update on recovered sb " - "(start %ld, seq %d, errno %d)\n", + "(start %u, seq %d, errno %d)\n", journal->j_tail, journal->j_tail_sequence, journal->j_errno); goto out; } spin_lock(&journal->j_state_lock); - jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n", + jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n", journal->j_tail, journal->j_tail_sequence, journal->j_errno); sb->s_sequence = cpu_to_be32(journal->j_tail_sequence); @@ -1371,7 +1371,7 @@ int journal_flush(journal_t *journal) { int err = 0; transaction_t *transaction = NULL; - unsigned long old_tail; + unsigned int old_tail; spin_lock(&journal->j_state_lock); diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index db5e982c5dd..cb1a49ae605 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c @@ -70,7 +70,7 @@ static int do_readahead(journal_t *journal, unsigned int start) { int err; unsigned int max, nbufs, next; - unsigned long blocknr; + unsigned int blocknr; struct buffer_head *bh; struct buffer_head * bufs[MAXBUF]; @@ -132,7 +132,7 @@ static int jread(struct buffer_head **bhp, journal_t *journal, unsigned int offset) { int err; - unsigned long blocknr; + unsigned int blocknr; struct buffer_head *bh; *bhp = NULL; @@ -314,7 +314,7 @@ static int do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass) { unsigned int first_commit_ID, next_commit_ID; - unsigned long next_log_block; + unsigned int next_log_block; int err, success = 0; journal_superblock_t * sb; journal_header_t * tmp; @@ -367,14 
+367,14 @@ static int do_one_pass(journal_t *journal, if (tid_geq(next_commit_ID, info->end_transaction)) break; - jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n", + jbd_debug(2, "Scanning for sequence ID %u at %u/%u\n", next_commit_ID, next_log_block, journal->j_last); /* Skip over each chunk of the transaction looking * either the next descriptor block or the final commit * record. */ - jbd_debug(3, "JBD: checking block %ld\n", next_log_block); + jbd_debug(3, "JBD: checking block %u\n", next_log_block); err = jread(&bh, journal, next_log_block); if (err) goto failed; @@ -429,7 +429,7 @@ static int do_one_pass(journal_t *journal, tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data +sizeof(journal_block_tag_t)) <= journal->j_blocksize) { - unsigned long io_block; + unsigned int io_block; tag = (journal_block_tag_t *) tagp; flags = be32_to_cpu(tag->t_flags); @@ -443,10 +443,10 @@ static int do_one_pass(journal_t *journal, success = err; printk (KERN_ERR "JBD: IO error %d recovering " - "block %ld in log\n", + "block %u in log\n", err, io_block); } else { - unsigned long blocknr; + unsigned int blocknr; J_ASSERT(obh != NULL); blocknr = be32_to_cpu(tag->t_blocknr); @@ -581,7 +581,7 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, max = be32_to_cpu(header->r_count); while (offset < max) { - unsigned long blocknr; + unsigned int blocknr; int err; blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset))); diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c index da6cd9bdaab..ad717328343 100644 --- a/fs/jbd/revoke.c +++ b/fs/jbd/revoke.c @@ -101,7 +101,7 @@ struct jbd_revoke_record_s { struct list_head hash; tid_t sequence; /* Used for recovery only */ - unsigned long blocknr; + unsigned int blocknr; }; @@ -126,7 +126,7 @@ static void flush_descriptor(journal_t *, struct journal_head *, int, int); /* Utility functions to maintain the revoke table */ /* Borrowed from buffer.c: this is a tried and tested block hash function */ -static inline int hash(journal_t *journal, unsigned long block) +static inline int hash(journal_t *journal, unsigned int block) { struct jbd_revoke_table_s *table = journal->j_revoke; int hash_shift = table->hash_shift; @@ -136,7 +136,7 @@ static inline int hash(journal_t *journal, unsigned long block) (block << (hash_shift - 12))) & (table->hash_size - 1); } -static int insert_revoke_hash(journal_t *journal, unsigned long blocknr, +static int insert_revoke_hash(journal_t *journal, unsigned int blocknr, tid_t seq) { struct list_head *hash_list; @@ -166,7 +166,7 @@ oom: /* Find a revoke record in the journal's hash table. */ static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal, - unsigned long blocknr) + unsigned int blocknr) { struct list_head *hash_list; struct jbd_revoke_record_s *record; @@ -332,7 +332,7 @@ void journal_destroy_revoke(journal_t *journal) * by one. 
*/ -int journal_revoke(handle_t *handle, unsigned long blocknr, +int journal_revoke(handle_t *handle, unsigned int blocknr, struct buffer_head *bh_in) { struct buffer_head *bh = NULL; @@ -401,7 +401,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr, } } - jbd_debug(2, "insert revoke for block %lu, bh_in=%p\n", blocknr, bh_in); + jbd_debug(2, "insert revoke for block %u, bh_in=%p\n", blocknr, bh_in); err = insert_revoke_hash(journal, blocknr, handle->h_transaction->t_tid); BUFFER_TRACE(bh_in, "exit"); @@ -644,7 +644,7 @@ static void flush_descriptor(journal_t *journal, */ int journal_set_revoke(journal_t *journal, - unsigned long blocknr, + unsigned int blocknr, tid_t sequence) { struct jbd_revoke_record_s *record; @@ -668,7 +668,7 @@ int journal_set_revoke(journal_t *journal, */ int journal_test_revoke(journal_t *journal, - unsigned long blocknr, + unsigned int blocknr, tid_t sequence) { struct jbd_revoke_record_s *record; diff --git a/include/linux/jbd.h b/include/linux/jbd.h index c2049a04fa0..a1187a0c99b 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -446,7 +446,7 @@ struct transaction_s /* * Where in the log does this transaction's commit start? [no locking] */ - unsigned long t_log_start; + unsigned int t_log_start; /* Number of buffers on the t_buffers list [j_list_lock] */ int t_nr_buffers; @@ -701,26 +701,26 @@ struct journal_s * Journal head: identifies the first unused block in the journal. * [j_state_lock] */ - unsigned long j_head; + unsigned int j_head; /* * Journal tail: identifies the oldest still-used block in the journal. * [j_state_lock] */ - unsigned long j_tail; + unsigned int j_tail; /* * Journal free: how many free blocks are there in the journal? * [j_state_lock] */ - unsigned long j_free; + unsigned int j_free; /* * Journal start and end: the block numbers of the first usable block * and one beyond the last usable block in the journal. [j_state_lock] */ - unsigned long j_first; - unsigned long j_last; + unsigned int j_first; + unsigned int j_last; /* * Device, blocksize and starting block offset for the location where we @@ -728,7 +728,7 @@ struct journal_s */ struct block_device *j_dev; int j_blocksize; - unsigned long j_blk_offset; + unsigned int j_blk_offset; /* * Device which holds the client fs. 
For internal journal this will be @@ -859,7 +859,7 @@ extern void __journal_clean_data_list(transaction_t *transaction); /* Log buffer allocation */ extern struct journal_head * journal_get_descriptor_buffer(journal_t *); -int journal_next_log_block(journal_t *, unsigned long *); +int journal_next_log_block(journal_t *, unsigned int *); /* Commit management */ extern void journal_commit_transaction(journal_t *); @@ -874,7 +874,7 @@ extern int journal_write_metadata_buffer(transaction_t *transaction, struct journal_head *jh_in, struct journal_head **jh_out, - unsigned long blocknr); + unsigned int blocknr); /* Transaction locking */ extern void __wait_on_journal (journal_t *); @@ -942,7 +942,7 @@ extern void journal_abort (journal_t *, int); extern int journal_errno (journal_t *); extern void journal_ack_err (journal_t *); extern int journal_clear_err (journal_t *); -extern int journal_bmap(journal_t *, unsigned long, unsigned long *); +extern int journal_bmap(journal_t *, unsigned int, unsigned int *); extern int journal_force_commit(journal_t *); /* @@ -976,14 +976,14 @@ extern int journal_init_revoke_caches(void); extern void journal_destroy_revoke(journal_t *); extern int journal_revoke (handle_t *, - unsigned long, struct buffer_head *); + unsigned int, struct buffer_head *); extern int journal_cancel_revoke(handle_t *, struct journal_head *); extern void journal_write_revoke_records(journal_t *, transaction_t *, int); /* Recovery revoke support */ -extern int journal_set_revoke(journal_t *, unsigned long, tid_t); -extern int journal_test_revoke(journal_t *, unsigned long, tid_t); +extern int journal_set_revoke(journal_t *, unsigned int, tid_t); +extern int journal_test_revoke(journal_t *, unsigned int, tid_t); extern void journal_clear_revoke(journal_t *); extern void journal_switch_revoke_table(journal_t *journal); -- cgit v1.2.3-70-g09d2 From fb40ba0d98968bc3454731360363d725b4f1064c Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 16 Sep 2009 19:30:40 -0400 Subject: ext4: Add a tracepoint for ext4_alloc_da_blocks() Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 2 ++ include/trace/events/ext4.h | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'include') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a5b4ce40cc6..9887a0c562d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3128,6 +3128,8 @@ out: */ int ext4_alloc_da_blocks(struct inode *inode) { + trace_ext4_alloc_da_blocks(inode); + if (!EXT4_I(inode)->i_reserved_data_blocks && !EXT4_I(inode)->i_reserved_meta_blocks) return 0; diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 6fe6ce9ee07..c1bd8f1e8b9 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -10,6 +10,9 @@ struct ext4_allocation_context; struct ext4_allocation_request; struct ext4_prealloc_space; +struct ext4_inode_info; + +#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) TRACE_EVENT(ext4_free_inode, TP_PROTO(struct inode *inode), @@ -710,6 +713,30 @@ TRACE_EVENT(ext4_sync_fs, __entry->wait) ); +TRACE_EVENT(ext4_alloc_da_blocks, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned int, data_blocks ) + __field( unsigned int, meta_blocks ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks; + __entry->meta_blocks = 
EXT4_I(inode)->i_reserved_meta_blocks; + ), + + TP_printk("dev %s ino %lu data_blocks %u meta_blocks %u", + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->data_blocks, __entry->meta_blocks) +); + #endif /* _TRACE_EXT4_H */ /* This part must be outside protection */ -- cgit v1.2.3-70-g09d2 From 9f7b07d6cc3ed14783c9427a5b2a69794eb2de64 Mon Sep 17 00:00:00 2001 From: Daniel Ribeiro Date: Tue, 23 Jun 2009 12:32:11 -0300 Subject: mfd: Introduce irq_to_pcap() Export an irq_to_pcap function to get pcap irq number, for the keypad driver. Signed-off-by: Daniel Ribeiro Signed-off-by: Samuel Ortiz --- drivers/mfd/ezx-pcap.c | 9 +++++---- include/linux/mfd/ezx-pcap.h | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index c1de4afa89a..de7e63706ab 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -107,10 +107,11 @@ int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value) EXPORT_SYMBOL_GPL(ezx_pcap_read); /* IRQ */ -static inline unsigned int irq2pcap(struct pcap_chip *pcap, int irq) +int irq_to_pcap(struct pcap_chip *pcap, int irq) { - return 1 << (irq - pcap->irq_base); + return irq - pcap->irq_base; } +EXPORT_SYMBOL_GPL(irq_to_pcap); int pcap_to_irq(struct pcap_chip *pcap, int irq) { @@ -122,7 +123,7 @@ static void pcap_mask_irq(unsigned int irq) { struct pcap_chip *pcap = get_irq_chip_data(irq); - pcap->msr |= irq2pcap(pcap, irq); + pcap->msr |= 1 << irq_to_pcap(pcap, irq); queue_work(pcap->workqueue, &pcap->msr_work); } @@ -130,7 +131,7 @@ static void pcap_unmask_irq(unsigned int irq) { struct pcap_chip *pcap = get_irq_chip_data(irq); - pcap->msr &= ~irq2pcap(pcap, irq); + pcap->msr &= ~(1 << irq_to_pcap(pcap, irq)); queue_work(pcap->workqueue, &pcap->msr_work); } diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index c12c3c0932b..6296b4935a1 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -26,6 +26,7 @@ struct pcap_chip; int ezx_pcap_write(struct pcap_chip *, u8, u32); int ezx_pcap_read(struct pcap_chip *, u8, u32 *); int pcap_to_irq(struct pcap_chip *, int); +int irq_to_pcap(struct pcap_chip *, int); int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *); int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]); -- cgit v1.2.3-70-g09d2 From ecd78cbdb989fd593bf4fd69cdb572200e70a553 Mon Sep 17 00:00:00 2001 From: Daniel Ribeiro Date: Tue, 23 Jun 2009 12:33:10 -0300 Subject: mfd: add set_ts_bits for pcap Some TS controller bits are on the same register as the ADC control, save TS specific bits and export a set_ts_bits function so the TS driver can set it with the adc_mutex lock held. 
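For context, a sketch of the intended caller (hypothetical touchscreen code, not part of the patch; only pcap_set_ts_bits(), PCAP_ADC_TS_M_MASK and PCAP_ADC_TS_REF_LOWPWR are taken from the driver below):

    #include <linux/types.h>
    #include <linux/mfd/ezx-pcap.h>

    /* hypothetical helper in the pcap touchscreen driver */
    static void pcap_ts_program_mode(struct pcap_chip *pcap, u32 ts_mode_bits)
    {
            /*
             * pcap_set_ts_bits() masks the value with
             * PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR and takes
             * adc_mutex itself, so the caller needs no extra locking.
             */
            pcap_set_ts_bits(pcap, ts_mode_bits);
    }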
Signed-off-by: Daniel Ribeiro Signed-off-by: Samuel Ortiz --- drivers/mfd/ezx-pcap.c | 22 ++++++++++++++++++---- include/linux/mfd/ezx-pcap.h | 1 + 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index de7e63706ab..c5122024f05 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -195,6 +195,19 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) } /* ADC */ +void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits) +{ + u32 tmp; + + mutex_lock(&pcap->adc_mutex); + ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp); + tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR); + tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR); + ezx_pcap_write(pcap, PCAP_REG_ADC, tmp); + mutex_unlock(&pcap->adc_mutex); +} +EXPORT_SYMBOL_GPL(pcap_set_ts_bits); + static void pcap_disable_adc(struct pcap_chip *pcap) { u32 tmp; @@ -217,15 +230,16 @@ static void pcap_adc_trigger(struct pcap_chip *pcap) mutex_unlock(&pcap->adc_mutex); return; } - mutex_unlock(&pcap->adc_mutex); - - /* start conversion on requested bank */ - tmp = pcap->adc_queue[head]->flags | PCAP_ADC_ADEN; + /* start conversion on requested bank, save TS_M bits */ + ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp); + tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR); + tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN; if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1) tmp |= PCAP_ADC_AD_SEL1; ezx_pcap_write(pcap, PCAP_REG_ADC, tmp); + mutex_unlock(&pcap->adc_mutex); ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC); } diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index 6296b4935a1..b15caacf072 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -29,6 +29,7 @@ int pcap_to_irq(struct pcap_chip *, int); int irq_to_pcap(struct pcap_chip *, int); int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *); int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]); +void pcap_set_ts_bits(struct pcap_chip *, u32); #define PCAP_SECOND_PORT 1 #define PCAP_CS_AH 2 -- cgit v1.2.3-70-g09d2 From e9a22635b0d794d0cb242ffb0249f7b2a410bca2 Mon Sep 17 00:00:00 2001 From: Daniel Ribeiro Date: Sat, 27 Jun 2009 00:17:20 -0300 Subject: mfd: add ezx_pcap_setbits Provides an atomic set_bits functions, as needed by the pcap-regulator driver. 
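To illustrate why an atomic helper is wanted, a hypothetical caller in the style of the pcap-regulator driver (the register and bit arguments are placeholders; only the ezx_pcap_* calls come from the driver below):

    #include <linux/types.h>
    #include <linux/mfd/ezx-pcap.h>

    /* hypothetical regulator-style caller: flip one enable bit atomically */
    static int pcap_rail_enable(struct pcap_chip *pcap, u8 reg, u32 enable_bit,
                                bool on)
    {
            /*
             * Open-coding this as ezx_pcap_read() + ezx_pcap_write() would let
             * another writer slip in between the two transfers;
             * ezx_pcap_set_bits() holds io_mutex across the whole
             * read-modify-write.
             */
            return ezx_pcap_set_bits(pcap, reg, enable_bit, on ? enable_bit : 0);
    }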
Signed-off-by: Daniel Ribeiro Signed-off-by: Samuel Ortiz --- drivers/mfd/ezx-pcap.c | 23 +++++++++++++++++++++++ include/linux/mfd/ezx-pcap.h | 1 + 2 files changed, 24 insertions(+) (limited to 'include') diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 732664f238f..86d394894d8 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -107,6 +107,29 @@ int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value) } EXPORT_SYMBOL_GPL(ezx_pcap_read); +int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val) +{ + int ret; + u32 tmp = PCAP_REGISTER_READ_OP_BIT | + (reg_num << PCAP_REGISTER_ADDRESS_SHIFT); + + mutex_lock(&pcap->io_mutex); + ret = ezx_pcap_putget(pcap, &tmp); + if (ret) + goto out_unlock; + + tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask); + tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT | + (reg_num << PCAP_REGISTER_ADDRESS_SHIFT); + + ret = ezx_pcap_putget(pcap, &tmp); +out_unlock: + mutex_unlock(&pcap->io_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(ezx_pcap_set_bits); + /* IRQ */ int irq_to_pcap(struct pcap_chip *pcap, int irq) { diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index b15caacf072..dec82b0b05f 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -25,6 +25,7 @@ struct pcap_chip; int ezx_pcap_write(struct pcap_chip *, u8, u32); int ezx_pcap_read(struct pcap_chip *, u8, u32 *); +int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32); int pcap_to_irq(struct pcap_chip *, int); int irq_to_pcap(struct pcap_chip *, int); int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *); -- cgit v1.2.3-70-g09d2 From fb6c023a2b845df1ec383b74644ac35a4bbb76b6 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 20 Jul 2009 12:43:45 +0100 Subject: hwmon: Add WM835x PMIC hardware monitoring driver This driver provides reporting of the status supply voltage rails of the WM835x series of PMICs via the hwmon API. Signed-off-by: Mark Brown Acked-by: Jean Delvare Signed-off-by: Samuel Ortiz --- Documentation/hwmon/wm8350 | 26 +++++++ drivers/hwmon/Kconfig | 10 +++ drivers/hwmon/Makefile | 1 + drivers/hwmon/wm8350-hwmon.c | 151 ++++++++++++++++++++++++++++++++++++++++ drivers/mfd/wm8350-core.c | 3 + include/linux/mfd/wm8350/core.h | 6 ++ 6 files changed, 197 insertions(+) create mode 100644 Documentation/hwmon/wm8350 create mode 100644 drivers/hwmon/wm8350-hwmon.c (limited to 'include') diff --git a/Documentation/hwmon/wm8350 b/Documentation/hwmon/wm8350 new file mode 100644 index 00000000000..98f923bd2e9 --- /dev/null +++ b/Documentation/hwmon/wm8350 @@ -0,0 +1,26 @@ +Kernel driver wm8350-hwmon +========================== + +Supported chips: + * Wolfson Microelectronics WM835x PMICs + Prefix: 'wm8350' + Datasheet: + http://www.wolfsonmicro.com/products/WM8350 + http://www.wolfsonmicro.com/products/WM8351 + http://www.wolfsonmicro.com/products/WM8352 + +Authors: Mark Brown + +Description +----------- + +The WM835x series of PMICs include an AUXADC which can be used to +monitor a range of system operating parameters, including the voltages +of the major supplies within the system. Currently the driver provides +simple access to these major supplies. + +Voltage Monitoring +------------------ + +Voltages are sampled by a 12 bit ADC. For the internal supplies the ADC +is referenced to the system VRTC. 
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 2e25b7a827d..120085ce651 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -937,6 +937,16 @@ config SENSORS_W83627EHF This driver can also be built as a module. If so, the module will be called w83627ehf. +config SENSORS_WM8350 + tristate "Wolfson Microelectronics WM835x" + depends on MFD_WM8350 + help + If you say yes here you get support for the hardware + monitoring features of the WM835x series of PMICs. + + This driver can also be built as a module. If so, the module + will be called wm8350-hwmon. + config SENSORS_ULTRA45 tristate "Sun Ultra45 PIC16F747" depends on SPARC64 diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 7f239a247c3..ed5f1781b8c 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_SENSORS_VT8231) += vt8231.o obj-$(CONFIG_SENSORS_W83627EHF) += w83627ehf.o obj-$(CONFIG_SENSORS_W83L785TS) += w83l785ts.o obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o +obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y) EXTRA_CFLAGS += -DDEBUG diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c new file mode 100644 index 00000000000..13290595ca8 --- /dev/null +++ b/drivers/hwmon/wm8350-hwmon.c @@ -0,0 +1,151 @@ +/* + * drivers/hwmon/wm8350-hwmon.c - Wolfson Microelectronics WM8350 PMIC + * hardware monitoring features. + * + * Copyright (C) 2009 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License v2 as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +static ssize_t show_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "wm8350\n"); +} + +static const char *input_names[] = { + [WM8350_AUXADC_USB] = "USB", + [WM8350_AUXADC_LINE] = "Line", + [WM8350_AUXADC_BATT] = "Battery", +}; + + +static ssize_t show_voltage(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wm8350 *wm8350 = dev_get_drvdata(dev); + int channel = to_sensor_dev_attr(attr)->index; + int val; + + val = wm8350_read_auxadc(wm8350, channel, 0, 0) * WM8350_AUX_COEFF; + val = DIV_ROUND_CLOSEST(val, 1000); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t show_label(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int channel = to_sensor_dev_attr(attr)->index; + + return sprintf(buf, "%s\n", input_names[channel]); +} + +#define WM8350_NAMED_VOLTAGE(id, name) \ + static SENSOR_DEVICE_ATTR(in##id##_input, S_IRUGO, show_voltage,\ + NULL, name); \ + static SENSOR_DEVICE_ATTR(in##id##_label, S_IRUGO, show_label, \ + NULL, name) + +static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); + +WM8350_NAMED_VOLTAGE(0, WM8350_AUXADC_USB); +WM8350_NAMED_VOLTAGE(1, WM8350_AUXADC_BATT); +WM8350_NAMED_VOLTAGE(2, WM8350_AUXADC_LINE); + +static struct attribute *wm8350_attributes[] = { + &dev_attr_name.attr, + + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_label.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in1_label.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in2_label.dev_attr.attr, + + NULL, +}; + +static const struct attribute_group wm8350_attr_group = { + .attrs = wm8350_attributes, +}; + +static int __devinit wm8350_hwmon_probe(struct platform_device *pdev) +{ + struct wm8350 *wm8350 = platform_get_drvdata(pdev); + int ret; + + ret = sysfs_create_group(&pdev->dev.kobj, &wm8350_attr_group); + if (ret) + goto err; + + wm8350->hwmon.classdev = hwmon_device_register(&pdev->dev); + if (IS_ERR(wm8350->hwmon.classdev)) { + ret = PTR_ERR(wm8350->hwmon.classdev); + goto err_group; + } + + return 0; + +err_group: + sysfs_remove_group(&pdev->dev.kobj, &wm8350_attr_group); +err: + return ret; +} + +static int __devexit wm8350_hwmon_remove(struct platform_device *pdev) +{ + struct wm8350 *wm8350 = platform_get_drvdata(pdev); + + hwmon_device_unregister(wm8350->hwmon.classdev); + sysfs_remove_group(&pdev->dev.kobj, &wm8350_attr_group); + + return 0; +} + +static struct platform_driver wm8350_hwmon_driver = { + .probe = wm8350_hwmon_probe, + .remove = __devexit_p(wm8350_hwmon_remove), + .driver = { + .name = "wm8350-hwmon", + .owner = THIS_MODULE, + }, +}; + +static int __init wm8350_hwmon_init(void) +{ + return platform_driver_register(&wm8350_hwmon_driver); +} +module_init(wm8350_hwmon_init); + +static void __exit wm8350_hwmon_exit(void) +{ + platform_driver_unregister(&wm8350_hwmon_driver); +} +module_exit(wm8350_hwmon_exit); + +MODULE_AUTHOR("Mark Brown "); +MODULE_DESCRIPTION("WM8350 Hardware Monitoring"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm8350-hwmon"); diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index fe24079387c..9d662a576a4 100644 --- a/drivers/mfd/wm8350-core.c +++ 
b/drivers/mfd/wm8350-core.c @@ -1472,6 +1472,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq, &(wm8350->codec.pdev)); wm8350_client_dev_register(wm8350, "wm8350-gpio", &(wm8350->gpio.pdev)); + wm8350_client_dev_register(wm8350, "wm8350-hwmon", + &(wm8350->hwmon.pdev)); wm8350_client_dev_register(wm8350, "wm8350-power", &(wm8350->power.pdev)); wm8350_client_dev_register(wm8350, "wm8350-rtc", &(wm8350->rtc.pdev)); @@ -1498,6 +1500,7 @@ void wm8350_device_exit(struct wm8350 *wm8350) platform_device_unregister(wm8350->wdt.pdev); platform_device_unregister(wm8350->rtc.pdev); platform_device_unregister(wm8350->power.pdev); + platform_device_unregister(wm8350->hwmon.pdev); platform_device_unregister(wm8350->gpio.pdev); platform_device_unregister(wm8350->codec.pdev); diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 42cca672f34..969b0b55615 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -605,6 +605,11 @@ struct wm8350_irq { void *data; }; +struct wm8350_hwmon { + struct platform_device *pdev; + struct device *classdev; +}; + struct wm8350 { struct device *dev; @@ -629,6 +634,7 @@ struct wm8350 { /* Client devices */ struct wm8350_codec codec; struct wm8350_gpio gpio; + struct wm8350_hwmon hwmon; struct wm8350_pmic pmic; struct wm8350_power power; struct wm8350_rtc rtc; -- cgit v1.2.3-70-g09d2 From ed52e62ebec9e703eb0b69704feaf1b6e847d882 Mon Sep 17 00:00:00 2001 From: Paul Fertser Date: Tue, 28 Jul 2009 00:41:15 +0400 Subject: mfd: use a dedicated workqueue for pcf50633 irq processing Using the default kernel "events" workqueue causes problems with synchronous adc readings if initiated from some task on the same workqueue. I had a deadlock trying to use pcf50633_adc_sync_read from a power_supply class driver because the reading was initiated from the workqueue and it waited for the irq processing to complete (to get the result) and that was put on the same workqueue. 
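To make the deadlock concrete, a hypothetical sketch (not code from any real driver; the pcf50633_adc_sync_read() signature is assumed, and it is taken to block on a completion that is only signalled from pcf50633_irq_worker()):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/mfd/pcf50633/core.h>
    #include <linux/mfd/pcf50633/adc.h>

    struct batt_monitor {
            struct pcf50633 *pcf;
            int mux;                        /* ADC channel to sample */
            struct work_struct work;        /* scheduled with schedule_work() */
    };

    static void batt_poll_work(struct work_struct *work)
    {
            struct batt_monitor *bm = container_of(work, struct batt_monitor, work);
            int val;

            /*
             * Blocks until the conversion result arrives.  The result is
             * delivered from pcf50633_irq_worker(); before this patch that
             * worker was queued on the same shared "events" workqueue --
             * behind this very work item -- so the wait could never finish.
             * Queueing the IRQ work on its own "pcf50633" workqueue breaks
             * the cycle.
             */
            val = pcf50633_adc_sync_read(bm->pcf, bm->mux,
                                         PCF50633_ADCC1_AVERAGE_8);
            (void)val;
    }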
Signed-off-by: Paul Fertser Signed-off-by: Samuel Ortiz --- drivers/mfd/pcf50633-core.c | 5 ++++- include/linux/mfd/pcf50633/core.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 8d3c38bf971..d26d7747175 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -444,7 +444,7 @@ static irqreturn_t pcf50633_irq(int irq, void *data) get_device(pcf->dev); disable_irq_nosync(pcf->irq); - schedule_work(&pcf->irq_work); + queue_work(pcf->work_queue, &pcf->irq_work); return IRQ_HANDLED; } @@ -575,6 +575,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client, pcf->dev = &client->dev; pcf->i2c_client = client; pcf->irq = client->irq; + pcf->work_queue = create_singlethread_workqueue("pcf50633"); INIT_WORK(&pcf->irq_work, pcf50633_irq_worker); @@ -651,6 +652,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client, return 0; err: + destroy_workqueue(pcf->work_queue); kfree(pcf); return ret; } @@ -661,6 +663,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client) int i; free_irq(pcf->irq, pcf); + destroy_workqueue(pcf->work_queue); platform_device_unregister(pcf->input_pdev); platform_device_unregister(pcf->rtc_pdev); diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index c8f51c3c0a7..9aba7b779fb 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -136,6 +136,7 @@ struct pcf50633 { int irq; struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ]; struct work_struct irq_work; + struct workqueue_struct *work_queue; struct mutex lock; u8 mask_regs[5]; -- cgit v1.2.3-70-g09d2 From 8d360d8c03e1e8514bbaf606b1cd3b818dfc445d Mon Sep 17 00:00:00 2001 From: Paul Fertser Date: Tue, 28 Jul 2009 01:09:04 +0400 Subject: mfd: fix wrong define for 10bit pcf50633 ADC mode The 10 bits definition was the 8 bits one. Signed-off-by: Paul Fertser Signed-off-by: Samuel Ortiz --- include/linux/mfd/pcf50633/adc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h index 56669b4183a..b35e62801ff 100644 --- a/include/linux/mfd/pcf50633/adc.h +++ b/include/linux/mfd/pcf50633/adc.h @@ -25,7 +25,8 @@ #define PCF50633_REG_ADCS3 0x57 #define PCF50633_ADCC1_ADCSTART 0x01 -#define PCF50633_ADCC1_RES_10BIT 0x02 +#define PCF50633_ADCC1_RES_8BIT 0x02 +#define PCF50633_ADCC1_RES_10BIT 0x00 #define PCF50633_ADCC1_AVERAGE_NO 0x00 #define PCF50633_ADCC1_AVERAGE_4 0x04 #define PCF50633_ADCC1_AVERAGE_8 0x08 -- cgit v1.2.3-70-g09d2 From 327bc3a3efa408fb285948bfef112a6c58dfb375 Mon Sep 17 00:00:00 2001 From: Daniel Ribeiro Date: Fri, 31 Jul 2009 12:38:02 +0200 Subject: mfd: Remove VIB defines from pcap header file Vibrator will be accessed via the pcap-regulator driver, no need to expose its bits in the header file. 
Signed-off-by: Daniel Ribeiro Signed-off-by: Samuel Ortiz --- include/linux/mfd/ezx-pcap.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index dec82b0b05f..e5124ceea76 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -227,7 +227,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32); #define PCAP_LED1 1 #define PCAP_BL0 2 #define PCAP_BL1 3 -#define PCAP_VIB 4 #define PCAP_LED_3MA 0 #define PCAP_LED_4MA 1 #define PCAP_LED_5MA 2 @@ -246,9 +245,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32); #define PCAP_LED0_C_SHIFT 15 #define PCAP_LED1_C_SHIFT 17 #define PCAP_BL1_SHIFT 20 -#define PCAP_VIB_MASK 0x3 -#define PCAP_VIB_SHIFT 20 -#define PCAP_VIB_EN (1 << 19) /* RTC */ #define PCAP_RTC_DAY_MASK 0x3fff -- cgit v1.2.3-70-g09d2 From 3bed6e415fc2cbf8d706848a62a48aebe84435e5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:45:51 +0100 Subject: mfd: Allow multiple MFD cells with the same name Provide basic support for MFDs having multiple cells of a given type with different IDs by adding an id to the mfd_cell structure and then adding that to the id passed in to mfd_add_devices(). As it stands this approach requires that MFDs using this feature deal with ensuring that there aren't any ID collisions resulting from multiple MFDs of the same type being instantiated. This needs to happen with the existing code too, but with this approach there is a knock on effect on the IDs for non-duplicated devices. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/mfd-core.c | 2 +- include/linux/mfd/core.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 54ddf3772e0..ae15e495e20 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -25,7 +25,7 @@ static int mfd_add_device(struct device *parent, int id, int ret = -ENOMEM; int r; - pdev = platform_device_alloc(cell->name, id); + pdev = platform_device_alloc(cell->name, id + cell->id); if (!pdev) goto fail_alloc; diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 49ef857cdb2..11d740b8831 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -23,6 +23,7 @@ */ struct mfd_cell { const char *name; + int id; int (*enable)(struct platform_device *dev); int (*disable)(struct platform_device *dev); -- cgit v1.2.3-70-g09d2 From d2bedfe7a8b2f34beee2cad9cae74a088ee8ed07 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:45:52 +0100 Subject: mfd: Initial core support for WM831x series devices The WM831x series of devices are register compatible processor power management subsystems, providing regulator and power path management facilities along with other services like watchdog, RTC and touch panel controllers. This patch adds very basic support, providing basic single register I2C access, handling of the security key and registration of the devices. 
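As a pointer to how the security key is meant to be used, a sketch of a hypothetical caller (for example a watchdog driver; the wrapper name is made up, while the wm831x_reg_* helpers and WM831X_WATCHDOG come from this patch):

    #include <linux/mfd/wm831x/core.h>

    /* WM831X_WATCHDOG is one of the key-protected registers, so writes must
     * be bracketed by the unlock/lock helpers added here.
     */
    static int wm831x_write_watchdog(struct wm831x *wm831x, unsigned short val)
    {
            int ret;

            ret = wm831x_reg_unlock(wm831x);        /* writes 0x9716 to SECURITY_KEY */
            if (ret != 0)
                    return ret;

            ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, val);

            wm831x_reg_lock(wm831x);                /* re-arm the write protection */
            return ret;
    }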
Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 1357 ++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/core.h | 247 +++++++ include/linux/mfd/wm831x/pdata.h | 107 +++ 3 files changed, 1711 insertions(+) create mode 100644 drivers/mfd/wm831x-core.c create mode 100644 include/linux/mfd/wm831x/core.h create mode 100644 include/linux/mfd/wm831x/pdata.h (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c new file mode 100644 index 00000000000..cc1040c9d46 --- /dev/null +++ b/drivers/mfd/wm831x-core.c @@ -0,0 +1,1357 @@ +/* + * wm831x-core.c -- Device access for Wolfson WM831x PMICs + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include + +#include +#include + +enum wm831x_parent { + WM8310 = 0, + WM8311 = 1, + WM8312 = 2, +}; + +static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg) +{ + if (!wm831x->locked) + return 0; + + switch (reg) { + case WM831X_WATCHDOG: + case WM831X_DC4_CONTROL: + case WM831X_ON_PIN_CONTROL: + case WM831X_BACKUP_CHARGER_CONTROL: + case WM831X_CHARGER_CONTROL_1: + case WM831X_CHARGER_CONTROL_2: + return 1; + + default: + return 0; + } +} + +/** + * wm831x_reg_unlock: Unlock user keyed registers + * + * The WM831x has a user key preventing writes to particularly + * critical registers. This function locks those registers, + * allowing writes to them. + */ +void wm831x_reg_lock(struct wm831x *wm831x) +{ + int ret; + + ret = wm831x_reg_write(wm831x, WM831X_SECURITY_KEY, 0); + if (ret == 0) { + dev_vdbg(wm831x->dev, "Registers locked\n"); + + mutex_lock(&wm831x->io_lock); + WARN_ON(wm831x->locked); + wm831x->locked = 1; + mutex_unlock(&wm831x->io_lock); + } else { + dev_err(wm831x->dev, "Failed to lock registers: %d\n", ret); + } + +} +EXPORT_SYMBOL_GPL(wm831x_reg_lock); + +/** + * wm831x_reg_unlock: Unlock user keyed registers + * + * The WM831x has a user key preventing writes to particularly + * critical registers. This function locks those registers, + * preventing spurious writes. + */ +int wm831x_reg_unlock(struct wm831x *wm831x) +{ + int ret; + + /* 0x9716 is the value required to unlock the registers */ + ret = wm831x_reg_write(wm831x, WM831X_SECURITY_KEY, 0x9716); + if (ret == 0) { + dev_vdbg(wm831x->dev, "Registers unlocked\n"); + + mutex_lock(&wm831x->io_lock); + WARN_ON(!wm831x->locked); + wm831x->locked = 0; + mutex_unlock(&wm831x->io_lock); + } + + return ret; +} +EXPORT_SYMBOL_GPL(wm831x_reg_unlock); + +static int wm831x_read(struct wm831x *wm831x, unsigned short reg, + int bytes, void *dest) +{ + int ret, i; + u16 *buf = dest; + + BUG_ON(bytes % 2); + BUG_ON(bytes <= 0); + + ret = wm831x->read_dev(wm831x, reg, bytes, dest); + if (ret < 0) + return ret; + + for (i = 0; i < bytes / 2; i++) { + buf[i] = be16_to_cpu(buf[i]); + + dev_vdbg(wm831x->dev, "Read %04x from R%d(0x%x)\n", + buf[i], reg + i, reg + i); + } + + return 0; +} + +/** + * wm831x_reg_read: Read a single WM831x register. + * + * @wm831x: Device to read from. + * @reg: Register to read. 
+ */ +int wm831x_reg_read(struct wm831x *wm831x, unsigned short reg) +{ + unsigned short val; + int ret; + + mutex_lock(&wm831x->io_lock); + + ret = wm831x_read(wm831x, reg, 2, &val); + + mutex_unlock(&wm831x->io_lock); + + if (ret < 0) + return ret; + else + return val; +} +EXPORT_SYMBOL_GPL(wm831x_reg_read); + +/** + * wm831x_bulk_read: Read multiple WM831x registers + * + * @wm831x: Device to read from + * @reg: First register + * @count: Number of registers + * @buf: Buffer to fill. + */ +int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, + int count, u16 *buf) +{ + int ret; + + mutex_lock(&wm831x->io_lock); + + ret = wm831x_read(wm831x, reg, count * 2, buf); + + mutex_unlock(&wm831x->io_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(wm831x_bulk_read); + +static int wm831x_write(struct wm831x *wm831x, unsigned short reg, + int bytes, void *src) +{ + u16 *buf = src; + int i; + + BUG_ON(bytes % 2); + BUG_ON(bytes <= 0); + + for (i = 0; i < bytes / 2; i++) { + if (wm831x_reg_locked(wm831x, reg)) + return -EPERM; + + dev_vdbg(wm831x->dev, "Write %04x to R%d(0x%x)\n", + buf[i], reg + i, reg + i); + + buf[i] = cpu_to_be16(buf[i]); + } + + return wm831x->write_dev(wm831x, reg, bytes, src); +} + +/** + * wm831x_reg_write: Write a single WM831x register. + * + * @wm831x: Device to write to. + * @reg: Register to write to. + * @val: Value to write. + */ +int wm831x_reg_write(struct wm831x *wm831x, unsigned short reg, + unsigned short val) +{ + int ret; + + mutex_lock(&wm831x->io_lock); + + ret = wm831x_write(wm831x, reg, 2, &val); + + mutex_unlock(&wm831x->io_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(wm831x_reg_write); + +/** + * wm831x_set_bits: Set the value of a bitfield in a WM831x register + * + * @wm831x: Device to write to. + * @reg: Register to write to. + * @mask: Mask of bits to set. 
+ * @val: Value to set (unshifted) + */ +int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg, + unsigned short mask, unsigned short val) +{ + int ret; + u16 r; + + mutex_lock(&wm831x->io_lock); + + ret = wm831x_read(wm831x, reg, 2, &r); + if (ret < 0) + goto out; + + r &= ~mask; + r |= val; + + ret = wm831x_write(wm831x, reg, 2, &r); + +out: + mutex_unlock(&wm831x->io_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(wm831x_set_bits); + +static struct resource wm831x_dcdc1_resources[] = { + { + .start = WM831X_DC1_CONTROL_1, + .end = WM831X_DC1_DVS_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_DC1, + .end = WM831X_IRQ_UV_DC1, + .flags = IORESOURCE_IRQ, + }, + { + .name = "HC", + .start = WM831X_IRQ_HC_DC1, + .end = WM831X_IRQ_HC_DC1, + .flags = IORESOURCE_IRQ, + }, +}; + + +static struct resource wm831x_dcdc2_resources[] = { + { + .start = WM831X_DC2_CONTROL_1, + .end = WM831X_DC2_DVS_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_DC2, + .end = WM831X_IRQ_UV_DC2, + .flags = IORESOURCE_IRQ, + }, + { + .name = "HC", + .start = WM831X_IRQ_HC_DC2, + .end = WM831X_IRQ_HC_DC2, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_dcdc3_resources[] = { + { + .start = WM831X_DC3_CONTROL_1, + .end = WM831X_DC3_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_DC3, + .end = WM831X_IRQ_UV_DC3, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_dcdc4_resources[] = { + { + .start = WM831X_DC4_CONTROL, + .end = WM831X_DC4_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_DC4, + .end = WM831X_IRQ_UV_DC4, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_gpio_resources[] = { + { + .start = WM831X_IRQ_GPIO_1, + .end = WM831X_IRQ_GPIO_16, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_isink1_resources[] = { + { + .start = WM831X_CURRENT_SINK_1, + .end = WM831X_CURRENT_SINK_1, + .flags = IORESOURCE_IO, + }, + { + .start = WM831X_IRQ_CS1, + .end = WM831X_IRQ_CS1, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_isink2_resources[] = { + { + .start = WM831X_CURRENT_SINK_2, + .end = WM831X_CURRENT_SINK_2, + .flags = IORESOURCE_IO, + }, + { + .start = WM831X_IRQ_CS2, + .end = WM831X_IRQ_CS2, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo1_resources[] = { + { + .start = WM831X_LDO1_CONTROL, + .end = WM831X_LDO1_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO1, + .end = WM831X_IRQ_UV_LDO1, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo2_resources[] = { + { + .start = WM831X_LDO2_CONTROL, + .end = WM831X_LDO2_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO2, + .end = WM831X_IRQ_UV_LDO2, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo3_resources[] = { + { + .start = WM831X_LDO3_CONTROL, + .end = WM831X_LDO3_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO3, + .end = WM831X_IRQ_UV_LDO3, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo4_resources[] = { + { + .start = WM831X_LDO4_CONTROL, + .end = WM831X_LDO4_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO4, + .end = WM831X_IRQ_UV_LDO4, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo5_resources[] = { + { + 
.start = WM831X_LDO5_CONTROL, + .end = WM831X_LDO5_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO5, + .end = WM831X_IRQ_UV_LDO5, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo6_resources[] = { + { + .start = WM831X_LDO6_CONTROL, + .end = WM831X_LDO6_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO6, + .end = WM831X_IRQ_UV_LDO6, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo7_resources[] = { + { + .start = WM831X_LDO7_CONTROL, + .end = WM831X_LDO7_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO7, + .end = WM831X_IRQ_UV_LDO7, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo8_resources[] = { + { + .start = WM831X_LDO8_CONTROL, + .end = WM831X_LDO8_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO8, + .end = WM831X_IRQ_UV_LDO8, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo9_resources[] = { + { + .start = WM831X_LDO9_CONTROL, + .end = WM831X_LDO9_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO9, + .end = WM831X_IRQ_UV_LDO9, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo10_resources[] = { + { + .start = WM831X_LDO10_CONTROL, + .end = WM831X_LDO10_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_LDO10, + .end = WM831X_IRQ_UV_LDO10, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_ldo11_resources[] = { + { + .start = WM831X_LDO11_ON_CONTROL, + .end = WM831X_LDO11_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, +}; + +static struct resource wm831x_on_resources[] = { + { + .start = WM831X_IRQ_ON, + .end = WM831X_IRQ_ON, + .flags = IORESOURCE_IRQ, + }, +}; + + +static struct resource wm831x_power_resources[] = { + { + .name = "SYSLO", + .start = WM831X_IRQ_PPM_SYSLO, + .end = WM831X_IRQ_PPM_SYSLO, + .flags = IORESOURCE_IRQ, + }, + { + .name = "PWR SRC", + .start = WM831X_IRQ_PPM_PWR_SRC, + .end = WM831X_IRQ_PPM_PWR_SRC, + .flags = IORESOURCE_IRQ, + }, + { + .name = "USB CURR", + .start = WM831X_IRQ_PPM_USB_CURR, + .end = WM831X_IRQ_PPM_USB_CURR, + .flags = IORESOURCE_IRQ, + }, + { + .name = "BATT HOT", + .start = WM831X_IRQ_CHG_BATT_HOT, + .end = WM831X_IRQ_CHG_BATT_HOT, + .flags = IORESOURCE_IRQ, + }, + { + .name = "BATT COLD", + .start = WM831X_IRQ_CHG_BATT_COLD, + .end = WM831X_IRQ_CHG_BATT_COLD, + .flags = IORESOURCE_IRQ, + }, + { + .name = "BATT FAIL", + .start = WM831X_IRQ_CHG_BATT_FAIL, + .end = WM831X_IRQ_CHG_BATT_FAIL, + .flags = IORESOURCE_IRQ, + }, + { + .name = "OV", + .start = WM831X_IRQ_CHG_OV, + .end = WM831X_IRQ_CHG_OV, + .flags = IORESOURCE_IRQ, + }, + { + .name = "END", + .start = WM831X_IRQ_CHG_END, + .end = WM831X_IRQ_CHG_END, + .flags = IORESOURCE_IRQ, + }, + { + .name = "TO", + .start = WM831X_IRQ_CHG_TO, + .end = WM831X_IRQ_CHG_TO, + .flags = IORESOURCE_IRQ, + }, + { + .name = "MODE", + .start = WM831X_IRQ_CHG_MODE, + .end = WM831X_IRQ_CHG_MODE, + .flags = IORESOURCE_IRQ, + }, + { + .name = "START", + .start = WM831X_IRQ_CHG_START, + .end = WM831X_IRQ_CHG_START, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_rtc_resources[] = { + { + .name = "PER", + .start = WM831X_IRQ_RTC_PER, + .end = WM831X_IRQ_RTC_PER, + .flags = IORESOURCE_IRQ, + }, + { + .name = "ALM", + .start = WM831X_IRQ_RTC_ALM, + .end = WM831X_IRQ_RTC_ALM, + .flags = 
IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_status1_resources[] = { + { + .start = WM831X_STATUS_LED_1, + .end = WM831X_STATUS_LED_1, + .flags = IORESOURCE_IO, + }, +}; + +static struct resource wm831x_status2_resources[] = { + { + .start = WM831X_STATUS_LED_2, + .end = WM831X_STATUS_LED_2, + .flags = IORESOURCE_IO, + }, +}; + +static struct resource wm831x_touch_resources[] = { + { + .name = "TCHPD", + .start = WM831X_IRQ_TCHPD, + .end = WM831X_IRQ_TCHPD, + .flags = IORESOURCE_IRQ, + }, + { + .name = "TCHDATA", + .start = WM831X_IRQ_TCHDATA, + .end = WM831X_IRQ_TCHDATA, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource wm831x_wdt_resources[] = { + { + .start = WM831X_IRQ_WDOG_TO, + .end = WM831X_IRQ_WDOG_TO, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell wm8310_devs[] = { + { + .name = "wm831x-buckv", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), + .resources = wm831x_dcdc1_resources, + }, + { + .name = "wm831x-buckv", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_dcdc2_resources), + .resources = wm831x_dcdc2_resources, + }, + { + .name = "wm831x-buckp", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_dcdc3_resources), + .resources = wm831x_dcdc3_resources, + }, + { + .name = "wm831x-boostp", + .id = 4, + .num_resources = ARRAY_SIZE(wm831x_dcdc4_resources), + .resources = wm831x_dcdc4_resources, + }, + { + .name = "wm831x-epe", + .id = 1, + }, + { + .name = "wm831x-epe", + .id = 2, + }, + { + .name = "wm831x-gpio", + .num_resources = ARRAY_SIZE(wm831x_gpio_resources), + .resources = wm831x_gpio_resources, + }, + { + .name = "wm831x-hwmon", + }, + { + .name = "wm831x-isink", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_isink1_resources), + .resources = wm831x_isink1_resources, + }, + { + .name = "wm831x-isink", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_isink2_resources), + .resources = wm831x_isink2_resources, + }, + { + .name = "wm831x-ldo", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_ldo1_resources), + .resources = wm831x_ldo1_resources, + }, + { + .name = "wm831x-ldo", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_ldo2_resources), + .resources = wm831x_ldo2_resources, + }, + { + .name = "wm831x-ldo", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_ldo3_resources), + .resources = wm831x_ldo3_resources, + }, + { + .name = "wm831x-ldo", + .id = 4, + .num_resources = ARRAY_SIZE(wm831x_ldo4_resources), + .resources = wm831x_ldo4_resources, + }, + { + .name = "wm831x-ldo", + .id = 5, + .num_resources = ARRAY_SIZE(wm831x_ldo5_resources), + .resources = wm831x_ldo5_resources, + }, + { + .name = "wm831x-ldo", + .id = 6, + .num_resources = ARRAY_SIZE(wm831x_ldo6_resources), + .resources = wm831x_ldo6_resources, + }, + { + .name = "wm831x-aldo", + .id = 7, + .num_resources = ARRAY_SIZE(wm831x_ldo7_resources), + .resources = wm831x_ldo7_resources, + }, + { + .name = "wm831x-aldo", + .id = 8, + .num_resources = ARRAY_SIZE(wm831x_ldo8_resources), + .resources = wm831x_ldo8_resources, + }, + { + .name = "wm831x-aldo", + .id = 9, + .num_resources = ARRAY_SIZE(wm831x_ldo9_resources), + .resources = wm831x_ldo9_resources, + }, + { + .name = "wm831x-aldo", + .id = 10, + .num_resources = ARRAY_SIZE(wm831x_ldo10_resources), + .resources = wm831x_ldo10_resources, + }, + { + .name = "wm831x-alive-ldo", + .id = 11, + .num_resources = ARRAY_SIZE(wm831x_ldo11_resources), + .resources = wm831x_ldo11_resources, + }, + { + .name = "wm831x-on", + .num_resources = ARRAY_SIZE(wm831x_on_resources), + .resources = wm831x_on_resources, + }, + { 
+ .name = "wm831x-power", + .num_resources = ARRAY_SIZE(wm831x_power_resources), + .resources = wm831x_power_resources, + }, + { + .name = "wm831x-rtc", + .num_resources = ARRAY_SIZE(wm831x_rtc_resources), + .resources = wm831x_rtc_resources, + }, + { + .name = "wm831x-status", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_status1_resources), + .resources = wm831x_status1_resources, + }, + { + .name = "wm831x-status", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_status2_resources), + .resources = wm831x_status2_resources, + }, + { + .name = "wm831x-watchdog", + .num_resources = ARRAY_SIZE(wm831x_wdt_resources), + .resources = wm831x_wdt_resources, + }, +}; + +static struct mfd_cell wm8311_devs[] = { + { + .name = "wm831x-buckv", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), + .resources = wm831x_dcdc1_resources, + }, + { + .name = "wm831x-buckv", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_dcdc2_resources), + .resources = wm831x_dcdc2_resources, + }, + { + .name = "wm831x-buckp", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_dcdc3_resources), + .resources = wm831x_dcdc3_resources, + }, + { + .name = "wm831x-boostp", + .id = 4, + .num_resources = ARRAY_SIZE(wm831x_dcdc4_resources), + .resources = wm831x_dcdc4_resources, + }, + { + .name = "wm831x-epe", + .id = 1, + }, + { + .name = "wm831x-epe", + .id = 2, + }, + { + .name = "wm831x-gpio", + .num_resources = ARRAY_SIZE(wm831x_gpio_resources), + .resources = wm831x_gpio_resources, + }, + { + .name = "wm831x-hwmon", + }, + { + .name = "wm831x-isink", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_isink1_resources), + .resources = wm831x_isink1_resources, + }, + { + .name = "wm831x-isink", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_isink2_resources), + .resources = wm831x_isink2_resources, + }, + { + .name = "wm831x-ldo", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_ldo1_resources), + .resources = wm831x_ldo1_resources, + }, + { + .name = "wm831x-ldo", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_ldo2_resources), + .resources = wm831x_ldo2_resources, + }, + { + .name = "wm831x-ldo", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_ldo3_resources), + .resources = wm831x_ldo3_resources, + }, + { + .name = "wm831x-ldo", + .id = 4, + .num_resources = ARRAY_SIZE(wm831x_ldo4_resources), + .resources = wm831x_ldo4_resources, + }, + { + .name = "wm831x-ldo", + .id = 5, + .num_resources = ARRAY_SIZE(wm831x_ldo5_resources), + .resources = wm831x_ldo5_resources, + }, + { + .name = "wm831x-aldo", + .id = 7, + .num_resources = ARRAY_SIZE(wm831x_ldo7_resources), + .resources = wm831x_ldo7_resources, + }, + { + .name = "wm831x-alive-ldo", + .id = 11, + .num_resources = ARRAY_SIZE(wm831x_ldo11_resources), + .resources = wm831x_ldo11_resources, + }, + { + .name = "wm831x-on", + .num_resources = ARRAY_SIZE(wm831x_on_resources), + .resources = wm831x_on_resources, + }, + { + .name = "wm831x-power", + .num_resources = ARRAY_SIZE(wm831x_power_resources), + .resources = wm831x_power_resources, + }, + { + .name = "wm831x-rtc", + .num_resources = ARRAY_SIZE(wm831x_rtc_resources), + .resources = wm831x_rtc_resources, + }, + { + .name = "wm831x-status", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_status1_resources), + .resources = wm831x_status1_resources, + }, + { + .name = "wm831x-status", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_status2_resources), + .resources = wm831x_status2_resources, + }, + { + .name = "wm831x-touch", + .num_resources = ARRAY_SIZE(wm831x_touch_resources), + .resources = 
wm831x_touch_resources, + }, + { + .name = "wm831x-watchdog", + .num_resources = ARRAY_SIZE(wm831x_wdt_resources), + .resources = wm831x_wdt_resources, + }, +}; + +static struct mfd_cell wm8312_devs[] = { + { + .name = "wm831x-buckv", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), + .resources = wm831x_dcdc1_resources, + }, + { + .name = "wm831x-buckv", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_dcdc2_resources), + .resources = wm831x_dcdc2_resources, + }, + { + .name = "wm831x-buckp", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_dcdc3_resources), + .resources = wm831x_dcdc3_resources, + }, + { + .name = "wm831x-boostp", + .id = 4, + .num_resources = ARRAY_SIZE(wm831x_dcdc4_resources), + .resources = wm831x_dcdc4_resources, + }, + { + .name = "wm831x-epe", + .id = 1, + }, + { + .name = "wm831x-epe", + .id = 2, + }, + { + .name = "wm831x-gpio", + .num_resources = ARRAY_SIZE(wm831x_gpio_resources), + .resources = wm831x_gpio_resources, + }, + { + .name = "wm831x-hwmon", + }, + { + .name = "wm831x-isink", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_isink1_resources), + .resources = wm831x_isink1_resources, + }, + { + .name = "wm831x-isink", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_isink2_resources), + .resources = wm831x_isink2_resources, + }, + { + .name = "wm831x-ldo", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_ldo1_resources), + .resources = wm831x_ldo1_resources, + }, + { + .name = "wm831x-ldo", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_ldo2_resources), + .resources = wm831x_ldo2_resources, + }, + { + .name = "wm831x-ldo", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_ldo3_resources), + .resources = wm831x_ldo3_resources, + }, + { + .name = "wm831x-ldo", + .id = 4, + .num_resources = ARRAY_SIZE(wm831x_ldo4_resources), + .resources = wm831x_ldo4_resources, + }, + { + .name = "wm831x-ldo", + .id = 5, + .num_resources = ARRAY_SIZE(wm831x_ldo5_resources), + .resources = wm831x_ldo5_resources, + }, + { + .name = "wm831x-ldo", + .id = 6, + .num_resources = ARRAY_SIZE(wm831x_ldo6_resources), + .resources = wm831x_ldo6_resources, + }, + { + .name = "wm831x-aldo", + .id = 7, + .num_resources = ARRAY_SIZE(wm831x_ldo7_resources), + .resources = wm831x_ldo7_resources, + }, + { + .name = "wm831x-aldo", + .id = 8, + .num_resources = ARRAY_SIZE(wm831x_ldo8_resources), + .resources = wm831x_ldo8_resources, + }, + { + .name = "wm831x-aldo", + .id = 9, + .num_resources = ARRAY_SIZE(wm831x_ldo9_resources), + .resources = wm831x_ldo9_resources, + }, + { + .name = "wm831x-aldo", + .id = 10, + .num_resources = ARRAY_SIZE(wm831x_ldo10_resources), + .resources = wm831x_ldo10_resources, + }, + { + .name = "wm831x-alive-ldo", + .id = 11, + .num_resources = ARRAY_SIZE(wm831x_ldo11_resources), + .resources = wm831x_ldo11_resources, + }, + { + .name = "wm831x-on", + .num_resources = ARRAY_SIZE(wm831x_on_resources), + .resources = wm831x_on_resources, + }, + { + .name = "wm831x-power", + .num_resources = ARRAY_SIZE(wm831x_power_resources), + .resources = wm831x_power_resources, + }, + { + .name = "wm831x-rtc", + .num_resources = ARRAY_SIZE(wm831x_rtc_resources), + .resources = wm831x_rtc_resources, + }, + { + .name = "wm831x-status", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_status1_resources), + .resources = wm831x_status1_resources, + }, + { + .name = "wm831x-status", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_status2_resources), + .resources = wm831x_status2_resources, + }, + { + .name = "wm831x-touch", + .num_resources = 
ARRAY_SIZE(wm831x_touch_resources), + .resources = wm831x_touch_resources, + }, + { + .name = "wm831x-watchdog", + .num_resources = ARRAY_SIZE(wm831x_wdt_resources), + .resources = wm831x_wdt_resources, + }, +}; + +/* + * Instantiate the generic non-control parts of the device. + */ +static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) +{ + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + int rev; + enum wm831x_parent parent; + int ret; + + mutex_init(&wm831x->io_lock); + mutex_init(&wm831x->key_lock); + dev_set_drvdata(wm831x->dev, wm831x); + + ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret); + goto err; + } + if (ret != 0x6204) { + dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret); + ret = -EINVAL; + goto err; + } + + ret = wm831x_reg_read(wm831x, WM831X_REVISION); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to read revision: %d\n", ret); + goto err; + } + rev = (ret & WM831X_PARENT_REV_MASK) >> WM831X_PARENT_REV_SHIFT; + + ret = wm831x_reg_read(wm831x, WM831X_RESET_ID); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to read device ID: %d\n", ret); + goto err; + } + + switch (ret) { + case 0x8310: + parent = WM8310; + switch (rev) { + case 0: + dev_info(wm831x->dev, "WM8310 revision %c\n", + 'A' + rev); + break; + } + break; + + case 0x8311: + parent = WM8311; + switch (rev) { + case 0: + dev_info(wm831x->dev, "WM8311 revision %c\n", + 'A' + rev); + break; + } + break; + + case 0x8312: + parent = WM8312; + switch (rev) { + case 0: + dev_info(wm831x->dev, "WM8312 revision %c\n", + 'A' + rev); + break; + } + break; + + case 0: + /* Some engineering samples do not have the ID set, + * rely on the device being registered correctly. + * This will need revisiting for future devices with + * multiple dies. + */ + parent = id; + switch (rev) { + case 0: + dev_info(wm831x->dev, "WM831%d ES revision %c\n", + parent, 'A' + rev); + break; + } + break; + + default: + dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret); + ret = -EINVAL; + goto err; + } + + /* This will need revisiting in future but is OK for all + * current parts. + */ + if (parent != id) + dev_warn(wm831x->dev, "Device was registered as a WM831%lu\n", + id); + + /* Bootstrap the user key */ + ret = wm831x_reg_read(wm831x, WM831X_SECURITY_KEY); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to read security key: %d\n", ret); + goto err; + } + if (ret != 0) { + dev_warn(wm831x->dev, "Security key had non-zero value %x\n", + ret); + wm831x_reg_write(wm831x, WM831X_SECURITY_KEY, 0); + } + wm831x->locked = 1; + + if (pdata && pdata->pre_init) { + ret = pdata->pre_init(wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "pre_init() failed: %d\n", ret); + goto err; + } + } + + /* The core device is up, instantiate the subdevices. 
*/ + switch (parent) { + case WM8310: + ret = mfd_add_devices(wm831x->dev, -1, + wm8310_devs, ARRAY_SIZE(wm8310_devs), + NULL, 0); + break; + + case WM8311: + ret = mfd_add_devices(wm831x->dev, -1, + wm8311_devs, ARRAY_SIZE(wm8311_devs), + NULL, 0); + break; + + case WM8312: + ret = mfd_add_devices(wm831x->dev, -1, + wm8312_devs, ARRAY_SIZE(wm8312_devs), + NULL, 0); + break; + + default: + /* If this happens the bus probe function is buggy */ + BUG(); + } + + if (ret != 0) { + dev_err(wm831x->dev, "Failed to add children\n"); + goto err; + } + + if (pdata && pdata->post_init) { + ret = pdata->post_init(wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "post_init() failed: %d\n", ret); + goto err; + } + } + + return 0; + +err: + mfd_remove_devices(wm831x->dev); + kfree(wm831x); + return ret; +} + +static void wm831x_device_exit(struct wm831x *wm831x) +{ + mfd_remove_devices(wm831x->dev); + kfree(wm831x); +} + +static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg, + int bytes, void *dest) +{ + struct i2c_client *i2c = wm831x->control_data; + int ret; + u16 r = cpu_to_be16(reg); + + ret = i2c_master_send(i2c, (unsigned char *)&r, 2); + if (ret < 0) + return ret; + if (ret != 2) + return -EIO; + + ret = i2c_master_recv(i2c, dest, bytes); + if (ret < 0) + return ret; + if (ret != bytes) + return -EIO; + return 0; +} + +/* Currently we allocate the write buffer on the stack; this is OK for + * small writes - if we need to do large writes this will need to be + * revised. + */ +static int wm831x_i2c_write_device(struct wm831x *wm831x, unsigned short reg, + int bytes, void *src) +{ + struct i2c_client *i2c = wm831x->control_data; + unsigned char msg[bytes + 2]; + int ret; + + reg = cpu_to_be16(reg); + memcpy(&msg[0], ®, 2); + memcpy(&msg[2], src, bytes); + + ret = i2c_master_send(i2c, msg, bytes + 2); + if (ret < 0) + return ret; + if (ret < bytes + 2) + return -EIO; + + return 0; +} + +static int wm831x_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct wm831x *wm831x; + + wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL); + if (wm831x == NULL) { + kfree(i2c); + return -ENOMEM; + } + + i2c_set_clientdata(i2c, wm831x); + wm831x->dev = &i2c->dev; + wm831x->control_data = i2c; + wm831x->read_dev = wm831x_i2c_read_device; + wm831x->write_dev = wm831x_i2c_write_device; + + return wm831x_device_init(wm831x, id->driver_data, i2c->irq); +} + +static int wm831x_i2c_remove(struct i2c_client *i2c) +{ + struct wm831x *wm831x = i2c_get_clientdata(i2c); + + wm831x_device_exit(wm831x); + + return 0; +} + +static const struct i2c_device_id wm831x_i2c_id[] = { + { "wm8310", WM8310 }, + { "wm8311", WM8311 }, + { "wm8312", WM8312 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id); + + +static struct i2c_driver wm831x_i2c_driver = { + .driver = { + .name = "wm831x", + .owner = THIS_MODULE, + }, + .probe = wm831x_i2c_probe, + .remove = wm831x_i2c_remove, + .id_table = wm831x_i2c_id, +}; + +static int __init wm831x_i2c_init(void) +{ + int ret; + + ret = i2c_add_driver(&wm831x_i2c_driver); + if (ret != 0) + pr_err("Failed to register wm831x I2C driver: %d\n", ret); + + return ret; +} +subsys_initcall(wm831x_i2c_init); + +static void __exit wm831x_i2c_exit(void) +{ + i2c_del_driver(&wm831x_i2c_driver); +} +module_exit(wm831x_i2c_exit); + +MODULE_DESCRIPTION("I2C support for the WM831X AudioPlus PMIC"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mark Brown"); diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h new file mode 100644 index 
00000000000..d90e693053b --- /dev/null +++ b/include/linux/mfd/wm831x/core.h @@ -0,0 +1,247 @@ +/* + * include/linux/mfd/wm831x/core.h -- Core interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_CORE_H__ +#define __MFD_WM831X_CORE_H__ + +/* + * Register values. + */ +#define WM831X_RESET_ID 0x00 +#define WM831X_REVISION 0x01 +#define WM831X_PARENT_ID 0x4000 +#define WM831X_SYSVDD_CONTROL 0x4001 +#define WM831X_THERMAL_MONITORING 0x4002 +#define WM831X_POWER_STATE 0x4003 +#define WM831X_WATCHDOG 0x4004 +#define WM831X_ON_PIN_CONTROL 0x4005 +#define WM831X_RESET_CONTROL 0x4006 +#define WM831X_CONTROL_INTERFACE 0x4007 +#define WM831X_SECURITY_KEY 0x4008 +#define WM831X_SOFTWARE_SCRATCH 0x4009 +#define WM831X_OTP_CONTROL 0x400A +#define WM831X_GPIO_LEVEL 0x400C +#define WM831X_SYSTEM_STATUS 0x400D +#define WM831X_ON_SOURCE 0x400E +#define WM831X_OFF_SOURCE 0x400F +#define WM831X_SYSTEM_INTERRUPTS 0x4010 +#define WM831X_INTERRUPT_STATUS_1 0x4011 +#define WM831X_INTERRUPT_STATUS_2 0x4012 +#define WM831X_INTERRUPT_STATUS_3 0x4013 +#define WM831X_INTERRUPT_STATUS_4 0x4014 +#define WM831X_INTERRUPT_STATUS_5 0x4015 +#define WM831X_IRQ_CONFIG 0x4017 +#define WM831X_SYSTEM_INTERRUPTS_MASK 0x4018 +#define WM831X_INTERRUPT_STATUS_1_MASK 0x4019 +#define WM831X_INTERRUPT_STATUS_2_MASK 0x401A +#define WM831X_INTERRUPT_STATUS_3_MASK 0x401B +#define WM831X_INTERRUPT_STATUS_4_MASK 0x401C +#define WM831X_INTERRUPT_STATUS_5_MASK 0x401D +#define WM831X_RTC_WRITE_COUNTER 0x4020 +#define WM831X_RTC_TIME_1 0x4021 +#define WM831X_RTC_TIME_2 0x4022 +#define WM831X_RTC_ALARM_1 0x4023 +#define WM831X_RTC_ALARM_2 0x4024 +#define WM831X_RTC_CONTROL 0x4025 +#define WM831X_RTC_TRIM 0x4026 +#define WM831X_TOUCH_CONTROL_1 0x4028 +#define WM831X_TOUCH_CONTROL_2 0x4029 +#define WM831X_TOUCH_DATA_X 0x402A +#define WM831X_TOUCH_DATA_Y 0x402B +#define WM831X_TOUCH_DATA_Z 0x402C +#define WM831X_AUXADC_DATA 0x402D +#define WM831X_AUXADC_CONTROL 0x402E +#define WM831X_AUXADC_SOURCE 0x402F +#define WM831X_COMPARATOR_CONTROL 0x4030 +#define WM831X_COMPARATOR_1 0x4031 +#define WM831X_COMPARATOR_2 0x4032 +#define WM831X_COMPARATOR_3 0x4033 +#define WM831X_COMPARATOR_4 0x4034 +#define WM831X_GPIO1_CONTROL 0x4038 +#define WM831X_GPIO2_CONTROL 0x4039 +#define WM831X_GPIO3_CONTROL 0x403A +#define WM831X_GPIO4_CONTROL 0x403B +#define WM831X_GPIO5_CONTROL 0x403C +#define WM831X_GPIO6_CONTROL 0x403D +#define WM831X_GPIO7_CONTROL 0x403E +#define WM831X_GPIO8_CONTROL 0x403F +#define WM831X_GPIO9_CONTROL 0x4040 +#define WM831X_GPIO10_CONTROL 0x4041 +#define WM831X_GPIO11_CONTROL 0x4042 +#define WM831X_GPIO12_CONTROL 0x4043 +#define WM831X_GPIO13_CONTROL 0x4044 +#define WM831X_GPIO14_CONTROL 0x4045 +#define WM831X_GPIO15_CONTROL 0x4046 +#define WM831X_GPIO16_CONTROL 0x4047 +#define WM831X_CHARGER_CONTROL_1 0x4048 +#define WM831X_CHARGER_CONTROL_2 0x4049 +#define WM831X_CHARGER_STATUS 0x404A +#define WM831X_BACKUP_CHARGER_CONTROL 0x404B +#define WM831X_STATUS_LED_1 0x404C +#define WM831X_STATUS_LED_2 0x404D +#define WM831X_CURRENT_SINK_1 0x404E +#define WM831X_CURRENT_SINK_2 0x404F +#define WM831X_DCDC_ENABLE 0x4050 +#define WM831X_LDO_ENABLE 0x4051 +#define WM831X_DCDC_STATUS 0x4052 +#define WM831X_LDO_STATUS 
0x4053 +#define WM831X_DCDC_UV_STATUS 0x4054 +#define WM831X_LDO_UV_STATUS 0x4055 +#define WM831X_DC1_CONTROL_1 0x4056 +#define WM831X_DC1_CONTROL_2 0x4057 +#define WM831X_DC1_ON_CONFIG 0x4058 +#define WM831X_DC1_SLEEP_CONTROL 0x4059 +#define WM831X_DC1_DVS_CONTROL 0x405A +#define WM831X_DC2_CONTROL_1 0x405B +#define WM831X_DC2_CONTROL_2 0x405C +#define WM831X_DC2_ON_CONFIG 0x405D +#define WM831X_DC2_SLEEP_CONTROL 0x405E +#define WM831X_DC2_DVS_CONTROL 0x405F +#define WM831X_DC3_CONTROL_1 0x4060 +#define WM831X_DC3_CONTROL_2 0x4061 +#define WM831X_DC3_ON_CONFIG 0x4062 +#define WM831X_DC3_SLEEP_CONTROL 0x4063 +#define WM831X_DC4_CONTROL 0x4064 +#define WM831X_DC4_SLEEP_CONTROL 0x4065 +#define WM831X_EPE1_CONTROL 0x4066 +#define WM831X_EPE2_CONTROL 0x4067 +#define WM831X_LDO1_CONTROL 0x4068 +#define WM831X_LDO1_ON_CONTROL 0x4069 +#define WM831X_LDO1_SLEEP_CONTROL 0x406A +#define WM831X_LDO2_CONTROL 0x406B +#define WM831X_LDO2_ON_CONTROL 0x406C +#define WM831X_LDO2_SLEEP_CONTROL 0x406D +#define WM831X_LDO3_CONTROL 0x406E +#define WM831X_LDO3_ON_CONTROL 0x406F +#define WM831X_LDO3_SLEEP_CONTROL 0x4070 +#define WM831X_LDO4_CONTROL 0x4071 +#define WM831X_LDO4_ON_CONTROL 0x4072 +#define WM831X_LDO4_SLEEP_CONTROL 0x4073 +#define WM831X_LDO5_CONTROL 0x4074 +#define WM831X_LDO5_ON_CONTROL 0x4075 +#define WM831X_LDO5_SLEEP_CONTROL 0x4076 +#define WM831X_LDO6_CONTROL 0x4077 +#define WM831X_LDO6_ON_CONTROL 0x4078 +#define WM831X_LDO6_SLEEP_CONTROL 0x4079 +#define WM831X_LDO7_CONTROL 0x407A +#define WM831X_LDO7_ON_CONTROL 0x407B +#define WM831X_LDO7_SLEEP_CONTROL 0x407C +#define WM831X_LDO8_CONTROL 0x407D +#define WM831X_LDO8_ON_CONTROL 0x407E +#define WM831X_LDO8_SLEEP_CONTROL 0x407F +#define WM831X_LDO9_CONTROL 0x4080 +#define WM831X_LDO9_ON_CONTROL 0x4081 +#define WM831X_LDO9_SLEEP_CONTROL 0x4082 +#define WM831X_LDO10_CONTROL 0x4083 +#define WM831X_LDO10_ON_CONTROL 0x4084 +#define WM831X_LDO10_SLEEP_CONTROL 0x4085 +#define WM831X_LDO11_ON_CONTROL 0x4087 +#define WM831X_LDO11_SLEEP_CONTROL 0x4088 +#define WM831X_POWER_GOOD_SOURCE_1 0x408E +#define WM831X_POWER_GOOD_SOURCE_2 0x408F +#define WM831X_CLOCK_CONTROL_1 0x4090 +#define WM831X_CLOCK_CONTROL_2 0x4091 +#define WM831X_FLL_CONTROL_1 0x4092 +#define WM831X_FLL_CONTROL_2 0x4093 +#define WM831X_FLL_CONTROL_3 0x4094 +#define WM831X_FLL_CONTROL_4 0x4095 +#define WM831X_FLL_CONTROL_5 0x4096 +#define WM831X_UNIQUE_ID_1 0x7800 +#define WM831X_UNIQUE_ID_2 0x7801 +#define WM831X_UNIQUE_ID_3 0x7802 +#define WM831X_UNIQUE_ID_4 0x7803 +#define WM831X_UNIQUE_ID_5 0x7804 +#define WM831X_UNIQUE_ID_6 0x7805 +#define WM831X_UNIQUE_ID_7 0x7806 +#define WM831X_UNIQUE_ID_8 0x7807 +#define WM831X_FACTORY_OTP_ID 0x7808 +#define WM831X_FACTORY_OTP_1 0x7809 +#define WM831X_FACTORY_OTP_2 0x780A +#define WM831X_FACTORY_OTP_3 0x780B +#define WM831X_FACTORY_OTP_4 0x780C +#define WM831X_FACTORY_OTP_5 0x780D +#define WM831X_CUSTOMER_OTP_ID 0x7810 +#define WM831X_DC1_OTP_CONTROL 0x7811 +#define WM831X_DC2_OTP_CONTROL 0x7812 +#define WM831X_DC3_OTP_CONTROL 0x7813 +#define WM831X_LDO1_2_OTP_CONTROL 0x7814 +#define WM831X_LDO3_4_OTP_CONTROL 0x7815 +#define WM831X_LDO5_6_OTP_CONTROL 0x7816 +#define WM831X_LDO7_8_OTP_CONTROL 0x7817 +#define WM831X_LDO9_10_OTP_CONTROL 0x7818 +#define WM831X_LDO11_EPE_CONTROL 0x7819 +#define WM831X_GPIO1_OTP_CONTROL 0x781A +#define WM831X_GPIO2_OTP_CONTROL 0x781B +#define WM831X_GPIO3_OTP_CONTROL 0x781C +#define WM831X_GPIO4_OTP_CONTROL 0x781D +#define WM831X_GPIO5_OTP_CONTROL 0x781E +#define WM831X_GPIO6_OTP_CONTROL 0x781F +#define 
WM831X_DBE_CHECK_DATA 0x7827 + +/* + * R0 (0x00) - Reset ID + */ +#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ + +/* + * R1 (0x01) - Revision + */ +#define WM831X_PARENT_REV_MASK 0xFF00 /* PARENT_REV - [15:8] */ +#define WM831X_PARENT_REV_SHIFT 8 /* PARENT_REV - [15:8] */ +#define WM831X_PARENT_REV_WIDTH 8 /* PARENT_REV - [15:8] */ +#define WM831X_CHILD_REV_MASK 0x00FF /* CHILD_REV - [7:0] */ +#define WM831X_CHILD_REV_SHIFT 0 /* CHILD_REV - [7:0] */ +#define WM831X_CHILD_REV_WIDTH 8 /* CHILD_REV - [7:0] */ + +/* + * R16384 (0x4000) - Parent ID + */ +#define WM831X_PARENT_ID_MASK 0xFFFF /* PARENT_ID - [15:0] */ +#define WM831X_PARENT_ID_SHIFT 0 /* PARENT_ID - [15:0] */ +#define WM831X_PARENT_ID_WIDTH 16 /* PARENT_ID - [15:0] */ + +struct wm831x { + struct mutex io_lock; + + struct device *dev; + int (*read_dev)(struct wm831x *wm831x, unsigned short reg, + int bytes, void *dest); + int (*write_dev)(struct wm831x *wm831x, unsigned short reg, + int bytes, void *src); + + void *control_data; + + /* The WM831x has a security key blocking access to certain + * registers. The mutex is taken by the accessors for locking + * and unlocking the security key, locked is used to fail + * writes if the lock is held. + */ + struct mutex key_lock; + unsigned int locked:1; +}; + +/* Device I/O API */ +int wm831x_reg_read(struct wm831x *wm831x, unsigned short reg); +int wm831x_reg_write(struct wm831x *wm831x, unsigned short reg, + unsigned short val); +void wm831x_reg_lock(struct wm831x *wm831x); +int wm831x_reg_unlock(struct wm831x *wm831x); +int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg, + unsigned short mask, unsigned short val); +int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, + int count, u16 *buf); + +#endif diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h new file mode 100644 index 00000000000..571e6013626 --- /dev/null +++ b/include/linux/mfd/wm831x/pdata.h @@ -0,0 +1,107 @@ +/* + * include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_PDATA_H__ +#define __MFD_WM831X_PDATA_H__ + +struct wm831x; +struct regulator_init_data; + +struct wm831x_backup_pdata { + int charger_enable; + int no_constant_voltage; /** Disable constant voltage charging */ + int vlim; /** Voltage limit in milivolts */ + int ilim; /** Current limit in microamps */ +}; + +struct wm831x_battery_pdata { + int enable; /** Enable charging */ + int fast_enable; /** Enable fast charging */ + int off_mask; /** Mask OFF while charging */ + int trickle_ilim; /** Trickle charge current limit, in mA */ + int vsel; /** Target voltage, in mV */ + int eoc_iterm; /** End of trickle charge current, in mA */ + int fast_ilim; /** Fast charge current limit, in mA */ + int timeout; /** Charge cycle timeout, in minutes */ +}; + +/* Sources for status LED configuration. Values are register values + * plus 1 to allow for a zero default for preserve. 
+ */ +enum wm831x_status_src { + WM831X_STATUS_PRESERVE = 0, /* Keep the current hardware setting */ + WM831X_STATUS_OTP = 1, + WM831X_STATUS_POWER = 2, + WM831X_STATUS_CHARGER = 3, + WM831X_STATUS_MANUAL = 4, +}; + +struct wm831x_status_pdata { + enum wm831x_status_src default_src; + const char *name; + const char *default_trigger; +}; + +struct wm831x_touch_pdata { + int fivewire; /** 1 for five wire mode, 0 for 4 wire */ + int isel; /** Current for pen down (uA) */ + int rpu; /** Pen down sensitivity resistor divider */ + int pressure; /** Report pressure (boolean) */ + int data_irq; /** Touch data ready IRQ */ +}; + +enum wm831x_watchdog_action { + WM831X_WDOG_NONE = 0, + WM831X_WDOG_INTERRUPT = 1, + WM831X_WDOG_RESET = 2, + WM831X_WDOG_WAKE = 3, +}; + +struct wm831x_watchdog_pdata { + enum wm831x_watchdog_action primary, secondary; + int update_gpio; + unsigned int software:1; +}; + +#define WM831X_MAX_STATUS 2 +#define WM831X_MAX_DCDC 4 +#define WM831X_MAX_EPE 2 +#define WM831X_MAX_LDO 11 +#define WM831X_MAX_ISINK 2 + +struct wm831x_pdata { + /** Called before subdevices are set up */ + int (*pre_init)(struct wm831x *wm831x); + /** Called after subdevices are set up */ + int (*post_init)(struct wm831x *wm831x); + + int gpio_base; + struct wm831x_backup_pdata *backup; + struct wm831x_battery_pdata *battery; + struct wm831x_touch_pdata *touch; + struct wm831x_watchdog_pdata *watchdog; + + /** LED1 = 0 and so on */ + struct wm831x_status_pdata *status[WM831X_MAX_STATUS]; + /** DCDC1 = 0 and so on */ + struct regulator_init_data *dcdc[WM831X_MAX_DCDC]; + /** EPE1 = 0 and so on */ + struct regulator_init_data *epe[WM831X_MAX_EPE]; + /** LDO1 = 0 and so on */ + struct regulator_init_data *ldo[WM831X_MAX_LDO]; + /** ISINK1 = 0 and so on*/ + struct regulator_init_data *isink[WM831X_MAX_ISINK]; +}; + +#endif -- cgit v1.2.3-70-g09d2 From 7d4d0a3e7343e3190afaa17253073db58e3d9bff Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:45:53 +0100 Subject: mfd: Add WM831x interrupt support The WM831x includes an interrupt controller managing interrupts for the various functions on the chip. This patch adds support for the core interrupt block on the device. Ideally this would be supported by genirq, particularly for the GPIOs, but currently genirq is unable to cope with controllers on interrupt driven buses so we cut'n'paste the generic interface. Once genirq is able to cope chips like this it should be a case of filing the prefixes off the code and redoing wm831x-irq.c to move over. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 12 +- drivers/mfd/wm831x-irq.c | 559 +++++++++++++++++++++++++++++ include/linux/mfd/wm831x/core.h | 21 ++ include/linux/mfd/wm831x/irq.h | 764 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1354 insertions(+), 2 deletions(-) create mode 100644 drivers/mfd/wm831x-irq.c create mode 100644 include/linux/mfd/wm831x/irq.h (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index cc1040c9d46..eb63d22160d 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -19,6 +19,7 @@ #include #include +#include enum wm831x_parent { WM8310 = 0, @@ -1189,6 +1190,10 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) } } + ret = wm831x_irq_init(wm831x, irq); + if (ret != 0) + goto err; + /* The core device is up, instantiate the subdevices. 
*/ switch (parent) { case WM8310: @@ -1216,19 +1221,21 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) if (ret != 0) { dev_err(wm831x->dev, "Failed to add children\n"); - goto err; + goto err_irq; } if (pdata && pdata->post_init) { ret = pdata->post_init(wm831x); if (ret != 0) { dev_err(wm831x->dev, "post_init() failed: %d\n", ret); - goto err; + goto err_irq; } } return 0; +err_irq: + wm831x_irq_exit(wm831x); err: mfd_remove_devices(wm831x->dev); kfree(wm831x); @@ -1238,6 +1245,7 @@ err: static void wm831x_device_exit(struct wm831x *wm831x) { mfd_remove_devices(wm831x->dev); + wm831x_irq_exit(wm831x); kfree(wm831x); } diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c new file mode 100644 index 00000000000..d3015dfb913 --- /dev/null +++ b/drivers/mfd/wm831x-irq.c @@ -0,0 +1,559 @@ +/* + * wm831x-irq.c -- Interrupt controller support for Wolfson WM831x PMICs + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +/* + * Since generic IRQs don't currently support interrupt controllers on + * interrupt driven buses we don't use genirq but instead provide an + * interface that looks very much like the standard ones. This leads + * to some bodges, including storing interrupt handler information in + * the static irq_data table we use to look up the data for individual + * interrupts, but hopefully won't last too long. + */ + +struct wm831x_irq_data { + int primary; + int reg; + int mask; + irq_handler_t handler; + void *handler_data; +}; + +static struct wm831x_irq_data wm831x_irqs[] = { + [WM831X_IRQ_TEMP_THW] = { + .primary = WM831X_TEMP_INT, + .reg = 1, + .mask = WM831X_TEMP_THW_EINT, + }, + [WM831X_IRQ_GPIO_1] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP1_EINT, + }, + [WM831X_IRQ_GPIO_2] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP2_EINT, + }, + [WM831X_IRQ_GPIO_3] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP3_EINT, + }, + [WM831X_IRQ_GPIO_4] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP4_EINT, + }, + [WM831X_IRQ_GPIO_5] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP5_EINT, + }, + [WM831X_IRQ_GPIO_6] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP6_EINT, + }, + [WM831X_IRQ_GPIO_7] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP7_EINT, + }, + [WM831X_IRQ_GPIO_8] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP8_EINT, + }, + [WM831X_IRQ_GPIO_9] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP9_EINT, + }, + [WM831X_IRQ_GPIO_10] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP10_EINT, + }, + [WM831X_IRQ_GPIO_11] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP11_EINT, + }, + [WM831X_IRQ_GPIO_12] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP12_EINT, + }, + [WM831X_IRQ_GPIO_13] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP13_EINT, + }, + [WM831X_IRQ_GPIO_14] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP14_EINT, + }, + [WM831X_IRQ_GPIO_15] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP15_EINT, + }, + 
[WM831X_IRQ_GPIO_16] = { + .primary = WM831X_GP_INT, + .reg = 5, + .mask = WM831X_GP16_EINT, + }, + [WM831X_IRQ_ON] = { + .primary = WM831X_ON_PIN_INT, + .reg = 1, + .mask = WM831X_ON_PIN_EINT, + }, + [WM831X_IRQ_PPM_SYSLO] = { + .primary = WM831X_PPM_INT, + .reg = 1, + .mask = WM831X_PPM_SYSLO_EINT, + }, + [WM831X_IRQ_PPM_PWR_SRC] = { + .primary = WM831X_PPM_INT, + .reg = 1, + .mask = WM831X_PPM_PWR_SRC_EINT, + }, + [WM831X_IRQ_PPM_USB_CURR] = { + .primary = WM831X_PPM_INT, + .reg = 1, + .mask = WM831X_PPM_USB_CURR_EINT, + }, + [WM831X_IRQ_WDOG_TO] = { + .primary = WM831X_WDOG_INT, + .reg = 1, + .mask = WM831X_WDOG_TO_EINT, + }, + [WM831X_IRQ_RTC_PER] = { + .primary = WM831X_RTC_INT, + .reg = 1, + .mask = WM831X_RTC_PER_EINT, + }, + [WM831X_IRQ_RTC_ALM] = { + .primary = WM831X_RTC_INT, + .reg = 1, + .mask = WM831X_RTC_ALM_EINT, + }, + [WM831X_IRQ_CHG_BATT_HOT] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_BATT_HOT_EINT, + }, + [WM831X_IRQ_CHG_BATT_COLD] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_BATT_COLD_EINT, + }, + [WM831X_IRQ_CHG_BATT_FAIL] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_BATT_FAIL_EINT, + }, + [WM831X_IRQ_CHG_OV] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_OV_EINT, + }, + [WM831X_IRQ_CHG_END] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_END_EINT, + }, + [WM831X_IRQ_CHG_TO] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_TO_EINT, + }, + [WM831X_IRQ_CHG_MODE] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_MODE_EINT, + }, + [WM831X_IRQ_CHG_START] = { + .primary = WM831X_CHG_INT, + .reg = 2, + .mask = WM831X_CHG_START_EINT, + }, + [WM831X_IRQ_TCHDATA] = { + .primary = WM831X_TCHDATA_INT, + .reg = 1, + .mask = WM831X_TCHDATA_EINT, + }, + [WM831X_IRQ_TCHPD] = { + .primary = WM831X_TCHPD_INT, + .reg = 1, + .mask = WM831X_TCHPD_EINT, + }, + [WM831X_IRQ_AUXADC_DATA] = { + .primary = WM831X_AUXADC_INT, + .reg = 1, + .mask = WM831X_AUXADC_DATA_EINT, + }, + [WM831X_IRQ_AUXADC_DCOMP1] = { + .primary = WM831X_AUXADC_INT, + .reg = 1, + .mask = WM831X_AUXADC_DCOMP1_EINT, + }, + [WM831X_IRQ_AUXADC_DCOMP2] = { + .primary = WM831X_AUXADC_INT, + .reg = 1, + .mask = WM831X_AUXADC_DCOMP2_EINT, + }, + [WM831X_IRQ_AUXADC_DCOMP3] = { + .primary = WM831X_AUXADC_INT, + .reg = 1, + .mask = WM831X_AUXADC_DCOMP3_EINT, + }, + [WM831X_IRQ_AUXADC_DCOMP4] = { + .primary = WM831X_AUXADC_INT, + .reg = 1, + .mask = WM831X_AUXADC_DCOMP4_EINT, + }, + [WM831X_IRQ_CS1] = { + .primary = WM831X_CS_INT, + .reg = 2, + .mask = WM831X_CS1_EINT, + }, + [WM831X_IRQ_CS2] = { + .primary = WM831X_CS_INT, + .reg = 2, + .mask = WM831X_CS2_EINT, + }, + [WM831X_IRQ_HC_DC1] = { + .primary = WM831X_HC_INT, + .reg = 4, + .mask = WM831X_HC_DC1_EINT, + }, + [WM831X_IRQ_HC_DC2] = { + .primary = WM831X_HC_INT, + .reg = 4, + .mask = WM831X_HC_DC2_EINT, + }, + [WM831X_IRQ_UV_LDO1] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO1_EINT, + }, + [WM831X_IRQ_UV_LDO2] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO2_EINT, + }, + [WM831X_IRQ_UV_LDO3] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO3_EINT, + }, + [WM831X_IRQ_UV_LDO4] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO4_EINT, + }, + [WM831X_IRQ_UV_LDO5] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO5_EINT, + }, + [WM831X_IRQ_UV_LDO6] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO6_EINT, + }, + [WM831X_IRQ_UV_LDO7] = { + 
.primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO7_EINT, + }, + [WM831X_IRQ_UV_LDO8] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO8_EINT, + }, + [WM831X_IRQ_UV_LDO9] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO9_EINT, + }, + [WM831X_IRQ_UV_LDO10] = { + .primary = WM831X_UV_INT, + .reg = 3, + .mask = WM831X_UV_LDO10_EINT, + }, + [WM831X_IRQ_UV_DC1] = { + .primary = WM831X_UV_INT, + .reg = 4, + .mask = WM831X_UV_DC1_EINT, + }, + [WM831X_IRQ_UV_DC2] = { + .primary = WM831X_UV_INT, + .reg = 4, + .mask = WM831X_UV_DC2_EINT, + }, + [WM831X_IRQ_UV_DC3] = { + .primary = WM831X_UV_INT, + .reg = 4, + .mask = WM831X_UV_DC3_EINT, + }, + [WM831X_IRQ_UV_DC4] = { + .primary = WM831X_UV_INT, + .reg = 4, + .mask = WM831X_UV_DC4_EINT, + }, +}; + +static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data) +{ + return WM831X_INTERRUPT_STATUS_1 - 1 + irq_data->reg; +} + +static inline int irq_data_to_mask_reg(struct wm831x_irq_data *irq_data) +{ + return WM831X_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg; +} + +static void __wm831x_enable_irq(struct wm831x *wm831x, int irq) +{ + struct wm831x_irq_data *irq_data = &wm831x_irqs[irq]; + + wm831x->irq_masks[irq_data->reg - 1] &= ~irq_data->mask; + wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data), + wm831x->irq_masks[irq_data->reg - 1]); +} + +void wm831x_enable_irq(struct wm831x *wm831x, int irq) +{ + mutex_lock(&wm831x->irq_lock); + __wm831x_enable_irq(wm831x, irq); + mutex_unlock(&wm831x->irq_lock); +} +EXPORT_SYMBOL_GPL(wm831x_enable_irq); + +static void __wm831x_disable_irq(struct wm831x *wm831x, int irq) +{ + struct wm831x_irq_data *irq_data = &wm831x_irqs[irq]; + + wm831x->irq_masks[irq_data->reg - 1] |= irq_data->mask; + wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data), + wm831x->irq_masks[irq_data->reg - 1]); +} + +void wm831x_disable_irq(struct wm831x *wm831x, int irq) +{ + mutex_lock(&wm831x->irq_lock); + __wm831x_disable_irq(wm831x, irq); + mutex_unlock(&wm831x->irq_lock); +} +EXPORT_SYMBOL_GPL(wm831x_disable_irq); + +int wm831x_request_irq(struct wm831x *wm831x, + unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *name, + void *dev) +{ + int ret = 0; + + if (irq < 0 || irq >= WM831X_NUM_IRQS) + return -EINVAL; + + mutex_lock(&wm831x->irq_lock); + + if (wm831x_irqs[irq].handler) { + dev_err(wm831x->dev, "Already have handler for IRQ %d\n", irq); + ret = -EINVAL; + goto out; + } + + wm831x_irqs[irq].handler = handler; + wm831x_irqs[irq].handler_data = dev; + + __wm831x_enable_irq(wm831x, irq); + +out: + mutex_unlock(&wm831x->irq_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(wm831x_request_irq); + +void wm831x_free_irq(struct wm831x *wm831x, unsigned int irq, void *data) +{ + if (irq < 0 || irq >= WM831X_NUM_IRQS) + return; + + mutex_lock(&wm831x->irq_lock); + + wm831x_irqs[irq].handler = NULL; + wm831x_irqs[irq].handler_data = NULL; + + __wm831x_disable_irq(wm831x, irq); + + mutex_unlock(&wm831x->irq_lock); +} +EXPORT_SYMBOL_GPL(wm831x_free_irq); + + +static void wm831x_handle_irq(struct wm831x *wm831x, int irq, int status) +{ + struct wm831x_irq_data *irq_data = &wm831x_irqs[irq]; + + if (irq_data->handler) { + irq_data->handler(irq, irq_data->handler_data); + wm831x_reg_write(wm831x, irq_data_to_status_reg(irq_data), + irq_data->mask); + } else { + dev_err(wm831x->dev, "Unhandled IRQ %d, masking\n", irq); + __wm831x_disable_irq(wm831x, irq); + } +} + +/* Main interrupt handling occurs in a workqueue since we need + * interrupts enabled to 
interact with the chip. */ +static void wm831x_irq_worker(struct work_struct *work) +{ + struct wm831x *wm831x = container_of(work, struct wm831x, irq_work); + unsigned int i; + int primary; + int status_regs[5]; + int read[5] = { 0 }; + int *status; + + primary = wm831x_reg_read(wm831x, WM831X_SYSTEM_INTERRUPTS); + if (primary < 0) { + dev_err(wm831x->dev, "Failed to read system interrupt: %d\n", + primary); + goto out; + } + + mutex_lock(&wm831x->irq_lock); + + for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) { + int offset = wm831x_irqs[i].reg - 1; + + if (!(primary & wm831x_irqs[i].primary)) + continue; + + status = &status_regs[offset]; + + /* Hopefully there should only be one register to read + * each time otherwise we ought to do a block read. */ + if (!read[offset]) { + *status = wm831x_reg_read(wm831x, + irq_data_to_status_reg(&wm831x_irqs[i])); + if (*status < 0) { + dev_err(wm831x->dev, + "Failed to read IRQ status: %d\n", + *status); + goto out_lock; + } + + /* Mask out the disabled IRQs */ + *status &= ~wm831x->irq_masks[offset]; + read[offset] = 1; + } + + if (*status & wm831x_irqs[i].mask) + wm831x_handle_irq(wm831x, i, *status); + } + +out_lock: + mutex_unlock(&wm831x->irq_lock); +out: + enable_irq(wm831x->irq); +} + + +static irqreturn_t wm831x_cpu_irq(int irq, void *data) +{ + struct wm831x *wm831x = data; + + /* Shut the interrupt to the CPU up and schedule the actual + * handler; we can't check that the IRQ is asserted. */ + disable_irq_nosync(irq); + + queue_work(wm831x->irq_wq, &wm831x->irq_work); + + return IRQ_HANDLED; +} + +int wm831x_irq_init(struct wm831x *wm831x, int irq) +{ + int i, ret; + + if (!irq) { + dev_warn(wm831x->dev, + "No interrupt specified - functionality limited\n"); + return 0; + } + + + wm831x->irq_wq = create_singlethread_workqueue("wm831x-irq"); + if (!wm831x->irq_wq) { + dev_err(wm831x->dev, "Failed to allocate IRQ worker\n"); + return -ESRCH; + } + + wm831x->irq = irq; + mutex_init(&wm831x->irq_lock); + INIT_WORK(&wm831x->irq_work, wm831x_irq_worker); + + /* Mask the individual interrupt sources */ + for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks); i++) { + wm831x->irq_masks[i] = 0xffff; + wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, + 0xffff); + } + + /* Enable top level interrupts, we mask at secondary level */ + wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0); + + /* We're good to go. We set IRQF_SHARED since there's a + * chance the driver will interoperate with another driver but + * the need to disable the IRQ while handing via I2C/SPI means + * that this may break and performance will be impacted. If + * this does happen it's a hardware design issue and the only + * other alternative would be polling. + */ + ret = request_irq(irq, wm831x_cpu_irq, IRQF_TRIGGER_LOW | IRQF_SHARED, + "wm831x", wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n", + irq, ret); + return ret; + } + + return 0; +} + +void wm831x_irq_exit(struct wm831x *wm831x) +{ + if (wm831x->irq) + free_irq(wm831x->irq, wm831x); +} diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index d90e693053b..b96c9355b16 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -15,6 +15,9 @@ #ifndef __MFD_WM831X_CORE_H__ #define __MFD_WM831X_CORE_H__ +#include +#include + /* * Register values. 
*/ @@ -224,6 +227,13 @@ struct wm831x { void *control_data; + int irq; /* Our chip IRQ */ + struct mutex irq_lock; + struct workqueue_struct *irq_wq; + struct work_struct irq_work; + unsigned int irq_base; + int irq_masks[5]; + /* The WM831x has a security key blocking access to certain * registers. The mutex is taken by the accessors for locking * and unlocking the security key, locked is used to fail @@ -244,4 +254,15 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg, int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, int count, u16 *buf); +int wm831x_irq_init(struct wm831x *wm831x, int irq); +void wm831x_irq_exit(struct wm831x *wm831x); + +int __must_check wm831x_request_irq(struct wm831x *wm831x, + unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *name, + void *dev); +void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *); +void wm831x_disable_irq(struct wm831x *wm831x, int irq); +void wm831x_enable_irq(struct wm831x *wm831x, int irq); + #endif diff --git a/include/linux/mfd/wm831x/irq.h b/include/linux/mfd/wm831x/irq.h new file mode 100644 index 00000000000..3a8c97656fd --- /dev/null +++ b/include/linux/mfd/wm831x/irq.h @@ -0,0 +1,764 @@ +/* + * include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_IRQ_H__ +#define __MFD_WM831X_IRQ_H__ + +/* Interrupt number assignments within Linux */ +#define WM831X_IRQ_TEMP_THW 0 +#define WM831X_IRQ_GPIO_1 1 +#define WM831X_IRQ_GPIO_2 2 +#define WM831X_IRQ_GPIO_3 3 +#define WM831X_IRQ_GPIO_4 4 +#define WM831X_IRQ_GPIO_5 5 +#define WM831X_IRQ_GPIO_6 6 +#define WM831X_IRQ_GPIO_7 7 +#define WM831X_IRQ_GPIO_8 8 +#define WM831X_IRQ_GPIO_9 9 +#define WM831X_IRQ_GPIO_10 10 +#define WM831X_IRQ_GPIO_11 11 +#define WM831X_IRQ_GPIO_12 12 +#define WM831X_IRQ_GPIO_13 13 +#define WM831X_IRQ_GPIO_14 14 +#define WM831X_IRQ_GPIO_15 15 +#define WM831X_IRQ_GPIO_16 16 +#define WM831X_IRQ_ON 17 +#define WM831X_IRQ_PPM_SYSLO 18 +#define WM831X_IRQ_PPM_PWR_SRC 19 +#define WM831X_IRQ_PPM_USB_CURR 20 +#define WM831X_IRQ_WDOG_TO 21 +#define WM831X_IRQ_RTC_PER 22 +#define WM831X_IRQ_RTC_ALM 23 +#define WM831X_IRQ_CHG_BATT_HOT 24 +#define WM831X_IRQ_CHG_BATT_COLD 25 +#define WM831X_IRQ_CHG_BATT_FAIL 26 +#define WM831X_IRQ_CHG_OV 27 +#define WM831X_IRQ_CHG_END 29 +#define WM831X_IRQ_CHG_TO 30 +#define WM831X_IRQ_CHG_MODE 31 +#define WM831X_IRQ_CHG_START 32 +#define WM831X_IRQ_TCHDATA 33 +#define WM831X_IRQ_TCHPD 34 +#define WM831X_IRQ_AUXADC_DATA 35 +#define WM831X_IRQ_AUXADC_DCOMP1 36 +#define WM831X_IRQ_AUXADC_DCOMP2 37 +#define WM831X_IRQ_AUXADC_DCOMP3 38 +#define WM831X_IRQ_AUXADC_DCOMP4 39 +#define WM831X_IRQ_CS1 40 +#define WM831X_IRQ_CS2 41 +#define WM831X_IRQ_HC_DC1 42 +#define WM831X_IRQ_HC_DC2 43 +#define WM831X_IRQ_UV_LDO1 44 +#define WM831X_IRQ_UV_LDO2 45 +#define WM831X_IRQ_UV_LDO3 46 +#define WM831X_IRQ_UV_LDO4 47 +#define WM831X_IRQ_UV_LDO5 48 +#define WM831X_IRQ_UV_LDO6 49 +#define WM831X_IRQ_UV_LDO7 50 +#define WM831X_IRQ_UV_LDO8 51 +#define WM831X_IRQ_UV_LDO9 52 +#define WM831X_IRQ_UV_LDO10 53 +#define WM831X_IRQ_UV_DC1 54 +#define WM831X_IRQ_UV_DC2 55 +#define WM831X_IRQ_UV_DC3 56 +#define WM831X_IRQ_UV_DC4 57 + 
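/*
 * Editor's illustrative sketch, not part of the patch: a minimal example of
 * how a hypothetical subdevice driver might consume one of the IRQ numbers
 * defined above through the wm831x_request_irq()/wm831x_free_irq() interface
 * this patch adds to include/linux/mfd/wm831x/core.h.  The names foo_irq(),
 * foo_probe() and the "foo" label are assumptions for illustration only.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/irq.h>

static irqreturn_t foo_irq(int irq, void *data)
{
	struct wm831x *wm831x = data;

	/* Called from the wm831x IRQ workqueue, so sleeping I/O is allowed */
	dev_info(wm831x->dev, "ON pin event\n");
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	/* MFD children are parented to the wm831x device, which holds drvdata */
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	int ret;

	ret = wm831x_request_irq(wm831x, WM831X_IRQ_ON, foo_irq, 0,
				 "foo", wm831x);
	if (ret != 0)
		dev_err(wm831x->dev, "Failed to request ON IRQ: %d\n", ret);

	return ret;
}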
+#define WM831X_NUM_IRQS 58 + +/* + * R16400 (0x4010) - System Interrupts + */ +#define WM831X_PS_INT 0x8000 /* PS_INT */ +#define WM831X_PS_INT_MASK 0x8000 /* PS_INT */ +#define WM831X_PS_INT_SHIFT 15 /* PS_INT */ +#define WM831X_PS_INT_WIDTH 1 /* PS_INT */ +#define WM831X_TEMP_INT 0x4000 /* TEMP_INT */ +#define WM831X_TEMP_INT_MASK 0x4000 /* TEMP_INT */ +#define WM831X_TEMP_INT_SHIFT 14 /* TEMP_INT */ +#define WM831X_TEMP_INT_WIDTH 1 /* TEMP_INT */ +#define WM831X_GP_INT 0x2000 /* GP_INT */ +#define WM831X_GP_INT_MASK 0x2000 /* GP_INT */ +#define WM831X_GP_INT_SHIFT 13 /* GP_INT */ +#define WM831X_GP_INT_WIDTH 1 /* GP_INT */ +#define WM831X_ON_PIN_INT 0x1000 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_MASK 0x1000 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_SHIFT 12 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_WIDTH 1 /* ON_PIN_INT */ +#define WM831X_WDOG_INT 0x0800 /* WDOG_INT */ +#define WM831X_WDOG_INT_MASK 0x0800 /* WDOG_INT */ +#define WM831X_WDOG_INT_SHIFT 11 /* WDOG_INT */ +#define WM831X_WDOG_INT_WIDTH 1 /* WDOG_INT */ +#define WM831X_TCHDATA_INT 0x0400 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_MASK 0x0400 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_SHIFT 10 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_WIDTH 1 /* TCHDATA_INT */ +#define WM831X_TCHPD_INT 0x0200 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_MASK 0x0200 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_SHIFT 9 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_WIDTH 1 /* TCHPD_INT */ +#define WM831X_AUXADC_INT 0x0100 /* AUXADC_INT */ +#define WM831X_AUXADC_INT_MASK 0x0100 /* AUXADC_INT */ +#define WM831X_AUXADC_INT_SHIFT 8 /* AUXADC_INT */ +#define WM831X_AUXADC_INT_WIDTH 1 /* AUXADC_INT */ +#define WM831X_PPM_INT 0x0080 /* PPM_INT */ +#define WM831X_PPM_INT_MASK 0x0080 /* PPM_INT */ +#define WM831X_PPM_INT_SHIFT 7 /* PPM_INT */ +#define WM831X_PPM_INT_WIDTH 1 /* PPM_INT */ +#define WM831X_CS_INT 0x0040 /* CS_INT */ +#define WM831X_CS_INT_MASK 0x0040 /* CS_INT */ +#define WM831X_CS_INT_SHIFT 6 /* CS_INT */ +#define WM831X_CS_INT_WIDTH 1 /* CS_INT */ +#define WM831X_RTC_INT 0x0020 /* RTC_INT */ +#define WM831X_RTC_INT_MASK 0x0020 /* RTC_INT */ +#define WM831X_RTC_INT_SHIFT 5 /* RTC_INT */ +#define WM831X_RTC_INT_WIDTH 1 /* RTC_INT */ +#define WM831X_OTP_INT 0x0010 /* OTP_INT */ +#define WM831X_OTP_INT_MASK 0x0010 /* OTP_INT */ +#define WM831X_OTP_INT_SHIFT 4 /* OTP_INT */ +#define WM831X_OTP_INT_WIDTH 1 /* OTP_INT */ +#define WM831X_CHILD_INT 0x0008 /* CHILD_INT */ +#define WM831X_CHILD_INT_MASK 0x0008 /* CHILD_INT */ +#define WM831X_CHILD_INT_SHIFT 3 /* CHILD_INT */ +#define WM831X_CHILD_INT_WIDTH 1 /* CHILD_INT */ +#define WM831X_CHG_INT 0x0004 /* CHG_INT */ +#define WM831X_CHG_INT_MASK 0x0004 /* CHG_INT */ +#define WM831X_CHG_INT_SHIFT 2 /* CHG_INT */ +#define WM831X_CHG_INT_WIDTH 1 /* CHG_INT */ +#define WM831X_HC_INT 0x0002 /* HC_INT */ +#define WM831X_HC_INT_MASK 0x0002 /* HC_INT */ +#define WM831X_HC_INT_SHIFT 1 /* HC_INT */ +#define WM831X_HC_INT_WIDTH 1 /* HC_INT */ +#define WM831X_UV_INT 0x0001 /* UV_INT */ +#define WM831X_UV_INT_MASK 0x0001 /* UV_INT */ +#define WM831X_UV_INT_SHIFT 0 /* UV_INT */ +#define WM831X_UV_INT_WIDTH 1 /* UV_INT */ + +/* + * R16401 (0x4011) - Interrupt Status 1 + */ +#define WM831X_PPM_SYSLO_EINT 0x8000 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_MASK 0x8000 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_SHIFT 15 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_WIDTH 1 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_PWR_SRC_EINT 0x4000 /* PPM_PWR_SRC_EINT */ +#define 
WM831X_PPM_PWR_SRC_EINT_MASK 0x4000 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_SHIFT 14 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_WIDTH 1 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_USB_CURR_EINT 0x2000 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_MASK 0x2000 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_SHIFT 13 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_WIDTH 1 /* PPM_USB_CURR_EINT */ +#define WM831X_ON_PIN_EINT 0x1000 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_MASK 0x1000 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_SHIFT 12 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_WIDTH 1 /* ON_PIN_EINT */ +#define WM831X_WDOG_TO_EINT 0x0800 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_MASK 0x0800 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_SHIFT 11 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_WIDTH 1 /* WDOG_TO_EINT */ +#define WM831X_TCHDATA_EINT 0x0400 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_MASK 0x0400 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_SHIFT 10 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_WIDTH 1 /* TCHDATA_EINT */ +#define WM831X_TCHPD_EINT 0x0200 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_MASK 0x0200 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_SHIFT 9 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_WIDTH 1 /* TCHPD_EINT */ +#define WM831X_AUXADC_DATA_EINT 0x0100 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_MASK 0x0100 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_SHIFT 8 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_WIDTH 1 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT 0x0080 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_MASK 0x0080 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_SHIFT 7 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_WIDTH 1 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT 0x0040 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_MASK 0x0040 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_SHIFT 6 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_WIDTH 1 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT 0x0020 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_MASK 0x0020 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_SHIFT 5 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_WIDTH 1 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT 0x0010 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_MASK 0x0010 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_SHIFT 4 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_WIDTH 1 /* AUXADC_DCOMP1_EINT */ +#define WM831X_RTC_PER_EINT 0x0008 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_MASK 0x0008 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_SHIFT 3 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_WIDTH 1 /* RTC_PER_EINT */ +#define WM831X_RTC_ALM_EINT 0x0004 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_MASK 0x0004 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_SHIFT 2 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_WIDTH 1 /* RTC_ALM_EINT */ +#define WM831X_TEMP_THW_EINT 0x0002 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_MASK 0x0002 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_SHIFT 1 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_WIDTH 1 /* TEMP_THW_EINT */ + +/* + * R16402 (0x4012) - Interrupt Status 2 + */ +#define WM831X_CHG_BATT_HOT_EINT 0x8000 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_MASK 0x8000 
/* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_SHIFT 15 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_WIDTH 1 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_COLD_EINT 0x4000 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_MASK 0x4000 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_SHIFT 14 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_WIDTH 1 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT 0x2000 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_MASK 0x2000 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_SHIFT 13 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_WIDTH 1 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_OV_EINT 0x1000 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_MASK 0x1000 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_SHIFT 12 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_WIDTH 1 /* CHG_OV_EINT */ +#define WM831X_CHG_END_EINT 0x0800 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_MASK 0x0800 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_SHIFT 11 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_WIDTH 1 /* CHG_END_EINT */ +#define WM831X_CHG_TO_EINT 0x0400 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_MASK 0x0400 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_SHIFT 10 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_WIDTH 1 /* CHG_TO_EINT */ +#define WM831X_CHG_MODE_EINT 0x0200 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_MASK 0x0200 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_SHIFT 9 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_WIDTH 1 /* CHG_MODE_EINT */ +#define WM831X_CHG_START_EINT 0x0100 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_MASK 0x0100 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_SHIFT 8 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_WIDTH 1 /* CHG_START_EINT */ +#define WM831X_CS2_EINT 0x0080 /* CS2_EINT */ +#define WM831X_CS2_EINT_MASK 0x0080 /* CS2_EINT */ +#define WM831X_CS2_EINT_SHIFT 7 /* CS2_EINT */ +#define WM831X_CS2_EINT_WIDTH 1 /* CS2_EINT */ +#define WM831X_CS1_EINT 0x0040 /* CS1_EINT */ +#define WM831X_CS1_EINT_MASK 0x0040 /* CS1_EINT */ +#define WM831X_CS1_EINT_SHIFT 6 /* CS1_EINT */ +#define WM831X_CS1_EINT_WIDTH 1 /* CS1_EINT */ +#define WM831X_OTP_CMD_END_EINT 0x0020 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_MASK 0x0020 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_SHIFT 5 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_WIDTH 1 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_ERR_EINT 0x0010 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_MASK 0x0010 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_SHIFT 4 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_WIDTH 1 /* OTP_ERR_EINT */ +#define WM831X_PS_POR_EINT 0x0004 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_MASK 0x0004 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_SHIFT 2 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_WIDTH 1 /* PS_POR_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT 0x0002 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_MASK 0x0002 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_SHIFT 1 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_WIDTH 1 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_ON_WAKE_EINT 0x0001 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_MASK 0x0001 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_SHIFT 0 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_WIDTH 1 /* PS_ON_WAKE_EINT */ + +/* + * R16403 (0x4013) - Interrupt Status 3 + */ +#define WM831X_UV_LDO10_EINT 
0x0200 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_MASK 0x0200 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_SHIFT 9 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_WIDTH 1 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO9_EINT 0x0100 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_MASK 0x0100 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_SHIFT 8 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_WIDTH 1 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO8_EINT 0x0080 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_MASK 0x0080 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_SHIFT 7 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_WIDTH 1 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO7_EINT 0x0040 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_MASK 0x0040 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_SHIFT 6 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_WIDTH 1 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO6_EINT 0x0020 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_MASK 0x0020 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_SHIFT 5 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_WIDTH 1 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO5_EINT 0x0010 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_MASK 0x0010 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_SHIFT 4 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_WIDTH 1 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO4_EINT 0x0008 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_MASK 0x0008 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_SHIFT 3 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_WIDTH 1 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO3_EINT 0x0004 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO3_EINT_MASK 0x0004 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO3_EINT_SHIFT 2 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO3_EINT_WIDTH 1 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO2_EINT 0x0002 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_MASK 0x0002 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_SHIFT 1 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_WIDTH 1 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO1_EINT 0x0001 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_MASK 0x0001 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_SHIFT 0 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_WIDTH 1 /* UV_LDO1_EINT */ + +/* + * R16404 (0x4014) - Interrupt Status 4 + */ +#define WM831X_HC_DC2_EINT 0x0200 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_MASK 0x0200 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_SHIFT 9 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_WIDTH 1 /* HC_DC2_EINT */ +#define WM831X_HC_DC1_EINT 0x0100 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_MASK 0x0100 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_SHIFT 8 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_WIDTH 1 /* HC_DC1_EINT */ +#define WM831X_UV_DC4_EINT 0x0008 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_MASK 0x0008 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_SHIFT 3 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_WIDTH 1 /* UV_DC4_EINT */ +#define WM831X_UV_DC3_EINT 0x0004 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_MASK 0x0004 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_SHIFT 2 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_WIDTH 1 /* UV_DC3_EINT */ +#define WM831X_UV_DC2_EINT 0x0002 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_MASK 0x0002 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_SHIFT 1 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_WIDTH 1 /* UV_DC2_EINT */ +#define WM831X_UV_DC1_EINT 0x0001 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_MASK 0x0001 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_SHIFT 0 /* 
UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_WIDTH 1 /* UV_DC1_EINT */ + +/* + * R16405 (0x4015) - Interrupt Status 5 + */ +#define WM831X_GP16_EINT 0x8000 /* GP16_EINT */ +#define WM831X_GP16_EINT_MASK 0x8000 /* GP16_EINT */ +#define WM831X_GP16_EINT_SHIFT 15 /* GP16_EINT */ +#define WM831X_GP16_EINT_WIDTH 1 /* GP16_EINT */ +#define WM831X_GP15_EINT 0x4000 /* GP15_EINT */ +#define WM831X_GP15_EINT_MASK 0x4000 /* GP15_EINT */ +#define WM831X_GP15_EINT_SHIFT 14 /* GP15_EINT */ +#define WM831X_GP15_EINT_WIDTH 1 /* GP15_EINT */ +#define WM831X_GP14_EINT 0x2000 /* GP14_EINT */ +#define WM831X_GP14_EINT_MASK 0x2000 /* GP14_EINT */ +#define WM831X_GP14_EINT_SHIFT 13 /* GP14_EINT */ +#define WM831X_GP14_EINT_WIDTH 1 /* GP14_EINT */ +#define WM831X_GP13_EINT 0x1000 /* GP13_EINT */ +#define WM831X_GP13_EINT_MASK 0x1000 /* GP13_EINT */ +#define WM831X_GP13_EINT_SHIFT 12 /* GP13_EINT */ +#define WM831X_GP13_EINT_WIDTH 1 /* GP13_EINT */ +#define WM831X_GP12_EINT 0x0800 /* GP12_EINT */ +#define WM831X_GP12_EINT_MASK 0x0800 /* GP12_EINT */ +#define WM831X_GP12_EINT_SHIFT 11 /* GP12_EINT */ +#define WM831X_GP12_EINT_WIDTH 1 /* GP12_EINT */ +#define WM831X_GP11_EINT 0x0400 /* GP11_EINT */ +#define WM831X_GP11_EINT_MASK 0x0400 /* GP11_EINT */ +#define WM831X_GP11_EINT_SHIFT 10 /* GP11_EINT */ +#define WM831X_GP11_EINT_WIDTH 1 /* GP11_EINT */ +#define WM831X_GP10_EINT 0x0200 /* GP10_EINT */ +#define WM831X_GP10_EINT_MASK 0x0200 /* GP10_EINT */ +#define WM831X_GP10_EINT_SHIFT 9 /* GP10_EINT */ +#define WM831X_GP10_EINT_WIDTH 1 /* GP10_EINT */ +#define WM831X_GP9_EINT 0x0100 /* GP9_EINT */ +#define WM831X_GP9_EINT_MASK 0x0100 /* GP9_EINT */ +#define WM831X_GP9_EINT_SHIFT 8 /* GP9_EINT */ +#define WM831X_GP9_EINT_WIDTH 1 /* GP9_EINT */ +#define WM831X_GP8_EINT 0x0080 /* GP8_EINT */ +#define WM831X_GP8_EINT_MASK 0x0080 /* GP8_EINT */ +#define WM831X_GP8_EINT_SHIFT 7 /* GP8_EINT */ +#define WM831X_GP8_EINT_WIDTH 1 /* GP8_EINT */ +#define WM831X_GP7_EINT 0x0040 /* GP7_EINT */ +#define WM831X_GP7_EINT_MASK 0x0040 /* GP7_EINT */ +#define WM831X_GP7_EINT_SHIFT 6 /* GP7_EINT */ +#define WM831X_GP7_EINT_WIDTH 1 /* GP7_EINT */ +#define WM831X_GP6_EINT 0x0020 /* GP6_EINT */ +#define WM831X_GP6_EINT_MASK 0x0020 /* GP6_EINT */ +#define WM831X_GP6_EINT_SHIFT 5 /* GP6_EINT */ +#define WM831X_GP6_EINT_WIDTH 1 /* GP6_EINT */ +#define WM831X_GP5_EINT 0x0010 /* GP5_EINT */ +#define WM831X_GP5_EINT_MASK 0x0010 /* GP5_EINT */ +#define WM831X_GP5_EINT_SHIFT 4 /* GP5_EINT */ +#define WM831X_GP5_EINT_WIDTH 1 /* GP5_EINT */ +#define WM831X_GP4_EINT 0x0008 /* GP4_EINT */ +#define WM831X_GP4_EINT_MASK 0x0008 /* GP4_EINT */ +#define WM831X_GP4_EINT_SHIFT 3 /* GP4_EINT */ +#define WM831X_GP4_EINT_WIDTH 1 /* GP4_EINT */ +#define WM831X_GP3_EINT 0x0004 /* GP3_EINT */ +#define WM831X_GP3_EINT_MASK 0x0004 /* GP3_EINT */ +#define WM831X_GP3_EINT_SHIFT 2 /* GP3_EINT */ +#define WM831X_GP3_EINT_WIDTH 1 /* GP3_EINT */ +#define WM831X_GP2_EINT 0x0002 /* GP2_EINT */ +#define WM831X_GP2_EINT_MASK 0x0002 /* GP2_EINT */ +#define WM831X_GP2_EINT_SHIFT 1 /* GP2_EINT */ +#define WM831X_GP2_EINT_WIDTH 1 /* GP2_EINT */ +#define WM831X_GP1_EINT 0x0001 /* GP1_EINT */ +#define WM831X_GP1_EINT_MASK 0x0001 /* GP1_EINT */ +#define WM831X_GP1_EINT_SHIFT 0 /* GP1_EINT */ +#define WM831X_GP1_EINT_WIDTH 1 /* GP1_EINT */ + +/* + * R16407 (0x4017) - IRQ Config + */ +#define WM831X_IRQ_OD 0x0002 /* IRQ_OD */ +#define WM831X_IRQ_OD_MASK 0x0002 /* IRQ_OD */ +#define WM831X_IRQ_OD_SHIFT 1 /* IRQ_OD */ +#define WM831X_IRQ_OD_WIDTH 1 /* IRQ_OD */ +#define WM831X_IM_IRQ 
0x0001 /* IM_IRQ */ +#define WM831X_IM_IRQ_MASK 0x0001 /* IM_IRQ */ +#define WM831X_IM_IRQ_SHIFT 0 /* IM_IRQ */ +#define WM831X_IM_IRQ_WIDTH 1 /* IM_IRQ */ + +/* + * R16408 (0x4018) - System Interrupts Mask + */ +#define WM831X_IM_PS_INT 0x8000 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_MASK 0x8000 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_SHIFT 15 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_WIDTH 1 /* IM_PS_INT */ +#define WM831X_IM_TEMP_INT 0x4000 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_MASK 0x4000 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_SHIFT 14 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_WIDTH 1 /* IM_TEMP_INT */ +#define WM831X_IM_GP_INT 0x2000 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_MASK 0x2000 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_SHIFT 13 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_WIDTH 1 /* IM_GP_INT */ +#define WM831X_IM_ON_PIN_INT 0x1000 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_MASK 0x1000 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_SHIFT 12 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_WIDTH 1 /* IM_ON_PIN_INT */ +#define WM831X_IM_WDOG_INT 0x0800 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_MASK 0x0800 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_SHIFT 11 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_WIDTH 1 /* IM_WDOG_INT */ +#define WM831X_IM_TCHDATA_INT 0x0400 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_MASK 0x0400 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_SHIFT 10 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_WIDTH 1 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHPD_INT 0x0200 /* IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_MASK 0x0200 /* IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_SHIFT 9 /* IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_WIDTH 1 /* IM_TCHPD_INT */ +#define WM831X_IM_AUXADC_INT 0x0100 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_MASK 0x0100 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_SHIFT 8 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_WIDTH 1 /* IM_AUXADC_INT */ +#define WM831X_IM_PPM_INT 0x0080 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_MASK 0x0080 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_SHIFT 7 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_WIDTH 1 /* IM_PPM_INT */ +#define WM831X_IM_CS_INT 0x0040 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_MASK 0x0040 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_SHIFT 6 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_WIDTH 1 /* IM_CS_INT */ +#define WM831X_IM_RTC_INT 0x0020 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_MASK 0x0020 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_SHIFT 5 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_WIDTH 1 /* IM_RTC_INT */ +#define WM831X_IM_OTP_INT 0x0010 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_MASK 0x0010 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_SHIFT 4 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_WIDTH 1 /* IM_OTP_INT */ +#define WM831X_IM_CHILD_INT 0x0008 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_MASK 0x0008 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_SHIFT 3 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_WIDTH 1 /* IM_CHILD_INT */ +#define WM831X_IM_CHG_INT 0x0004 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_MASK 0x0004 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_SHIFT 2 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_WIDTH 1 /* IM_CHG_INT */ +#define WM831X_IM_HC_INT 0x0002 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_MASK 0x0002 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_SHIFT 1 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_WIDTH 1 /* IM_HC_INT */ +#define WM831X_IM_UV_INT 0x0001 /* IM_UV_INT */ +#define 
WM831X_IM_UV_INT_MASK 0x0001 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_SHIFT 0 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_WIDTH 1 /* IM_UV_INT */ + +/* + * R16409 (0x4019) - Interrupt Status 1 Mask + */ +#define WM831X_IM_PPM_SYSLO_EINT 0x8000 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_MASK 0x8000 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_SHIFT 15 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_WIDTH 1 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT 0x4000 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_MASK 0x4000 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_SHIFT 14 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_WIDTH 1 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT 0x2000 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_MASK 0x2000 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_SHIFT 13 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_WIDTH 1 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_ON_PIN_EINT 0x1000 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_MASK 0x1000 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_SHIFT 12 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_WIDTH 1 /* IM_ON_PIN_EINT */ +#define WM831X_IM_WDOG_TO_EINT 0x0800 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_MASK 0x0800 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_SHIFT 11 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_WIDTH 1 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_TCHDATA_EINT 0x0400 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_MASK 0x0400 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_SHIFT 10 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_WIDTH 1 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHPD_EINT 0x0200 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_MASK 0x0200 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_SHIFT 9 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_WIDTH 1 /* IM_TCHPD_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT 0x0100 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_MASK 0x0100 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_SHIFT 8 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_WIDTH 1 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT 0x0080 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_MASK 0x0080 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_SHIFT 7 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_WIDTH 1 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT 0x0040 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_MASK 0x0040 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_SHIFT 6 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_WIDTH 1 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT 0x0020 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_MASK 0x0020 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_SHIFT 5 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_WIDTH 1 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT 0x0010 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_MASK 0x0010 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_SHIFT 4 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_WIDTH 1 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_RTC_PER_EINT 0x0008 /* 
IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_MASK 0x0008 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_SHIFT 3 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_WIDTH 1 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_ALM_EINT 0x0004 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_MASK 0x0004 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_SHIFT 2 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_WIDTH 1 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_TEMP_THW_EINT 0x0002 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_MASK 0x0002 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_SHIFT 1 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_WIDTH 1 /* IM_TEMP_THW_EINT */ + +/* + * R16410 (0x401A) - Interrupt Status 2 Mask + */ +#define WM831X_IM_CHG_BATT_HOT_EINT 0x8000 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_MASK 0x8000 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_SHIFT 15 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_WIDTH 1 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT 0x4000 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_MASK 0x4000 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_SHIFT 14 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_WIDTH 1 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT 0x2000 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_MASK 0x2000 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_SHIFT 13 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_WIDTH 1 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_OV_EINT 0x1000 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_MASK 0x1000 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_SHIFT 12 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_WIDTH 1 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_END_EINT 0x0800 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_MASK 0x0800 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_SHIFT 11 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_WIDTH 1 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_TO_EINT 0x0400 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_MASK 0x0400 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_SHIFT 10 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_WIDTH 1 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_MODE_EINT 0x0200 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_MASK 0x0200 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_SHIFT 9 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_WIDTH 1 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_START_EINT 0x0100 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_MASK 0x0100 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_SHIFT 8 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_WIDTH 1 /* IM_CHG_START_EINT */ +#define WM831X_IM_CS2_EINT 0x0080 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_MASK 0x0080 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_SHIFT 7 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_WIDTH 1 /* IM_CS2_EINT */ +#define WM831X_IM_CS1_EINT 0x0040 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_MASK 0x0040 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_SHIFT 6 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_WIDTH 1 /* IM_CS1_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT 0x0020 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_MASK 0x0020 /* IM_OTP_CMD_END_EINT */ +#define 
WM831X_IM_OTP_CMD_END_EINT_SHIFT 5 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_WIDTH 1 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_ERR_EINT 0x0010 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_MASK 0x0010 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_SHIFT 4 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_WIDTH 1 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_PS_POR_EINT 0x0004 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_MASK 0x0004 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_SHIFT 2 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_WIDTH 1 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT 0x0002 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_MASK 0x0002 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_SHIFT 1 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_WIDTH 1 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT 0x0001 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_MASK 0x0001 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_SHIFT 0 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_WIDTH 1 /* IM_PS_ON_WAKE_EINT */ + +/* + * R16411 (0x401B) - Interrupt Status 3 Mask + */ +#define WM831X_IM_UV_LDO10_EINT 0x0200 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_MASK 0x0200 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_SHIFT 9 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_WIDTH 1 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO9_EINT 0x0100 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_MASK 0x0100 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_SHIFT 8 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_WIDTH 1 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO8_EINT 0x0080 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_MASK 0x0080 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_SHIFT 7 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_WIDTH 1 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO7_EINT 0x0040 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_MASK 0x0040 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_SHIFT 6 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_WIDTH 1 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO6_EINT 0x0020 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_MASK 0x0020 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_SHIFT 5 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_WIDTH 1 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO5_EINT 0x0010 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_MASK 0x0010 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_SHIFT 4 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_WIDTH 1 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO4_EINT 0x0008 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_MASK 0x0008 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_SHIFT 3 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_WIDTH 1 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO3_EINT 0x0004 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_MASK 0x0004 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_SHIFT 2 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_WIDTH 1 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO2_EINT 0x0002 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_MASK 0x0002 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_SHIFT 1 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_WIDTH 1 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO1_EINT 0x0001 
/* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_MASK 0x0001 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_SHIFT 0 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_WIDTH 1 /* IM_UV_LDO1_EINT */ + +/* + * R16412 (0x401C) - Interrupt Status 4 Mask + */ +#define WM831X_IM_HC_DC2_EINT 0x0200 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_MASK 0x0200 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_SHIFT 9 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_WIDTH 1 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC1_EINT 0x0100 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_MASK 0x0100 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_SHIFT 8 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_WIDTH 1 /* IM_HC_DC1_EINT */ +#define WM831X_IM_UV_DC4_EINT 0x0008 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_MASK 0x0008 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_SHIFT 3 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_WIDTH 1 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC3_EINT 0x0004 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_MASK 0x0004 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_SHIFT 2 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_WIDTH 1 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC2_EINT 0x0002 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_MASK 0x0002 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_SHIFT 1 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_WIDTH 1 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC1_EINT 0x0001 /* IM_UV_DC1_EINT */ +#define WM831X_IM_UV_DC1_EINT_MASK 0x0001 /* IM_UV_DC1_EINT */ +#define WM831X_IM_UV_DC1_EINT_SHIFT 0 /* IM_UV_DC1_EINT */ +#define WM831X_IM_UV_DC1_EINT_WIDTH 1 /* IM_UV_DC1_EINT */ + +/* + * R16413 (0x401D) - Interrupt Status 5 Mask + */ +#define WM831X_IM_GP16_EINT 0x8000 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_MASK 0x8000 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_SHIFT 15 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_WIDTH 1 /* IM_GP16_EINT */ +#define WM831X_IM_GP15_EINT 0x4000 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_MASK 0x4000 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_SHIFT 14 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_WIDTH 1 /* IM_GP15_EINT */ +#define WM831X_IM_GP14_EINT 0x2000 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_MASK 0x2000 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_SHIFT 13 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_WIDTH 1 /* IM_GP14_EINT */ +#define WM831X_IM_GP13_EINT 0x1000 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_MASK 0x1000 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_SHIFT 12 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_WIDTH 1 /* IM_GP13_EINT */ +#define WM831X_IM_GP12_EINT 0x0800 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_MASK 0x0800 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_SHIFT 11 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_WIDTH 1 /* IM_GP12_EINT */ +#define WM831X_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */ +#define WM831X_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */ +#define WM831X_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */ +#define 
WM831X_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */ +#define WM831X_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */ +#define WM831X_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */ +#define WM831X_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */ +#define WM831X_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ +#define WM831X_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ +#define WM831X_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ +#define WM831X_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ +#define WM831X_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ + + +#endif -- cgit v1.2.3-70-g09d2 From 7e9f9fd4b8285c52c0950a1929864346de5caa6d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:45:54 +0100 Subject: mfd: Add WM831x AUXADC support The WM831x contains an auxiliary ADC with a number of switchable inputs which is used to monitor some of the voltages and temperatures in the system and has some external inputs which can be used for machine specific purposes. Provide an API allowing drivers to read values from the ADC. An internal reference voltage is provided to allow callibration of the ADC. This is used to calibrate the device at startup. The hardware also supports continuous readings and digital comparators. These are not yet supported by the driver. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 101 ++++++++++++++++++ include/linux/mfd/wm831x/auxadc.h | 216 ++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/core.h | 2 + 3 files changed, 319 insertions(+) create mode 100644 include/linux/mfd/wm831x/auxadc.h (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index eb63d22160d..42bef1dd2ca 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -15,11 +15,14 @@ #include #include #include +#include +#include #include #include #include #include +#include enum wm831x_parent { WM8310 = 0, @@ -244,6 +247,103 @@ out: } EXPORT_SYMBOL_GPL(wm831x_set_bits); +/** + * wm831x_auxadc_read: Read a value from the WM831x AUXADC + * + * @wm831x: Device to read from. + * @input: AUXADC input to read. 
+ */ +int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) +{ + int tries = 10; + int ret, src; + + mutex_lock(&wm831x->auxadc_lock); + + ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, + WM831X_AUX_ENA, WM831X_AUX_ENA); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret); + goto out; + } + + /* We force a single source at present */ + src = input; + ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE, + 1 << src); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret); + goto out; + } + + ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, + WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret); + goto disable; + } + + do { + msleep(1); + + ret = wm831x_reg_read(wm831x, WM831X_AUXADC_CONTROL); + if (ret < 0) + ret = WM831X_AUX_CVT_ENA; + } while ((ret & WM831X_AUX_CVT_ENA) && --tries); + + if (ret & WM831X_AUX_CVT_ENA) { + dev_err(wm831x->dev, "Timed out reading AUXADC\n"); + ret = -EBUSY; + goto disable; + } + + ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret); + } else { + src = ((ret & WM831X_AUX_DATA_SRC_MASK) + >> WM831X_AUX_DATA_SRC_SHIFT) - 1; + + if (src == 14) + src = WM831X_AUX_CAL; + + if (src != input) { + dev_err(wm831x->dev, "Data from source %d not %d\n", + src, input); + ret = -EINVAL; + } else { + ret &= WM831X_AUX_DATA_MASK; + } + } + +disable: + wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0); +out: + mutex_unlock(&wm831x->auxadc_lock); + return ret; +} +EXPORT_SYMBOL_GPL(wm831x_auxadc_read); + +/** + * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC + * + * @wm831x: Device to read from. + * @input: AUXADC input to read. + */ +int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input) +{ + int ret; + + ret = wm831x_auxadc_read(wm831x, input); + if (ret < 0) + return ret; + + ret *= 1465; + + return ret; +} +EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv); + static struct resource wm831x_dcdc1_resources[] = { { .start = WM831X_DC1_CONTROL_1, @@ -1084,6 +1184,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) mutex_init(&wm831x->io_lock); mutex_init(&wm831x->key_lock); + mutex_init(&wm831x->auxadc_lock); dev_set_drvdata(wm831x->dev, wm831x); ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID); diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h new file mode 100644 index 00000000000..b132067e9e9 --- /dev/null +++ b/include/linux/mfd/wm831x/auxadc.h @@ -0,0 +1,216 @@ +/* + * include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_AUXADC_H__ +#define __MFD_WM831X_AUXADC_H__ + +/* + * R16429 (0x402D) - AuxADC Data + */ +#define WM831X_AUX_DATA_SRC_MASK 0xF000 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_SRC_SHIFT 12 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_SRC_WIDTH 4 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_MASK 0x0FFF /* AUX_DATA - [11:0] */ +#define WM831X_AUX_DATA_SHIFT 0 /* AUX_DATA - [11:0] */ +#define WM831X_AUX_DATA_WIDTH 12 /* AUX_DATA - [11:0] */ + +/* + * R16430 (0x402E) - AuxADC Control + */ +#define WM831X_AUX_ENA 0x8000 /* AUX_ENA */ +#define WM831X_AUX_ENA_MASK 0x8000 /* AUX_ENA */ +#define WM831X_AUX_ENA_SHIFT 15 /* AUX_ENA */ +#define WM831X_AUX_ENA_WIDTH 1 /* AUX_ENA */ +#define WM831X_AUX_CVT_ENA 0x4000 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_MASK 0x4000 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_SHIFT 14 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_WIDTH 1 /* AUX_CVT_ENA */ +#define WM831X_AUX_SLPENA 0x1000 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_MASK 0x1000 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_SHIFT 12 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_WIDTH 1 /* AUX_SLPENA */ +#define WM831X_AUX_FRC_ENA 0x0800 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_MASK 0x0800 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_SHIFT 11 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_WIDTH 1 /* AUX_FRC_ENA */ +#define WM831X_AUX_RATE_MASK 0x003F /* AUX_RATE - [5:0] */ +#define WM831X_AUX_RATE_SHIFT 0 /* AUX_RATE - [5:0] */ +#define WM831X_AUX_RATE_WIDTH 6 /* AUX_RATE - [5:0] */ + +/* + * R16431 (0x402F) - AuxADC Source + */ +#define WM831X_AUX_CAL_SEL 0x8000 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_MASK 0x8000 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_SHIFT 15 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_WIDTH 1 /* AUX_CAL_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL 0x0400 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_MASK 0x0400 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_SHIFT 10 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_WIDTH 1 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_WALL_SEL 0x0200 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_MASK 0x0200 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_SHIFT 9 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_WIDTH 1 /* AUX_WALL_SEL */ +#define WM831X_AUX_BATT_SEL 0x0100 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_MASK 0x0100 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_SHIFT 8 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_WIDTH 1 /* AUX_BATT_SEL */ +#define WM831X_AUX_USB_SEL 0x0080 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_MASK 0x0080 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_SHIFT 7 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_WIDTH 1 /* AUX_USB_SEL */ +#define WM831X_AUX_SYSVDD_SEL 0x0040 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_MASK 0x0040 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_SHIFT 6 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_WIDTH 1 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL 0x0020 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_MASK 0x0020 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_SHIFT 5 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_WIDTH 1 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL 0x0010 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_MASK 0x0010 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_SHIFT 4 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_WIDTH 1 /* AUX_CHIP_TEMP_SEL */ +#define 
WM831X_AUX_AUX4_SEL 0x0008 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_MASK 0x0008 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_SHIFT 3 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_WIDTH 1 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX3_SEL 0x0004 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_MASK 0x0004 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_SHIFT 2 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_WIDTH 1 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX2_SEL 0x0002 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_MASK 0x0002 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_SHIFT 1 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_WIDTH 1 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX1_SEL 0x0001 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_MASK 0x0001 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_SHIFT 0 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_WIDTH 1 /* AUX_AUX1_SEL */ + +/* + * R16432 (0x4030) - Comparator Control + */ +#define WM831X_DCOMP4_STS 0x0800 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_MASK 0x0800 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_SHIFT 11 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_WIDTH 1 /* DCOMP4_STS */ +#define WM831X_DCOMP3_STS 0x0400 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_MASK 0x0400 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_SHIFT 10 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_WIDTH 1 /* DCOMP3_STS */ +#define WM831X_DCOMP2_STS 0x0200 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_MASK 0x0200 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_SHIFT 9 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_WIDTH 1 /* DCOMP2_STS */ +#define WM831X_DCOMP1_STS 0x0100 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_MASK 0x0100 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_SHIFT 8 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_WIDTH 1 /* DCOMP1_STS */ +#define WM831X_DCMP4_ENA 0x0008 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_MASK 0x0008 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_SHIFT 3 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_WIDTH 1 /* DCMP4_ENA */ +#define WM831X_DCMP3_ENA 0x0004 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_MASK 0x0004 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_SHIFT 2 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_WIDTH 1 /* DCMP3_ENA */ +#define WM831X_DCMP2_ENA 0x0002 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_MASK 0x0002 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_SHIFT 1 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_WIDTH 1 /* DCMP2_ENA */ +#define WM831X_DCMP1_ENA 0x0001 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_MASK 0x0001 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_SHIFT 0 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_WIDTH 1 /* DCMP1_ENA */ + +/* + * R16433 (0x4031) - Comparator 1 + */ +#define WM831X_DCMP1_SRC_MASK 0xE000 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_SRC_SHIFT 13 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_SRC_WIDTH 3 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_GT 0x1000 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_MASK 0x1000 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_SHIFT 12 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_WIDTH 1 /* DCMP1_GT */ +#define WM831X_DCMP1_THR_MASK 0x0FFF /* DCMP1_THR - [11:0] */ +#define WM831X_DCMP1_THR_SHIFT 0 /* DCMP1_THR - [11:0] */ +#define WM831X_DCMP1_THR_WIDTH 12 /* DCMP1_THR - [11:0] */ + +/* + * R16434 (0x4032) - Comparator 2 + */ +#define WM831X_DCMP2_SRC_MASK 0xE000 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_SRC_SHIFT 13 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_SRC_WIDTH 3 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_GT 0x1000 /* DCMP2_GT */ +#define WM831X_DCMP2_GT_MASK 0x1000 /* DCMP2_GT */ 
+#define WM831X_DCMP2_GT_SHIFT 12 /* DCMP2_GT */ +#define WM831X_DCMP2_GT_WIDTH 1 /* DCMP2_GT */ +#define WM831X_DCMP2_THR_MASK 0x0FFF /* DCMP2_THR - [11:0] */ +#define WM831X_DCMP2_THR_SHIFT 0 /* DCMP2_THR - [11:0] */ +#define WM831X_DCMP2_THR_WIDTH 12 /* DCMP2_THR - [11:0] */ + +/* + * R16435 (0x4033) - Comparator 3 + */ +#define WM831X_DCMP3_SRC_MASK 0xE000 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_SRC_SHIFT 13 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_SRC_WIDTH 3 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_GT 0x1000 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_MASK 0x1000 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_SHIFT 12 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_WIDTH 1 /* DCMP3_GT */ +#define WM831X_DCMP3_THR_MASK 0x0FFF /* DCMP3_THR - [11:0] */ +#define WM831X_DCMP3_THR_SHIFT 0 /* DCMP3_THR - [11:0] */ +#define WM831X_DCMP3_THR_WIDTH 12 /* DCMP3_THR - [11:0] */ + +/* + * R16436 (0x4034) - Comparator 4 + */ +#define WM831X_DCMP4_SRC_MASK 0xE000 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_SRC_SHIFT 13 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_SRC_WIDTH 3 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_GT 0x1000 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_MASK 0x1000 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_SHIFT 12 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_WIDTH 1 /* DCMP4_GT */ +#define WM831X_DCMP4_THR_MASK 0x0FFF /* DCMP4_THR - [11:0] */ +#define WM831X_DCMP4_THR_SHIFT 0 /* DCMP4_THR - [11:0] */ +#define WM831X_DCMP4_THR_WIDTH 12 /* DCMP4_THR - [11:0] */ + +#define WM831X_AUX_CAL_FACTOR 0xfff +#define WM831X_AUX_CAL_NOMINAL 0x222 + +enum wm831x_auxadc { + WM831X_AUX_CAL = 15, + WM831X_AUX_BKUP_BATT = 10, + WM831X_AUX_WALL = 9, + WM831X_AUX_BATT = 8, + WM831X_AUX_USB = 7, + WM831X_AUX_SYSVDD = 6, + WM831X_AUX_BATT_TEMP = 5, + WM831X_AUX_CHIP_TEMP = 4, + WM831X_AUX_AUX4 = 3, + WM831X_AUX_AUX3 = 2, + WM831X_AUX_AUX2 = 1, + WM831X_AUX_AUX1 = 0, +}; + +int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input); +int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input); + +#endif diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index b96c9355b16..d7134dfba56 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -234,6 +234,8 @@ struct wm831x { unsigned int irq_base; int irq_masks[5]; + struct mutex auxadc_lock; + /* The WM831x has a security key blocking access to certain * registers. The mutex is taken by the accessors for locking * and unlocking the security key, locked is used to fail -- cgit v1.2.3-70-g09d2 From 63aed85e3535b4603798184cc941e49de386d354 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:45:55 +0100 Subject: mfd: Conditionally add WM831x backlight subdevice The WM831x backlight driver requires at least the specification of the current sink to use and a maximum current to allow them to function and will actively interfere with other users of the regulators it uses if misconfigured so only register the subdevice for it if this platform data has been supplied. 
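For illustration only (this board code is not part of the patch and the isink/max_uA numbers are made up), a machine that wants the backlight subdevice registered could pass the new platform data roughly as follows, using the wm831x_backlight_pdata structure added to pdata.h below:

/* Hypothetical board support sketch: providing backlight platform data
 * causes the core to register the wm831x-backlight subdevice.  The
 * current sink number and current limit are illustrative values only.
 */
static struct wm831x_backlight_pdata board_backlight_pdata = {
        .isink = 1,             /* drive the backlight from ISINK1 */
        .max_uA = 20000,        /* maximum backlight current, in microamps */
};

static struct wm831x_pdata board_wm831x_pdata = {
        .backlight = &board_backlight_pdata,
        /* other wm831x platform data omitted */
};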
Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 15 +++++++++++++++ include/linux/mfd/wm831x/pdata.h | 6 ++++++ 2 files changed, 21 insertions(+) (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 42bef1dd2ca..bc40ea315cc 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -1172,6 +1172,12 @@ static struct mfd_cell wm8312_devs[] = { }, }; +static struct mfd_cell backlight_devs[] = { + { + .name = "wm831x-backlight", + }, +}; + /* * Instantiate the generic non-control parts of the device. */ @@ -1325,6 +1331,15 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) goto err_irq; } + if (pdata && pdata->backlight) { + /* Treat errors as non-critical */ + ret = mfd_add_devices(wm831x->dev, -1, backlight_devs, + ARRAY_SIZE(backlight_devs), NULL, 0); + if (ret < 0) + dev_err(wm831x->dev, "Failed to add backlight: %d\n", + ret); + } + if (pdata && pdata->post_init) { ret = pdata->post_init(wm831x); if (ret != 0) { diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 571e6013626..90d820260aa 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h @@ -18,6 +18,11 @@ struct wm831x; struct regulator_init_data; +struct wm831x_backlight_pdata { + int isink; /** ISINK to use, 1 or 2 */ + int max_uA; /** Maximum current to allow */ +}; + struct wm831x_backup_pdata { int charger_enable; int no_constant_voltage; /** Disable constant voltage charging */ @@ -87,6 +92,7 @@ struct wm831x_pdata { int (*post_init)(struct wm831x *wm831x); int gpio_base; + struct wm831x_backlight_pdata *backlight; struct wm831x_backup_pdata *backup; struct wm831x_battery_pdata *battery; struct wm831x_touch_pdata *touch; -- cgit v1.2.3-70-g09d2 From 6704e5171ba9053ba173bcd807c7392d2076bdb4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:45:56 +0100 Subject: mfd: Add basic WM831x OTP support The WM831x series of devices use OTP (One Time Programmable, a type of PROM) to store system configuration. At run time this data is visible via registers. Currently the only explicitly supported feature is that the unique ID provided by every WM831x device is exported to user space via sysfs. Other configuration data may be read by system-specific code in the pre_init() and post_init() platform data operations. 
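As a rough sketch only (not part of the patch; the register choice, the post_init() wiring and the board_config variable are assumptions for illustration), system-specific code could pick up OTP-backed configuration from post_init() along these lines, using wm831x_reg_read() and the field definitions from the otp.h header added below:

/* Hypothetical post_init() callback, registered through wm831x_pdata,
 * that reads the Customer OTP ID register (R30736, 0x7810 per otp.h)
 * and stashes the customer ID field for later machine-specific use.
 */
static int board_config;        /* illustrative only */

static int board_wm831x_post_init(struct wm831x *wm831x)
{
        int val = wm831x_reg_read(wm831x, 0x7810);

        if (val < 0)
                return val;

        board_config = (val & WM831X_OTP_CUST_ID_MASK)
                        >> WM831X_OTP_CUST_ID_SHIFT;

        return 0;
}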
Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 4 + drivers/mfd/wm831x-otp.c | 83 +++++++++++++++++++++ include/linux/mfd/wm831x/otp.h | 162 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 249 insertions(+) create mode 100644 drivers/mfd/wm831x-otp.c create mode 100644 include/linux/mfd/wm831x/otp.h (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index bc40ea315cc..33eaea2f988 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -23,6 +23,7 @@ #include #include #include +#include enum wm831x_parent { WM8310 = 0, @@ -1340,6 +1341,8 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) ret); } + wm831x_otp_init(wm831x); + if (pdata && pdata->post_init) { ret = pdata->post_init(wm831x); if (ret != 0) { @@ -1360,6 +1363,7 @@ err: static void wm831x_device_exit(struct wm831x *wm831x) { + wm831x_otp_exit(wm831x); mfd_remove_devices(wm831x->dev); wm831x_irq_exit(wm831x); kfree(wm831x); diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c new file mode 100644 index 00000000000..f742745ff35 --- /dev/null +++ b/drivers/mfd/wm831x-otp.c @@ -0,0 +1,83 @@ +/* + * wm831x-otp.c -- OTP for Wolfson WM831x PMICs + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* In bytes */ +#define WM831X_UNIQUE_ID_LEN 16 + +/* Read the unique ID from the chip into id */ +static int wm831x_unique_id_read(struct wm831x *wm831x, char *id) +{ + int i, val; + + for (i = 0; i < WM831X_UNIQUE_ID_LEN / 2; i++) { + val = wm831x_reg_read(wm831x, WM831X_UNIQUE_ID_1 + i); + if (val < 0) + return val; + + id[i * 2] = (val >> 8) & 0xff; + id[(i * 2) + 1] = val & 0xff; + } + + return 0; +} + +static ssize_t wm831x_unique_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wm831x *wm831x = dev_get_drvdata(dev); + int i, rval; + char id[WM831X_UNIQUE_ID_LEN]; + ssize_t ret = 0; + + rval = wm831x_unique_id_read(wm831x, id); + if (rval < 0) + return 0; + + for (i = 0; i < WM831X_UNIQUE_ID_LEN; i++) + ret += sprintf(&buf[ret], "%02x", buf[i]); + + ret += sprintf(&buf[ret], "\n"); + + return ret; +} + +static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL); + +int wm831x_otp_init(struct wm831x *wm831x) +{ + int ret; + + ret = device_create_file(wm831x->dev, &dev_attr_unique_id); + if (ret != 0) + dev_err(wm831x->dev, "Unique ID attribute not created: %d\n", + ret); + + return ret; +} + +void wm831x_otp_exit(struct wm831x *wm831x) +{ + device_remove_file(wm831x->dev, &dev_attr_unique_id); +} + diff --git a/include/linux/mfd/wm831x/otp.h b/include/linux/mfd/wm831x/otp.h new file mode 100644 index 00000000000..ce1f81a39bf --- /dev/null +++ b/include/linux/mfd/wm831x/otp.h @@ -0,0 +1,162 @@ +/* + * include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. 
+ * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_OTP_H__ +#define __MFD_WM831X_OTP_H__ + +int wm831x_otp_init(struct wm831x *wm831x); +void wm831x_otp_exit(struct wm831x *wm831x); + +/* + * R30720 (0x7800) - Unique ID 1 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30721 (0x7801) - Unique ID 2 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30722 (0x7802) - Unique ID 3 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30723 (0x7803) - Unique ID 4 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30724 (0x7804) - Unique ID 5 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30725 (0x7805) - Unique ID 6 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30726 (0x7806) - Unique ID 7 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30727 (0x7807) - Unique ID 8 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30728 (0x7808) - Factory OTP ID + */ +#define WM831X_OTP_FACT_ID_MASK 0xFFFE /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_ID_SHIFT 1 /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_ID_WIDTH 15 /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_FINAL 0x0001 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_MASK 0x0001 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_SHIFT 0 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_WIDTH 1 /* OTP_FACT_FINAL */ + +/* + * R30729 (0x7809) - Factory OTP 1 + */ +#define WM831X_DC3_TRIM_MASK 0xF000 /* DC3_TRIM - [15:12] */ +#define WM831X_DC3_TRIM_SHIFT 12 /* DC3_TRIM - [15:12] */ +#define WM831X_DC3_TRIM_WIDTH 4 /* DC3_TRIM - [15:12] */ +#define WM831X_DC2_TRIM_MASK 0x0FC0 /* DC2_TRIM - [11:6] */ +#define WM831X_DC2_TRIM_SHIFT 6 /* DC2_TRIM - [11:6] */ +#define WM831X_DC2_TRIM_WIDTH 6 /* DC2_TRIM - [11:6] */ +#define WM831X_DC1_TRIM_MASK 0x003F /* DC1_TRIM - [5:0] */ +#define WM831X_DC1_TRIM_SHIFT 0 /* DC1_TRIM - [5:0] */ +#define WM831X_DC1_TRIM_WIDTH 6 /* DC1_TRIM - [5:0] */ + +/* + * R30730 (0x780A) - Factory OTP 2 + */ +#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ + +/* + * 
R30731 (0x780B) - Factory OTP 3 + */ +#define WM831X_OSC_TRIM_MASK 0x0780 /* OSC_TRIM - [10:7] */ +#define WM831X_OSC_TRIM_SHIFT 7 /* OSC_TRIM - [10:7] */ +#define WM831X_OSC_TRIM_WIDTH 4 /* OSC_TRIM - [10:7] */ +#define WM831X_BG_TRIM_MASK 0x0078 /* BG_TRIM - [6:3] */ +#define WM831X_BG_TRIM_SHIFT 3 /* BG_TRIM - [6:3] */ +#define WM831X_BG_TRIM_WIDTH 4 /* BG_TRIM - [6:3] */ +#define WM831X_LPBG_TRIM_MASK 0x0007 /* LPBG_TRIM - [2:0] */ +#define WM831X_LPBG_TRIM_SHIFT 0 /* LPBG_TRIM - [2:0] */ +#define WM831X_LPBG_TRIM_WIDTH 3 /* LPBG_TRIM - [2:0] */ + +/* + * R30732 (0x780C) - Factory OTP 4 + */ +#define WM831X_CHILD_I2C_ADDR_MASK 0x00FE /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CHILD_I2C_ADDR_SHIFT 1 /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CHILD_I2C_ADDR_WIDTH 7 /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CH_AW 0x0001 /* CH_AW */ +#define WM831X_CH_AW_MASK 0x0001 /* CH_AW */ +#define WM831X_CH_AW_SHIFT 0 /* CH_AW */ +#define WM831X_CH_AW_WIDTH 1 /* CH_AW */ + +/* + * R30733 (0x780D) - Factory OTP 5 + */ +#define WM831X_CHARGE_TRIM_MASK 0x003F /* CHARGE_TRIM - [5:0] */ +#define WM831X_CHARGE_TRIM_SHIFT 0 /* CHARGE_TRIM - [5:0] */ +#define WM831X_CHARGE_TRIM_WIDTH 6 /* CHARGE_TRIM - [5:0] */ + +/* + * R30736 (0x7810) - Customer OTP ID + */ +#define WM831X_OTP_AUTO_PROG 0x8000 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_MASK 0x8000 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_SHIFT 15 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_WIDTH 1 /* OTP_AUTO_PROG */ +#define WM831X_OTP_CUST_ID_MASK 0x7FFE /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_ID_SHIFT 1 /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_ID_WIDTH 14 /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_FINAL 0x0001 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_MASK 0x0001 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_SHIFT 0 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_WIDTH 1 /* OTP_CUST_FINAL */ + +/* + * R30759 (0x7827) - DBE CHECK DATA + */ +#define WM831X_DBE_VALID_DATA_MASK 0xFFFF /* DBE_VALID_DATA - [15:0] */ +#define WM831X_DBE_VALID_DATA_SHIFT 0 /* DBE_VALID_DATA - [15:0] */ +#define WM831X_DBE_VALID_DATA_WIDTH 16 /* DBE_VALID_DATA - [15:0] */ + + +#endif -- cgit v1.2.3-70-g09d2 From 698659d5f78606c698781574773f433c60176e40 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:45:57 +0100 Subject: mfd: Export ISEL values from WM831x core The current settings which can be used with the WM831x current sinks can't easily be mapped between register values and currents at run time without a lookup table since the values scale logarithmically to match the way the human eye interprets brightness. This lookup table is inclided in the core since several drivers need to use it. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 64 ++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/regulator.h | 21 ++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 include/linux/mfd/wm831x/regulator.h (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 33eaea2f988..49b7885c270 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -24,6 +24,70 @@ #include #include #include +#include + +/* Current settings - values are 2*2^(reg_val/4) microamps. These are + * exported since they are used by multiple drivers. 
+ */ +int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL] = { + 2, + 2, + 3, + 3, + 4, + 5, + 6, + 7, + 8, + 10, + 11, + 13, + 16, + 19, + 23, + 27, + 32, + 38, + 45, + 54, + 64, + 76, + 91, + 108, + 128, + 152, + 181, + 215, + 256, + 304, + 362, + 431, + 512, + 609, + 724, + 861, + 1024, + 1218, + 1448, + 1722, + 2048, + 2435, + 2896, + 3444, + 4096, + 4871, + 5793, + 6889, + 8192, + 9742, + 11585, + 13777, + 16384, + 19484, + 23170, + 27554, +}; +EXPORT_SYMBOL_GPL(wm831x_isinkv_values); enum wm831x_parent { WM8310 = 0, diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h new file mode 100644 index 00000000000..b5d58fb38b4 --- /dev/null +++ b/include/linux/mfd/wm831x/regulator.h @@ -0,0 +1,21 @@ +/* + * linux/mfd/wm831x/regulator.h -- Regulator definitons for wm831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_REGULATOR_H__ +#define __MFD_WM831X_REGULATOR_H__ + +#define WM831X_ISINK_MAX_ISEL 56 +extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; + +#endif -- cgit v1.2.3-70-g09d2 From e4b736f18f338daae141325c818187c4ab3e244c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:46:00 +0100 Subject: gpio: Add WM831X GPIO driver Add support for the GPIO pins on the WM831x. No direct support is currently supplied for configuring non-gpiolib functionality such as pull configuration and alternate functions, soft configuration of these will be provided in a future patch. Currently use of these pins as interrupts is not supported due to the ongoing issues with generic irq not support interrupt controllers on interrupt driven buses. Users can directly request the interrupts with the wm831x-specific APIs currently provided if required. Signed-off-by: Mark Brown Acked-by: David Brownell Signed-off-by: Samuel Ortiz --- drivers/gpio/Kconfig | 7 ++ drivers/gpio/Makefile | 1 + drivers/gpio/wm831x-gpio.c | 252 ++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/gpio.h | 55 +++++++++ 4 files changed, 315 insertions(+) create mode 100644 drivers/gpio/wm831x-gpio.c create mode 100644 include/linux/mfd/wm831x/gpio.h (limited to 'include') diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 96dda81c922..6b4c484a699 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -155,6 +155,13 @@ config GPIO_TWL4030 Say yes here to access the GPIO signals of various multi-function power management chips from Texas Instruments. +config GPIO_WM831X + tristate "WM831x GPIOs" + depends on MFD_WM831X + help + Say yes here to access the GPIO signals of WM831x power management + chips from Wolfson Microelectronics. 
+ comment "PCI GPIO expanders:" config GPIO_BT8XX diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 9244c6fcd8b..ea7c745f26a 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o +obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/wm831x-gpio.c new file mode 100644 index 00000000000..f9c09a54ec7 --- /dev/null +++ b/drivers/gpio/wm831x-gpio.c @@ -0,0 +1,252 @@ +/* + * wm831x-gpio.c -- gpiolib support for Wolfson WM831x PMICs + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define WM831X_GPIO_MAX 16 + +struct wm831x_gpio { + struct wm831x *wm831x; + struct gpio_chip gpio_chip; +}; + +static inline struct wm831x_gpio *to_wm831x_gpio(struct gpio_chip *chip) +{ + return container_of(chip, struct wm831x_gpio, gpio_chip); +} + +static int wm831x_gpio_direction_in(struct gpio_chip *chip, unsigned offset) +{ + struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip); + struct wm831x *wm831x = wm831x_gpio->wm831x; + + return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset, + WM831X_GPN_DIR | WM831X_GPN_TRI, + WM831X_GPN_DIR); +} + +static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip); + struct wm831x *wm831x = wm831x_gpio->wm831x; + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL); + if (ret < 0) + return ret; + + if (ret & 1 << offset) + return 1; + else + return 0; +} + +static int wm831x_gpio_direction_out(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip); + struct wm831x *wm831x = wm831x_gpio->wm831x; + + return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset, + WM831X_GPN_DIR | WM831X_GPN_TRI, 0); +} + +static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip); + struct wm831x *wm831x = wm831x_gpio->wm831x; + + wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset, + value << offset); +} + +#ifdef CONFIG_DEBUG_FS +static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) +{ + struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip); + struct wm831x *wm831x = wm831x_gpio->wm831x; + int i; + + for (i = 0; i < chip->ngpio; i++) { + int gpio = i + chip->base; + int reg; + const char *label, *pull, *powerdomain; + + /* We report the GPIO even if it's not requested since + * we're also reporting things like alternate + * functions which apply even when the GPIO is not in + * use as a GPIO. 
+ */ + label = gpiochip_is_requested(chip, i); + if (!label) + label = "Unrequested"; + + seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label); + + reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i); + if (reg < 0) { + dev_err(wm831x->dev, + "GPIO control %d read failed: %d\n", + gpio, reg); + seq_printf(s, "\n"); + continue; + } + + switch (reg & WM831X_GPN_PULL_MASK) { + case WM831X_GPIO_PULL_NONE: + pull = "nopull"; + break; + case WM831X_GPIO_PULL_DOWN: + pull = "pulldown"; + break; + case WM831X_GPIO_PULL_UP: + pull = "pullup"; + default: + pull = "INVALID PULL"; + break; + } + + switch (i + 1) { + case 1 ... 3: + case 7 ... 9: + if (reg & WM831X_GPN_PWR_DOM) + powerdomain = "VPMIC"; + else + powerdomain = "DBVDD"; + break; + + case 4 ... 6: + case 10 ... 12: + if (reg & WM831X_GPN_PWR_DOM) + powerdomain = "SYSVDD"; + else + powerdomain = "DBVDD"; + break; + + case 13 ... 16: + powerdomain = "TPVDD"; + break; + + default: + BUG(); + break; + } + + seq_printf(s, " %s %s %s %s%s\n" + " %s%s (0x%4x)\n", + reg & WM831X_GPN_DIR ? "in" : "out", + wm831x_gpio_get(chip, i) ? "high" : "low", + pull, + powerdomain, + reg & WM831X_GPN_POL ? " inverted" : "", + reg & WM831X_GPN_OD ? "open-drain" : "CMOS", + reg & WM831X_GPN_TRI ? " tristated" : "", + reg); + } +} +#else +#define wm831x_gpio_dbg_show NULL +#endif + +static struct gpio_chip template_chip = { + .label = "wm831x", + .owner = THIS_MODULE, + .direction_input = wm831x_gpio_direction_in, + .get = wm831x_gpio_get, + .direction_output = wm831x_gpio_direction_out, + .set = wm831x_gpio_set, + .dbg_show = wm831x_gpio_dbg_show, + .can_sleep = 1, +}; + +static int __devinit wm831x_gpio_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + struct wm831x_gpio *wm831x_gpio; + int ret; + + wm831x_gpio = kzalloc(sizeof(*wm831x_gpio), GFP_KERNEL); + if (wm831x_gpio == NULL) + return -ENOMEM; + + wm831x_gpio->wm831x = wm831x; + wm831x_gpio->gpio_chip = template_chip; + wm831x_gpio->gpio_chip.ngpio = WM831X_GPIO_MAX; + wm831x_gpio->gpio_chip.dev = &pdev->dev; + if (pdata && pdata->gpio_base) + wm831x_gpio->gpio_chip.base = pdata->gpio_base; + else + wm831x_gpio->gpio_chip.base = -1; + + ret = gpiochip_add(&wm831x_gpio->gpio_chip); + if (ret < 0) { + dev_err(&pdev->dev, "Could not register gpiochip, %d\n", + ret); + goto err; + } + + platform_set_drvdata(pdev, wm831x_gpio); + + return ret; + +err: + kfree(wm831x_gpio); + return ret; +} + +static int __devexit wm831x_gpio_remove(struct platform_device *pdev) +{ + struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev); + int ret; + + ret = gpiochip_remove(&wm831x_gpio->gpio_chip); + if (ret == 0) + kfree(wm831x_gpio); + + return ret; +} + +static struct platform_driver wm831x_gpio_driver = { + .driver.name = "wm831x-gpio", + .driver.owner = THIS_MODULE, + .probe = wm831x_gpio_probe, + .remove = __devexit_p(wm831x_gpio_remove), +}; + +static int __init wm831x_gpio_init(void) +{ + return platform_driver_register(&wm831x_gpio_driver); +} +subsys_initcall(wm831x_gpio_init); + +static void __exit wm831x_gpio_exit(void) +{ + platform_driver_unregister(&wm831x_gpio_driver); +} +module_exit(wm831x_gpio_exit); + +MODULE_AUTHOR("Mark Brown "); +MODULE_DESCRIPTION("GPIO interface for WM831x PMICs"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-gpio"); diff --git a/include/linux/mfd/wm831x/gpio.h b/include/linux/mfd/wm831x/gpio.h new file mode 100644 index 00000000000..2835614af0e --- 
/dev/null +++ b/include/linux/mfd/wm831x/gpio.h @@ -0,0 +1,55 @@ +/* + * include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_GPIO_H__ +#define __MFD_WM831X_GPIO_H__ + +/* + * R16440-16455 (0x4038-0x4047) - GPIOx Control + */ +#define WM831X_GPN_DIR 0x8000 /* GPN_DIR */ +#define WM831X_GPN_DIR_MASK 0x8000 /* GPN_DIR */ +#define WM831X_GPN_DIR_SHIFT 15 /* GPN_DIR */ +#define WM831X_GPN_DIR_WIDTH 1 /* GPN_DIR */ +#define WM831X_GPN_PULL_MASK 0x6000 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_PULL_SHIFT 13 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_PULL_WIDTH 2 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_INT_MODE 0x1000 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_MASK 0x1000 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_SHIFT 12 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_WIDTH 1 /* GPN_INT_MODE */ +#define WM831X_GPN_PWR_DOM 0x0800 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_MASK 0x0800 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_SHIFT 11 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_WIDTH 1 /* GPN_PWR_DOM */ +#define WM831X_GPN_POL 0x0400 /* GPN_POL */ +#define WM831X_GPN_POL_MASK 0x0400 /* GPN_POL */ +#define WM831X_GPN_POL_SHIFT 10 /* GPN_POL */ +#define WM831X_GPN_POL_WIDTH 1 /* GPN_POL */ +#define WM831X_GPN_OD 0x0200 /* GPN_OD */ +#define WM831X_GPN_OD_MASK 0x0200 /* GPN_OD */ +#define WM831X_GPN_OD_SHIFT 9 /* GPN_OD */ +#define WM831X_GPN_OD_WIDTH 1 /* GPN_OD */ +#define WM831X_GPN_TRI 0x0080 /* GPN_TRI */ +#define WM831X_GPN_TRI_MASK 0x0080 /* GPN_TRI */ +#define WM831X_GPN_TRI_SHIFT 7 /* GPN_TRI */ +#define WM831X_GPN_TRI_WIDTH 1 /* GPN_TRI */ +#define WM831X_GPN_FN_MASK 0x000F /* GPN_FN - [3:0] */ +#define WM831X_GPN_FN_SHIFT 0 /* GPN_FN - [3:0] */ +#define WM831X_GPN_FN_WIDTH 4 /* GPN_FN - [3:0] */ + +#define WM831X_GPIO_PULL_NONE (0 << WM831X_GPN_PULL_SHIFT) +#define WM831X_GPIO_PULL_DOWN (1 << WM831X_GPN_PULL_SHIFT) +#define WM831X_GPIO_PULL_UP (2 << WM831X_GPN_PULL_SHIFT) +#endif -- cgit v1.2.3-70-g09d2 From 0c73b992dd4c645f050344cb13142c0fd3496824 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 15 Sep 2009 12:07:12 +0200 Subject: input: Add support for the WM831x ON pin The WM831x series of PMICs support control of initial power on through the ON pin on the device with soft control of the pin at other times. Represent this to userspace as KEY_POWER. Signed-off-by: Mark Brown Acked-by: Dmitry Torokhov Signed-off-by: Samuel Ortiz --- drivers/input/misc/Kconfig | 10 +++ drivers/input/misc/Makefile | 1 + drivers/input/misc/wm831x-on.c | 163 ++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/core.h | 19 +++++ 4 files changed, 193 insertions(+) create mode 100644 drivers/input/misc/wm831x-on.c (limited to 'include') diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index cbe21bc96b5..852941d108f 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -279,4 +279,14 @@ config INPUT_BFIN_ROTARY To compile this driver as a module, choose M here: the module will be called bfin-rotary. 
+config INPUT_WM831X_ON + tristate "WM831X ON pin" + depends on MFD_WM831X + help + Support the ON pin of WM831X PMICs as an input device + reporting power button status. + + To compile this driver as a module, choose M here: the module + will be called wm831x_on. + endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 79c1e9a5ea3..c97533fb83c 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -26,4 +26,5 @@ obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o obj-$(CONFIG_INPUT_UINPUT) += uinput.o obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o +obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o obj-$(CONFIG_INPUT_YEALINK) += yealink.o diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c new file mode 100644 index 00000000000..ba4f5dd7c60 --- /dev/null +++ b/drivers/input/misc/wm831x-on.c @@ -0,0 +1,163 @@ +/** + * wm831x-on.c - WM831X ON pin driver + * + * Copyright (C) 2009 Wolfson Microelectronics plc + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct wm831x_on { + struct input_dev *dev; + struct delayed_work work; + struct wm831x *wm831x; +}; + +/* + * The chip gives us an interrupt when the ON pin is asserted but we + * then need to poll to see when the pin is deasserted. 
+ */ +static void wm831x_poll_on(struct work_struct *work) +{ + struct wm831x_on *wm831x_on = container_of(work, struct wm831x_on, + work.work); + struct wm831x *wm831x = wm831x_on->wm831x; + int poll, ret; + + ret = wm831x_reg_read(wm831x, WM831X_ON_PIN_CONTROL); + if (ret >= 0) { + poll = !(ret & WM831X_ON_PIN_STS); + + input_report_key(wm831x_on->dev, KEY_POWER, poll); + input_sync(wm831x_on->dev); + } else { + dev_err(wm831x->dev, "Failed to read ON status: %d\n", ret); + poll = 1; + } + + if (poll) + schedule_delayed_work(&wm831x_on->work, 100); +} + +static irqreturn_t wm831x_on_irq(int irq, void *data) +{ + struct wm831x_on *wm831x_on = data; + + schedule_delayed_work(&wm831x_on->work, 0); + + return IRQ_HANDLED; +} + +static int __devinit wm831x_on_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_on *wm831x_on; + int irq = platform_get_irq(pdev, 0); + int ret; + + wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL); + if (!wm831x_on) { + dev_err(&pdev->dev, "Can't allocate data\n"); + return -ENOMEM; + } + + wm831x_on->wm831x = wm831x; + INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on); + + wm831x_on->dev = input_allocate_device(); + if (!wm831x_on->dev) { + dev_err(&pdev->dev, "Can't allocate input dev\n"); + ret = -ENOMEM; + goto err; + } + + wm831x_on->dev->evbit[0] = BIT_MASK(EV_KEY); + wm831x_on->dev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER); + wm831x_on->dev->name = "wm831x_on"; + wm831x_on->dev->phys = "wm831x_on/input0"; + wm831x_on->dev->dev.parent = &pdev->dev; + + ret = wm831x_request_irq(wm831x, irq, wm831x_on_irq, + IRQF_TRIGGER_RISING, "wm831x_on", wm831x_on); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to request IRQ: %d\n", ret); + goto err_input_dev; + } + ret = input_register_device(wm831x_on->dev); + if (ret) { + dev_dbg(&pdev->dev, "Can't register input device: %d\n", ret); + goto err_irq; + } + + platform_set_drvdata(pdev, wm831x_on); + + return 0; + +err_irq: + wm831x_free_irq(wm831x, irq, NULL); +err_input_dev: + input_free_device(wm831x_on->dev); +err: + kfree(wm831x_on); + return ret; +} + +static int __devexit wm831x_on_remove(struct platform_device *pdev) +{ + struct wm831x_on *wm831x_on = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + wm831x_free_irq(wm831x_on->wm831x, irq, wm831x_on); + cancel_delayed_work_sync(&wm831x_on->work); + input_unregister_device(wm831x_on->dev); + kfree(wm831x_on); + + return 0; +} + +static struct platform_driver wm831x_on_driver = { + .probe = wm831x_on_probe, + .remove = __devexit_p(wm831x_on_remove), + .driver = { + .name = "wm831x-on", + .owner = THIS_MODULE, + }, +}; + +static int __init wm831x_on_init(void) +{ + return platform_driver_register(&wm831x_on_driver); +} +module_init(wm831x_on_init); + +static void __exit wm831x_on_exit(void) +{ + platform_driver_unregister(&wm831x_on_driver); +} +module_exit(wm831x_on_exit); + +MODULE_ALIAS("platform:wm831x-on"); +MODULE_DESCRIPTION("WM831x ON pin"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mark Brown "); + diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index d7134dfba56..91eb493bf14 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -216,6 +216,25 @@ #define WM831X_PARENT_ID_SHIFT 0 /* PARENT_ID - [15:0] */ #define WM831X_PARENT_ID_WIDTH 16 /* PARENT_ID - [15:0] */ +/* + * R16389 (0x4005) - ON Pin Control + */ +#define WM831X_ON_PIN_SECACT_MASK 0x0300 /* ON_PIN_SECACT - [9:8] */ +#define 
WM831X_ON_PIN_SECACT_SHIFT 8 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_SECACT_WIDTH 2 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_PRIMACT_MASK 0x0030 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_PRIMACT_SHIFT 4 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_PRIMACT_WIDTH 2 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_STS 0x0008 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_MASK 0x0008 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_SHIFT 3 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_WIDTH 1 /* ON_PIN_STS */ +#define WM831X_ON_PIN_TO_MASK 0x0003 /* ON_PIN_TO - [1:0] */ +#define WM831X_ON_PIN_TO_SHIFT 0 /* ON_PIN_TO - [1:0] */ +#define WM831X_ON_PIN_TO_WIDTH 2 /* ON_PIN_TO - [1:0] */ + +struct regulator_dev; + struct wm831x { struct mutex io_lock; -- cgit v1.2.3-70-g09d2 From be721979dd6b335e4ab6f83abb5cc11c33662aa8 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 4 Aug 2009 20:09:52 +0200 Subject: regulator: Provide mode to status conversion function This is useful for implementing get_status() in terms of get_mode(). Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/regulator/core.c | 24 ++++++++++++++++++++++++ include/linux/regulator/driver.h | 2 ++ 2 files changed, 26 insertions(+) (limited to 'include') diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 98c3a74e994..91ba9bfaa70 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1864,6 +1864,30 @@ int regulator_notifier_call_chain(struct regulator_dev *rdev, } EXPORT_SYMBOL_GPL(regulator_notifier_call_chain); +/** + * regulator_mode_to_status - convert a regulator mode into a status + * + * @mode: Mode to convert + * + * Convert a regulator mode into a status. + */ +int regulator_mode_to_status(unsigned int mode) +{ + switch (mode) { + case REGULATOR_MODE_FAST: + return REGULATOR_STATUS_FAST; + case REGULATOR_MODE_NORMAL: + return REGULATOR_STATUS_NORMAL; + case REGULATOR_MODE_IDLE: + return REGULATOR_STATUS_IDLE; + case REGULATOR_STATUS_STANDBY: + return REGULATOR_STATUS_STANDBY; + default: + return 0; + } +} +EXPORT_SYMBOL_GPL(regulator_mode_to_status); + /* * To avoid cluttering sysfs (and memory) with useless state, only * create attributes that can be meaningfully displayed. diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 225f733e753..ce1be708ca1 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -193,6 +193,8 @@ void *rdev_get_drvdata(struct regulator_dev *rdev); struct device *rdev_get_dev(struct regulator_dev *rdev); int rdev_get_id(struct regulator_dev *rdev); +int regulator_mode_to_status(unsigned int); + void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); #endif -- cgit v1.2.3-70-g09d2 From e4ee831f949a7c7746a56bcf1e7ca057d6f69e2a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 28 Jul 2009 15:21:49 +0100 Subject: regulator: Add WM831x DC-DC buck convertor support The WM831x series of devices all have 3 DC-DC buck convertors. This driver implements software control for these regulators via the regulator API. Use with split hardware/software control of individual regulators is not supported, though regulators not controlled by software may be controlled via the hardware control interfaces. 
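[Editor's note: as a quick illustration of the voltage-selector arithmetic the BUCKV callbacks in this patch use (selectors 0-0x8 all select 600 mV, then 12.5 mV per step up to selector 0x68, i.e. 1.8 V), here is a minimal stand-alone sketch of the same mapping. It is plain user-space C written for this note, not part of the patch; the function names are invented for the example, and the rounding deliberately mirrors the driver's truncating integer division.]

/* Stand-alone sketch of the BUCKV selector <-> voltage mapping implemented
 * by the wm831x-dcdc.c code in this patch.  Not kernel code; names are
 * invented for illustration only. */
#include <stdio.h>

#define BUCKV_MAX_SELECTOR	0x68

/* selector -> microvolts, mirroring wm831x_buckv_list_voltage() */
static int buckv_selector_to_uv(unsigned int selector)
{
	if (selector <= 0x8)
		return 600000;
	if (selector <= BUCKV_MAX_SELECTOR)
		return 600000 + ((selector - 0x8) * 12500);
	return -1;
}

/* requested minimum -> selector, mirroring the driver's set_voltage()
 * arithmetic (integer division truncates toward the lower 12.5 mV step) */
static int buckv_uv_to_selector(int min_uv)
{
	if (min_uv < 600000)
		return 0;
	if (min_uv <= 1800000)
		return ((min_uv - 600000) / 12500) + 8;
	return -1;
}

int main(void)
{
	int request;

	/* Walk a few example requests and show the selector the arithmetic
	 * picks and the voltage that selector actually programs. */
	for (request = 600000; request <= 1800000; request += 300000) {
		int sel = buckv_uv_to_selector(request);

		printf("request %7d uV -> selector 0x%02x -> %7d uV\n",
		       request, (unsigned int)sel, buckv_selector_to_uv(sel));
	}

	return 0;
}

[Compiling and running this prints the selector and programmed voltage for a few example requests, which is a convenient way to sanity-check the 12.5 mV step maths against the register field widths defined further down.]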
Signed-off-by: Mark Brown Acked-by: Liam Girdwood Signed-off-by: Samuel Ortiz --- drivers/regulator/Kconfig | 7 + drivers/regulator/Makefile | 1 + drivers/regulator/wm831x-dcdc.c | 643 +++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/regulator.h | 571 +++++++++++++++++++++++++++++++ 4 files changed, 1222 insertions(+) create mode 100644 drivers/regulator/wm831x-dcdc.c (limited to 'include') diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index da7483ef32b..38ea5dc8e14 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -82,6 +82,13 @@ config REGULATOR_TWL4030 This driver supports the voltage regulators provided by this family of companion chips. +config REGULATOR_WM831X + tristate "Wolfson Microelcronics WM831x PMIC regulators" + depends on MFD_WM831X + help + Support the voltage and current regulators of the WM831x series + of PMIC devices. + config REGULATOR_WM8350 tristate "Wolfson Microelectroncis WM8350 AudioPlus PMIC" depends on MFD_WM8350 diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 3a9748ff860..b1d2b826f53 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o obj-$(CONFIG_REGULATOR_TWL4030) += twl4030-regulator.o +obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c new file mode 100644 index 00000000000..fa5126e38ac --- /dev/null +++ b/drivers/regulator/wm831x-dcdc.c @@ -0,0 +1,643 @@ +/* + * wm831x-dcdc.c -- DC-DC buck convertor driver for the WM831x series + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define WM831X_BUCKV_MAX_SELECTOR 0x68 +#define WM831X_BUCKP_MAX_SELECTOR 0x66 + +#define WM831X_DCDC_MODE_FAST 0 +#define WM831X_DCDC_MODE_NORMAL 1 +#define WM831X_DCDC_MODE_IDLE 2 +#define WM831X_DCDC_MODE_STANDBY 3 + +#define WM831X_DCDC_MAX_NAME 6 + +/* Register offsets in control block */ +#define WM831X_DCDC_CONTROL_1 0 +#define WM831X_DCDC_CONTROL_2 1 +#define WM831X_DCDC_ON_CONFIG 2 +#define WM831X_DCDC_SLEEP_CONTROL 3 + +/* + * Shared + */ + +struct wm831x_dcdc { + char name[WM831X_DCDC_MAX_NAME]; + struct regulator_desc desc; + int base; + struct wm831x *wm831x; + struct regulator_dev *regulator; +}; + +static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + int mask = 1 << rdev_get_id(rdev); + int reg; + + reg = wm831x_reg_read(wm831x, WM831X_DCDC_ENABLE); + if (reg < 0) + return reg; + + if (reg & mask) + return 1; + else + return 0; +} + +static int wm831x_dcdc_enable(struct regulator_dev *rdev) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + int mask = 1 << rdev_get_id(rdev); + + return wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, mask, mask); +} + +static int wm831x_dcdc_disable(struct regulator_dev *rdev) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + int mask = 1 << rdev_get_id(rdev); + + return wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, mask, 0); +} + +static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev) + +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; + int val; + + val = wm831x_reg_read(wm831x, reg); + if (val < 0) + return val; + + val = (val & WM831X_DC1_ON_MODE_MASK) >> WM831X_DC1_ON_MODE_SHIFT; + + switch (val) { + case WM831X_DCDC_MODE_FAST: + return REGULATOR_MODE_FAST; + case WM831X_DCDC_MODE_NORMAL: + return REGULATOR_MODE_NORMAL; + case WM831X_DCDC_MODE_STANDBY: + return REGULATOR_MODE_STANDBY; + case WM831X_DCDC_MODE_IDLE: + return REGULATOR_MODE_IDLE; + default: + BUG(); + } +} + +static int wm831x_dcdc_set_mode_int(struct wm831x *wm831x, int reg, + unsigned int mode) +{ + int val; + + switch (mode) { + case REGULATOR_MODE_FAST: + val = WM831X_DCDC_MODE_FAST; + break; + case REGULATOR_MODE_NORMAL: + val = WM831X_DCDC_MODE_NORMAL; + break; + case REGULATOR_MODE_STANDBY: + val = WM831X_DCDC_MODE_STANDBY; + break; + case REGULATOR_MODE_IDLE: + val = WM831X_DCDC_MODE_IDLE; + break; + default: + return -EINVAL; + } + + return wm831x_set_bits(wm831x, reg, WM831X_DC1_ON_MODE_MASK, + val << WM831X_DC1_ON_MODE_SHIFT); +} + +static int wm831x_dcdc_set_mode(struct regulator_dev *rdev, unsigned int mode) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; + + return wm831x_dcdc_set_mode_int(wm831x, reg, mode); +} + +static int wm831x_dcdc_set_suspend_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; + + return wm831x_dcdc_set_mode_int(wm831x, reg, mode); +} + +static int wm831x_dcdc_get_status(struct regulator_dev *rdev) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x 
= dcdc->wm831x; + int ret; + + /* First, check for errors */ + ret = wm831x_reg_read(wm831x, WM831X_DCDC_UV_STATUS); + if (ret < 0) + return ret; + + if (ret & (1 << rdev_get_id(rdev))) { + dev_dbg(wm831x->dev, "DCDC%d under voltage\n", + rdev_get_id(rdev) + 1); + return REGULATOR_STATUS_ERROR; + } + + /* DCDC1 and DCDC2 can additionally detect high voltage/current */ + if (rdev_get_id(rdev) < 2) { + if (ret & (WM831X_DC1_OV_STS << rdev_get_id(rdev))) { + dev_dbg(wm831x->dev, "DCDC%d over voltage\n", + rdev_get_id(rdev) + 1); + return REGULATOR_STATUS_ERROR; + } + + if (ret & (WM831X_DC1_HC_STS << rdev_get_id(rdev))) { + dev_dbg(wm831x->dev, "DCDC%d over current\n", + rdev_get_id(rdev) + 1); + return REGULATOR_STATUS_ERROR; + } + } + + /* Is the regulator on? */ + ret = wm831x_reg_read(wm831x, WM831X_DCDC_STATUS); + if (ret < 0) + return ret; + if (!(ret & (1 << rdev_get_id(rdev)))) + return REGULATOR_STATUS_OFF; + + /* TODO: When we handle hardware control modes so we can report the + * current mode. */ + return REGULATOR_STATUS_ON; +} + +static irqreturn_t wm831x_dcdc_uv_irq(int irq, void *data) +{ + struct wm831x_dcdc *dcdc = data; + + regulator_notifier_call_chain(dcdc->regulator, + REGULATOR_EVENT_UNDER_VOLTAGE, + NULL); + + return IRQ_HANDLED; +} + +static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data) +{ + struct wm831x_dcdc *dcdc = data; + + regulator_notifier_call_chain(dcdc->regulator, + REGULATOR_EVENT_OVER_CURRENT, + NULL); + + return IRQ_HANDLED; +} + +/* + * BUCKV specifics + */ + +static int wm831x_buckv_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + if (selector <= 0x8) + return 600000; + if (selector <= WM831X_BUCKV_MAX_SELECTOR) + return 600000 + ((selector - 0x8) * 12500); + return -EINVAL; +} + +static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg, + int min_uV, int max_uV) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 vsel; + + if (min_uV < 600000) + vsel = 0; + else if (min_uV <= 1800000) + vsel = ((min_uV - 600000) / 12500) + 8; + else + return -EINVAL; + + if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV) + return -EINVAL; + + return wm831x_set_bits(wm831x, reg, WM831X_DC1_ON_VSEL_MASK, vsel); +} + +static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; + + return wm831x_buckv_set_voltage_int(rdev, reg, min_uV, max_uV); +} + +static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, + int uV) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; + + return wm831x_buckv_set_voltage_int(rdev, reg, uV, uV); +} + +static int wm831x_buckv_get_voltage(struct regulator_dev *rdev) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; + int val; + + val = wm831x_reg_read(wm831x, reg); + if (val < 0) + return val; + + return wm831x_buckv_list_voltage(rdev, val & WM831X_DC1_ON_VSEL_MASK); +} + +/* Current limit options */ +static u16 wm831x_dcdc_ilim[] = { + 125, 250, 375, 500, 625, 750, 875, 1000 +}; + +static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev, + int min_uA, int max_uA) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; + int i; + + for (i = 0; i < 
ARRAY_SIZE(wm831x_dcdc_ilim); i++) { + if (max_uA <= wm831x_dcdc_ilim[i]) + break; + } + if (i == ARRAY_SIZE(wm831x_dcdc_ilim)) + return -EINVAL; + + return wm831x_set_bits(wm831x, reg, WM831X_DC1_HC_THR_MASK, i); +} + +static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; + int val; + + val = wm831x_reg_read(wm831x, reg); + if (val < 0) + return val; + + return wm831x_dcdc_ilim[val & WM831X_DC1_HC_THR_MASK]; +} + +static struct regulator_ops wm831x_buckv_ops = { + .set_voltage = wm831x_buckv_set_voltage, + .get_voltage = wm831x_buckv_get_voltage, + .list_voltage = wm831x_buckv_list_voltage, + .set_suspend_voltage = wm831x_buckv_set_suspend_voltage, + .set_current_limit = wm831x_buckv_set_current_limit, + .get_current_limit = wm831x_buckv_get_current_limit, + + .is_enabled = wm831x_dcdc_is_enabled, + .enable = wm831x_dcdc_enable, + .disable = wm831x_dcdc_disable, + .get_status = wm831x_dcdc_get_status, + .get_mode = wm831x_dcdc_get_mode, + .set_mode = wm831x_dcdc_set_mode, + .set_suspend_mode = wm831x_dcdc_set_suspend_mode, +}; + +static __devinit int wm831x_buckv_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + int id = pdev->id % ARRAY_SIZE(pdata->dcdc); + struct wm831x_dcdc *dcdc; + struct resource *res; + int ret, irq; + + dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); + + if (pdata == NULL || pdata->dcdc[id] == NULL) + return -ENODEV; + + dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL); + if (dcdc == NULL) { + dev_err(&pdev->dev, "Unable to allocate private data\n"); + return -ENOMEM; + } + + dcdc->wm831x = wm831x; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No I/O resource\n"); + ret = -EINVAL; + goto err; + } + dcdc->base = res->start; + + snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1); + dcdc->desc.name = dcdc->name; + dcdc->desc.id = id; + dcdc->desc.type = REGULATOR_VOLTAGE; + dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1; + dcdc->desc.ops = &wm831x_buckv_ops; + dcdc->desc.owner = THIS_MODULE; + + dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, + pdata->dcdc[id], dcdc); + if (IS_ERR(dcdc->regulator)) { + ret = PTR_ERR(dcdc->regulator); + dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", + id + 1, ret); + goto err; + } + + irq = platform_get_irq_byname(pdev, "UV"); + ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, + IRQF_TRIGGER_RISING, dcdc->name, + dcdc); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", + irq, ret); + goto err_regulator; + } + + irq = platform_get_irq_byname(pdev, "HC"); + ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_oc_irq, + IRQF_TRIGGER_RISING, dcdc->name, + dcdc); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n", + irq, ret); + goto err_uv; + } + + platform_set_drvdata(pdev, dcdc); + + return 0; + +err_uv: + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); +err_regulator: + regulator_unregister(dcdc->regulator); +err: + kfree(dcdc); + return ret; +} + +static __devexit int wm831x_buckv_remove(struct platform_device *pdev) +{ + struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); + struct wm831x *wm831x = dcdc->wm831x; + + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc); + 
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); + regulator_unregister(dcdc->regulator); + kfree(dcdc); + + return 0; +} + +static struct platform_driver wm831x_buckv_driver = { + .probe = wm831x_buckv_probe, + .remove = __devexit_p(wm831x_buckv_remove), + .driver = { + .name = "wm831x-buckv", + }, +}; + +/* + * BUCKP specifics + */ + +static int wm831x_buckp_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + if (selector <= WM831X_BUCKP_MAX_SELECTOR) + return 850000 + (selector * 25000); + else + return -EINVAL; +} + +static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg, + int min_uV, int max_uV) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 vsel; + + if (min_uV <= 34000000) + vsel = (min_uV - 850000) / 25000; + else + return -EINVAL; + + if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV) + return -EINVAL; + + return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel); +} + +static int wm831x_buckp_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; + + return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV); +} + +static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, + int uV) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; + + return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV); +} + +static int wm831x_buckp_get_voltage(struct regulator_dev *rdev) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + struct wm831x *wm831x = dcdc->wm831x; + u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; + int val; + + val = wm831x_reg_read(wm831x, reg); + if (val < 0) + return val; + + return wm831x_buckp_list_voltage(rdev, val & WM831X_DC3_ON_VSEL_MASK); +} + +static struct regulator_ops wm831x_buckp_ops = { + .set_voltage = wm831x_buckp_set_voltage, + .get_voltage = wm831x_buckp_get_voltage, + .list_voltage = wm831x_buckp_list_voltage, + .set_suspend_voltage = wm831x_buckp_set_suspend_voltage, + + .is_enabled = wm831x_dcdc_is_enabled, + .enable = wm831x_dcdc_enable, + .disable = wm831x_dcdc_disable, + .get_status = wm831x_dcdc_get_status, + .get_mode = wm831x_dcdc_get_mode, + .set_mode = wm831x_dcdc_set_mode, + .set_suspend_mode = wm831x_dcdc_set_suspend_mode, +}; + +static __devinit int wm831x_buckp_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + int id = pdev->id % ARRAY_SIZE(pdata->dcdc); + struct wm831x_dcdc *dcdc; + struct resource *res; + int ret, irq; + + dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); + + if (pdata == NULL || pdata->dcdc[id] == NULL) + return -ENODEV; + + dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL); + if (dcdc == NULL) { + dev_err(&pdev->dev, "Unable to allocate private data\n"); + return -ENOMEM; + } + + dcdc->wm831x = wm831x; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No I/O resource\n"); + ret = -EINVAL; + goto err; + } + dcdc->base = res->start; + + snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1); + dcdc->desc.name = dcdc->name; + dcdc->desc.id = id; + dcdc->desc.type = REGULATOR_VOLTAGE; + dcdc->desc.n_voltages = WM831X_BUCKP_MAX_SELECTOR + 1; + dcdc->desc.ops = &wm831x_buckp_ops; + dcdc->desc.owner = THIS_MODULE; + + dcdc->regulator = 
regulator_register(&dcdc->desc, &pdev->dev, + pdata->dcdc[id], dcdc); + if (IS_ERR(dcdc->regulator)) { + ret = PTR_ERR(dcdc->regulator); + dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", + id + 1, ret); + goto err; + } + + irq = platform_get_irq_byname(pdev, "UV"); + ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, + IRQF_TRIGGER_RISING, dcdc->name, + dcdc); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", + irq, ret); + goto err_regulator; + } + + platform_set_drvdata(pdev, dcdc); + + return 0; + +err_regulator: + regulator_unregister(dcdc->regulator); +err: + kfree(dcdc); + return ret; +} + +static __devexit int wm831x_buckp_remove(struct platform_device *pdev) +{ + struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); + struct wm831x *wm831x = dcdc->wm831x; + + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); + regulator_unregister(dcdc->regulator); + kfree(dcdc); + + return 0; +} + +static struct platform_driver wm831x_buckp_driver = { + .probe = wm831x_buckp_probe, + .remove = __devexit_p(wm831x_buckp_remove), + .driver = { + .name = "wm831x-buckp", + }, +}; + +static int __init wm831x_dcdc_init(void) +{ + int ret; + ret = platform_driver_register(&wm831x_buckv_driver); + if (ret != 0) + pr_err("Failed to register WM831x BUCKV driver: %d\n", ret); + + ret = platform_driver_register(&wm831x_buckp_driver); + if (ret != 0) + pr_err("Failed to register WM831x BUCKP driver: %d\n", ret); + + return 0; +} +subsys_initcall(wm831x_dcdc_init); + +static void __exit wm831x_dcdc_exit(void) +{ + platform_driver_unregister(&wm831x_buckp_driver); + platform_driver_unregister(&wm831x_buckv_driver); +} +module_exit(wm831x_dcdc_exit); + +/* Module information */ +MODULE_AUTHOR("Mark Brown"); +MODULE_DESCRIPTION("WM831x DC-DC convertor driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-buckv"); +MODULE_ALIAS("platform:wm831x-buckp"); diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h index b5d58fb38b4..c74d6aafdca 100644 --- a/include/linux/mfd/wm831x/regulator.h +++ b/include/linux/mfd/wm831x/regulator.h @@ -15,6 +15,577 @@ #ifndef __MFD_WM831X_REGULATOR_H__ #define __MFD_WM831X_REGULATOR_H__ +/* + * R16462 (0x404E) - Current Sink 1 + */ +#define WM831X_CS1_ENA 0x8000 /* CS1_ENA */ +#define WM831X_CS1_ENA_MASK 0x8000 /* CS1_ENA */ +#define WM831X_CS1_ENA_SHIFT 15 /* CS1_ENA */ +#define WM831X_CS1_ENA_WIDTH 1 /* CS1_ENA */ +#define WM831X_CS1_DRIVE 0x4000 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_MASK 0x4000 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_SHIFT 14 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_WIDTH 1 /* CS1_DRIVE */ +#define WM831X_CS1_SLPENA 0x1000 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_MASK 0x1000 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_SHIFT 12 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_WIDTH 1 /* CS1_SLPENA */ +#define WM831X_CS1_OFF_RAMP_MASK 0x0C00 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_OFF_RAMP_SHIFT 10 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_OFF_RAMP_WIDTH 2 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_ON_RAMP_MASK 0x0300 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ON_RAMP_SHIFT 8 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ON_RAMP_WIDTH 2 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ISEL_MASK 0x003F /* CS1_ISEL - [5:0] */ +#define WM831X_CS1_ISEL_SHIFT 0 /* CS1_ISEL - [5:0] */ +#define WM831X_CS1_ISEL_WIDTH 6 /* CS1_ISEL - [5:0] */ + +/* + * R16463 (0x404F) - Current Sink 2 + */ +#define WM831X_CS2_ENA 0x8000 /* CS2_ENA */ +#define 
WM831X_CS2_ENA_MASK 0x8000 /* CS2_ENA */ +#define WM831X_CS2_ENA_SHIFT 15 /* CS2_ENA */ +#define WM831X_CS2_ENA_WIDTH 1 /* CS2_ENA */ +#define WM831X_CS2_DRIVE 0x4000 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_MASK 0x4000 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_SHIFT 14 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_WIDTH 1 /* CS2_DRIVE */ +#define WM831X_CS2_SLPENA 0x1000 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_MASK 0x1000 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_SHIFT 12 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_WIDTH 1 /* CS2_SLPENA */ +#define WM831X_CS2_OFF_RAMP_MASK 0x0C00 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_OFF_RAMP_SHIFT 10 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_OFF_RAMP_WIDTH 2 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_ON_RAMP_MASK 0x0300 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ON_RAMP_SHIFT 8 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ON_RAMP_WIDTH 2 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ISEL_MASK 0x003F /* CS2_ISEL - [5:0] */ +#define WM831X_CS2_ISEL_SHIFT 0 /* CS2_ISEL - [5:0] */ +#define WM831X_CS2_ISEL_WIDTH 6 /* CS2_ISEL - [5:0] */ + +/* + * R16464 (0x4050) - DCDC Enable + */ +#define WM831X_EPE2_ENA 0x0080 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_MASK 0x0080 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_SHIFT 7 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_WIDTH 1 /* EPE2_ENA */ +#define WM831X_EPE1_ENA 0x0040 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_MASK 0x0040 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_SHIFT 6 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_WIDTH 1 /* EPE1_ENA */ +#define WM831X_DC4_ENA 0x0008 /* DC4_ENA */ +#define WM831X_DC4_ENA_MASK 0x0008 /* DC4_ENA */ +#define WM831X_DC4_ENA_SHIFT 3 /* DC4_ENA */ +#define WM831X_DC4_ENA_WIDTH 1 /* DC4_ENA */ +#define WM831X_DC3_ENA 0x0004 /* DC3_ENA */ +#define WM831X_DC3_ENA_MASK 0x0004 /* DC3_ENA */ +#define WM831X_DC3_ENA_SHIFT 2 /* DC3_ENA */ +#define WM831X_DC3_ENA_WIDTH 1 /* DC3_ENA */ +#define WM831X_DC2_ENA 0x0002 /* DC2_ENA */ +#define WM831X_DC2_ENA_MASK 0x0002 /* DC2_ENA */ +#define WM831X_DC2_ENA_SHIFT 1 /* DC2_ENA */ +#define WM831X_DC2_ENA_WIDTH 1 /* DC2_ENA */ +#define WM831X_DC1_ENA 0x0001 /* DC1_ENA */ +#define WM831X_DC1_ENA_MASK 0x0001 /* DC1_ENA */ +#define WM831X_DC1_ENA_SHIFT 0 /* DC1_ENA */ +#define WM831X_DC1_ENA_WIDTH 1 /* DC1_ENA */ + +/* + * R16465 (0x4051) - LDO Enable + */ +#define WM831X_LDO11_ENA 0x0400 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_MASK 0x0400 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_SHIFT 10 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_WIDTH 1 /* LDO11_ENA */ +#define WM831X_LDO10_ENA 0x0200 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_MASK 0x0200 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_SHIFT 9 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_WIDTH 1 /* LDO10_ENA */ +#define WM831X_LDO9_ENA 0x0100 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_MASK 0x0100 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_SHIFT 8 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_WIDTH 1 /* LDO9_ENA */ +#define WM831X_LDO8_ENA 0x0080 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_MASK 0x0080 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_SHIFT 7 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_WIDTH 1 /* LDO8_ENA */ +#define WM831X_LDO7_ENA 0x0040 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_MASK 0x0040 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_SHIFT 6 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_WIDTH 1 /* LDO7_ENA */ +#define WM831X_LDO6_ENA 0x0020 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_MASK 0x0020 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_SHIFT 5 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_WIDTH 1 /* LDO6_ENA */ +#define WM831X_LDO5_ENA 
0x0010 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_MASK 0x0010 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_SHIFT 4 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_WIDTH 1 /* LDO5_ENA */ +#define WM831X_LDO4_ENA 0x0008 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_MASK 0x0008 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_SHIFT 3 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_WIDTH 1 /* LDO4_ENA */ +#define WM831X_LDO3_ENA 0x0004 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_MASK 0x0004 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_SHIFT 2 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_WIDTH 1 /* LDO3_ENA */ +#define WM831X_LDO2_ENA 0x0002 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_SHIFT 1 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ +#define WM831X_LDO1_ENA 0x0001 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_SHIFT 0 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_WIDTH 1 /* LDO1_ENA */ + +/* + * R16466 (0x4052) - DCDC Status + */ +#define WM831X_EPE2_STS 0x0080 /* EPE2_STS */ +#define WM831X_EPE2_STS_MASK 0x0080 /* EPE2_STS */ +#define WM831X_EPE2_STS_SHIFT 7 /* EPE2_STS */ +#define WM831X_EPE2_STS_WIDTH 1 /* EPE2_STS */ +#define WM831X_EPE1_STS 0x0040 /* EPE1_STS */ +#define WM831X_EPE1_STS_MASK 0x0040 /* EPE1_STS */ +#define WM831X_EPE1_STS_SHIFT 6 /* EPE1_STS */ +#define WM831X_EPE1_STS_WIDTH 1 /* EPE1_STS */ +#define WM831X_DC4_STS 0x0008 /* DC4_STS */ +#define WM831X_DC4_STS_MASK 0x0008 /* DC4_STS */ +#define WM831X_DC4_STS_SHIFT 3 /* DC4_STS */ +#define WM831X_DC4_STS_WIDTH 1 /* DC4_STS */ +#define WM831X_DC3_STS 0x0004 /* DC3_STS */ +#define WM831X_DC3_STS_MASK 0x0004 /* DC3_STS */ +#define WM831X_DC3_STS_SHIFT 2 /* DC3_STS */ +#define WM831X_DC3_STS_WIDTH 1 /* DC3_STS */ +#define WM831X_DC2_STS 0x0002 /* DC2_STS */ +#define WM831X_DC2_STS_MASK 0x0002 /* DC2_STS */ +#define WM831X_DC2_STS_SHIFT 1 /* DC2_STS */ +#define WM831X_DC2_STS_WIDTH 1 /* DC2_STS */ +#define WM831X_DC1_STS 0x0001 /* DC1_STS */ +#define WM831X_DC1_STS_MASK 0x0001 /* DC1_STS */ +#define WM831X_DC1_STS_SHIFT 0 /* DC1_STS */ +#define WM831X_DC1_STS_WIDTH 1 /* DC1_STS */ + +/* + * R16467 (0x4053) - LDO Status + */ +#define WM831X_LDO11_STS 0x0400 /* LDO11_STS */ +#define WM831X_LDO11_STS_MASK 0x0400 /* LDO11_STS */ +#define WM831X_LDO11_STS_SHIFT 10 /* LDO11_STS */ +#define WM831X_LDO11_STS_WIDTH 1 /* LDO11_STS */ +#define WM831X_LDO10_STS 0x0200 /* LDO10_STS */ +#define WM831X_LDO10_STS_MASK 0x0200 /* LDO10_STS */ +#define WM831X_LDO10_STS_SHIFT 9 /* LDO10_STS */ +#define WM831X_LDO10_STS_WIDTH 1 /* LDO10_STS */ +#define WM831X_LDO9_STS 0x0100 /* LDO9_STS */ +#define WM831X_LDO9_STS_MASK 0x0100 /* LDO9_STS */ +#define WM831X_LDO9_STS_SHIFT 8 /* LDO9_STS */ +#define WM831X_LDO9_STS_WIDTH 1 /* LDO9_STS */ +#define WM831X_LDO8_STS 0x0080 /* LDO8_STS */ +#define WM831X_LDO8_STS_MASK 0x0080 /* LDO8_STS */ +#define WM831X_LDO8_STS_SHIFT 7 /* LDO8_STS */ +#define WM831X_LDO8_STS_WIDTH 1 /* LDO8_STS */ +#define WM831X_LDO7_STS 0x0040 /* LDO7_STS */ +#define WM831X_LDO7_STS_MASK 0x0040 /* LDO7_STS */ +#define WM831X_LDO7_STS_SHIFT 6 /* LDO7_STS */ +#define WM831X_LDO7_STS_WIDTH 1 /* LDO7_STS */ +#define WM831X_LDO6_STS 0x0020 /* LDO6_STS */ +#define WM831X_LDO6_STS_MASK 0x0020 /* LDO6_STS */ +#define WM831X_LDO6_STS_SHIFT 5 /* LDO6_STS */ +#define WM831X_LDO6_STS_WIDTH 1 /* LDO6_STS */ +#define WM831X_LDO5_STS 0x0010 /* LDO5_STS */ +#define WM831X_LDO5_STS_MASK 0x0010 /* LDO5_STS */ +#define WM831X_LDO5_STS_SHIFT 4 /* LDO5_STS */ +#define WM831X_LDO5_STS_WIDTH 1 /* 
LDO5_STS */ +#define WM831X_LDO4_STS 0x0008 /* LDO4_STS */ +#define WM831X_LDO4_STS_MASK 0x0008 /* LDO4_STS */ +#define WM831X_LDO4_STS_SHIFT 3 /* LDO4_STS */ +#define WM831X_LDO4_STS_WIDTH 1 /* LDO4_STS */ +#define WM831X_LDO3_STS 0x0004 /* LDO3_STS */ +#define WM831X_LDO3_STS_MASK 0x0004 /* LDO3_STS */ +#define WM831X_LDO3_STS_SHIFT 2 /* LDO3_STS */ +#define WM831X_LDO3_STS_WIDTH 1 /* LDO3_STS */ +#define WM831X_LDO2_STS 0x0002 /* LDO2_STS */ +#define WM831X_LDO2_STS_MASK 0x0002 /* LDO2_STS */ +#define WM831X_LDO2_STS_SHIFT 1 /* LDO2_STS */ +#define WM831X_LDO2_STS_WIDTH 1 /* LDO2_STS */ +#define WM831X_LDO1_STS 0x0001 /* LDO1_STS */ +#define WM831X_LDO1_STS_MASK 0x0001 /* LDO1_STS */ +#define WM831X_LDO1_STS_SHIFT 0 /* LDO1_STS */ +#define WM831X_LDO1_STS_WIDTH 1 /* LDO1_STS */ + +/* + * R16468 (0x4054) - DCDC UV Status + */ +#define WM831X_DC2_OV_STS 0x2000 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_MASK 0x2000 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_SHIFT 13 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_WIDTH 1 /* DC2_OV_STS */ +#define WM831X_DC1_OV_STS 0x1000 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_MASK 0x1000 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_SHIFT 12 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_WIDTH 1 /* DC1_OV_STS */ +#define WM831X_DC2_HC_STS 0x0200 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_MASK 0x0200 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_SHIFT 9 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_WIDTH 1 /* DC2_HC_STS */ +#define WM831X_DC1_HC_STS 0x0100 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_MASK 0x0100 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_SHIFT 8 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_WIDTH 1 /* DC1_HC_STS */ +#define WM831X_DC4_UV_STS 0x0008 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_MASK 0x0008 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_SHIFT 3 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_WIDTH 1 /* DC4_UV_STS */ +#define WM831X_DC3_UV_STS 0x0004 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_MASK 0x0004 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_SHIFT 2 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_WIDTH 1 /* DC3_UV_STS */ +#define WM831X_DC2_UV_STS 0x0002 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_MASK 0x0002 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_SHIFT 1 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_WIDTH 1 /* DC2_UV_STS */ +#define WM831X_DC1_UV_STS 0x0001 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_MASK 0x0001 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_SHIFT 0 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_WIDTH 1 /* DC1_UV_STS */ + +/* + * R16469 (0x4055) - LDO UV Status + */ +#define WM831X_INTLDO_UV_STS 0x8000 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_MASK 0x8000 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_SHIFT 15 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_WIDTH 1 /* INTLDO_UV_STS */ +#define WM831X_LDO10_UV_STS 0x0200 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_MASK 0x0200 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_SHIFT 9 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_WIDTH 1 /* LDO10_UV_STS */ +#define WM831X_LDO9_UV_STS 0x0100 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_MASK 0x0100 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_SHIFT 8 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_WIDTH 1 /* LDO9_UV_STS */ +#define WM831X_LDO8_UV_STS 0x0080 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_MASK 0x0080 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_SHIFT 7 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_WIDTH 1 /* LDO8_UV_STS */ +#define WM831X_LDO7_UV_STS 0x0040 /* LDO7_UV_STS */ +#define 
WM831X_LDO7_UV_STS_MASK 0x0040 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_SHIFT 6 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_WIDTH 1 /* LDO7_UV_STS */ +#define WM831X_LDO6_UV_STS 0x0020 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_MASK 0x0020 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_SHIFT 5 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_WIDTH 1 /* LDO6_UV_STS */ +#define WM831X_LDO5_UV_STS 0x0010 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_MASK 0x0010 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_SHIFT 4 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_WIDTH 1 /* LDO5_UV_STS */ +#define WM831X_LDO4_UV_STS 0x0008 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_MASK 0x0008 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_SHIFT 3 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_WIDTH 1 /* LDO4_UV_STS */ +#define WM831X_LDO3_UV_STS 0x0004 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_MASK 0x0004 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_SHIFT 2 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_WIDTH 1 /* LDO3_UV_STS */ +#define WM831X_LDO2_UV_STS 0x0002 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_MASK 0x0002 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_SHIFT 1 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_WIDTH 1 /* LDO2_UV_STS */ +#define WM831X_LDO1_UV_STS 0x0001 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_MASK 0x0001 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_SHIFT 0 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_WIDTH 1 /* LDO1_UV_STS */ + +/* + * R16470 (0x4056) - DC1 Control 1 + */ +#define WM831X_DC1_RATE_MASK 0xC000 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_RATE_SHIFT 14 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_RATE_WIDTH 2 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_PHASE 0x1000 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_MASK 0x1000 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_SHIFT 12 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_WIDTH 1 /* DC1_PHASE */ +#define WM831X_DC1_FREQ_MASK 0x0300 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FREQ_SHIFT 8 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FREQ_WIDTH 2 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FLT 0x0080 /* DC1_FLT */ +#define WM831X_DC1_FLT_MASK 0x0080 /* DC1_FLT */ +#define WM831X_DC1_FLT_SHIFT 7 /* DC1_FLT */ +#define WM831X_DC1_FLT_WIDTH 1 /* DC1_FLT */ +#define WM831X_DC1_SOFT_START_MASK 0x0030 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_SOFT_START_SHIFT 4 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_SOFT_START_WIDTH 2 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_CAP_MASK 0x0003 /* DC1_CAP - [1:0] */ +#define WM831X_DC1_CAP_SHIFT 0 /* DC1_CAP - [1:0] */ +#define WM831X_DC1_CAP_WIDTH 2 /* DC1_CAP - [1:0] */ + +/* + * R16471 (0x4057) - DC1 Control 2 + */ +#define WM831X_DC1_ERR_ACT_MASK 0xC000 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_ERR_ACT_SHIFT 14 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_ERR_ACT_WIDTH 2 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_HWC_SRC_MASK 0x1800 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_SRC_SHIFT 11 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_SRC_WIDTH 2 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_VSEL 0x0400 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_MASK 0x0400 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_SHIFT 10 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_WIDTH 1 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_MODE_MASK 0x0300 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HWC_MODE_SHIFT 8 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HWC_MODE_WIDTH 2 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HC_THR_MASK 0x0070 /* DC1_HC_THR - 
[6:4] */ +#define WM831X_DC1_HC_THR_SHIFT 4 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_THR_WIDTH 3 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_IND_ENA 0x0001 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_MASK 0x0001 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_SHIFT 0 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_WIDTH 1 /* DC1_HC_IND_ENA */ + +/* + * R16472 (0x4058) - DC1 ON Config + */ +#define WM831X_DC1_ON_SLOT_MASK 0xE000 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_SLOT_SHIFT 13 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_SLOT_WIDTH 3 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_MODE_MASK 0x0300 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_MODE_SHIFT 8 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_MODE_WIDTH 2 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_VSEL_MASK 0x007F /* DC1_ON_VSEL - [6:0] */ +#define WM831X_DC1_ON_VSEL_SHIFT 0 /* DC1_ON_VSEL - [6:0] */ +#define WM831X_DC1_ON_VSEL_WIDTH 7 /* DC1_ON_VSEL - [6:0] */ + +/* + * R16473 (0x4059) - DC1 SLEEP Control + */ +#define WM831X_DC1_SLP_SLOT_MASK 0xE000 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_SLOT_SHIFT 13 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_SLOT_WIDTH 3 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_MODE_MASK 0x0300 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_MODE_SHIFT 8 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_MODE_WIDTH 2 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_VSEL_MASK 0x007F /* DC1_SLP_VSEL - [6:0] */ +#define WM831X_DC1_SLP_VSEL_SHIFT 0 /* DC1_SLP_VSEL - [6:0] */ +#define WM831X_DC1_SLP_VSEL_WIDTH 7 /* DC1_SLP_VSEL - [6:0] */ + +/* + * R16474 (0x405A) - DC1 DVS Control + */ +#define WM831X_DC1_DVS_SRC_MASK 0x1800 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_SRC_SHIFT 11 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_SRC_WIDTH 2 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_VSEL_MASK 0x007F /* DC1_DVS_VSEL - [6:0] */ +#define WM831X_DC1_DVS_VSEL_SHIFT 0 /* DC1_DVS_VSEL - [6:0] */ +#define WM831X_DC1_DVS_VSEL_WIDTH 7 /* DC1_DVS_VSEL - [6:0] */ + +/* + * R16475 (0x405B) - DC2 Control 1 + */ +#define WM831X_DC2_RATE_MASK 0xC000 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_RATE_SHIFT 14 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_RATE_WIDTH 2 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_PHASE 0x1000 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_MASK 0x1000 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_SHIFT 12 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_WIDTH 1 /* DC2_PHASE */ +#define WM831X_DC2_FREQ_MASK 0x0300 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FREQ_SHIFT 8 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FREQ_WIDTH 2 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FLT 0x0080 /* DC2_FLT */ +#define WM831X_DC2_FLT_MASK 0x0080 /* DC2_FLT */ +#define WM831X_DC2_FLT_SHIFT 7 /* DC2_FLT */ +#define WM831X_DC2_FLT_WIDTH 1 /* DC2_FLT */ +#define WM831X_DC2_SOFT_START_MASK 0x0030 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_SOFT_START_SHIFT 4 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_SOFT_START_WIDTH 2 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_CAP_MASK 0x0003 /* DC2_CAP - [1:0] */ +#define WM831X_DC2_CAP_SHIFT 0 /* DC2_CAP - [1:0] */ +#define WM831X_DC2_CAP_WIDTH 2 /* DC2_CAP - [1:0] */ + +/* + * R16476 (0x405C) - DC2 Control 2 + */ +#define WM831X_DC2_ERR_ACT_MASK 0xC000 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_ERR_ACT_SHIFT 14 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_ERR_ACT_WIDTH 2 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_HWC_SRC_MASK 0x1800 /* DC2_HWC_SRC - 
[12:11] */ +#define WM831X_DC2_HWC_SRC_SHIFT 11 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_SRC_WIDTH 2 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_VSEL 0x0400 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_MASK 0x0400 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_SHIFT 10 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_WIDTH 1 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_MODE_MASK 0x0300 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HWC_MODE_SHIFT 8 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HWC_MODE_WIDTH 2 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HC_THR_MASK 0x0070 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_THR_SHIFT 4 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_THR_WIDTH 3 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_IND_ENA 0x0001 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_MASK 0x0001 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_SHIFT 0 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_WIDTH 1 /* DC2_HC_IND_ENA */ + +/* + * R16477 (0x405D) - DC2 ON Config + */ +#define WM831X_DC2_ON_SLOT_MASK 0xE000 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_SLOT_SHIFT 13 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_SLOT_WIDTH 3 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_MODE_MASK 0x0300 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_MODE_SHIFT 8 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_MODE_WIDTH 2 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_VSEL_MASK 0x007F /* DC2_ON_VSEL - [6:0] */ +#define WM831X_DC2_ON_VSEL_SHIFT 0 /* DC2_ON_VSEL - [6:0] */ +#define WM831X_DC2_ON_VSEL_WIDTH 7 /* DC2_ON_VSEL - [6:0] */ + +/* + * R16478 (0x405E) - DC2 SLEEP Control + */ +#define WM831X_DC2_SLP_SLOT_MASK 0xE000 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_SLOT_SHIFT 13 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_SLOT_WIDTH 3 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_MODE_MASK 0x0300 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_MODE_SHIFT 8 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_MODE_WIDTH 2 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_VSEL_MASK 0x007F /* DC2_SLP_VSEL - [6:0] */ +#define WM831X_DC2_SLP_VSEL_SHIFT 0 /* DC2_SLP_VSEL - [6:0] */ +#define WM831X_DC2_SLP_VSEL_WIDTH 7 /* DC2_SLP_VSEL - [6:0] */ + +/* + * R16479 (0x405F) - DC2 DVS Control + */ +#define WM831X_DC2_DVS_SRC_MASK 0x1800 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_SRC_SHIFT 11 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_SRC_WIDTH 2 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_VSEL_MASK 0x007F /* DC2_DVS_VSEL - [6:0] */ +#define WM831X_DC2_DVS_VSEL_SHIFT 0 /* DC2_DVS_VSEL - [6:0] */ +#define WM831X_DC2_DVS_VSEL_WIDTH 7 /* DC2_DVS_VSEL - [6:0] */ + +/* + * R16480 (0x4060) - DC3 Control 1 + */ +#define WM831X_DC3_PHASE 0x1000 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_MASK 0x1000 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_SHIFT 12 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_WIDTH 1 /* DC3_PHASE */ +#define WM831X_DC3_FLT 0x0080 /* DC3_FLT */ +#define WM831X_DC3_FLT_MASK 0x0080 /* DC3_FLT */ +#define WM831X_DC3_FLT_SHIFT 7 /* DC3_FLT */ +#define WM831X_DC3_FLT_WIDTH 1 /* DC3_FLT */ +#define WM831X_DC3_SOFT_START_MASK 0x0030 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_SOFT_START_SHIFT 4 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_SOFT_START_WIDTH 2 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_STNBY_LIM_MASK 0x000C /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_STNBY_LIM_SHIFT 2 /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_STNBY_LIM_WIDTH 2 /* DC3_STNBY_LIM - [3:2] */ 
+#define WM831X_DC3_CAP_MASK 0x0003 /* DC3_CAP - [1:0] */ +#define WM831X_DC3_CAP_SHIFT 0 /* DC3_CAP - [1:0] */ +#define WM831X_DC3_CAP_WIDTH 2 /* DC3_CAP - [1:0] */ + +/* + * R16481 (0x4061) - DC3 Control 2 + */ +#define WM831X_DC3_ERR_ACT_MASK 0xC000 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_ERR_ACT_SHIFT 14 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_ERR_ACT_WIDTH 2 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_HWC_SRC_MASK 0x1800 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_SRC_SHIFT 11 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_SRC_WIDTH 2 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_VSEL 0x0400 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_MASK 0x0400 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_SHIFT 10 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_WIDTH 1 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_MODE_MASK 0x0300 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_HWC_MODE_SHIFT 8 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_HWC_MODE_WIDTH 2 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_OVP 0x0080 /* DC3_OVP */ +#define WM831X_DC3_OVP_MASK 0x0080 /* DC3_OVP */ +#define WM831X_DC3_OVP_SHIFT 7 /* DC3_OVP */ +#define WM831X_DC3_OVP_WIDTH 1 /* DC3_OVP */ + +/* + * R16482 (0x4062) - DC3 ON Config + */ +#define WM831X_DC3_ON_SLOT_MASK 0xE000 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_SLOT_SHIFT 13 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_SLOT_WIDTH 3 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_MODE_MASK 0x0300 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_MODE_SHIFT 8 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_MODE_WIDTH 2 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_VSEL_MASK 0x007F /* DC3_ON_VSEL - [6:0] */ +#define WM831X_DC3_ON_VSEL_SHIFT 0 /* DC3_ON_VSEL - [6:0] */ +#define WM831X_DC3_ON_VSEL_WIDTH 7 /* DC3_ON_VSEL - [6:0] */ + +/* + * R16483 (0x4063) - DC3 SLEEP Control + */ +#define WM831X_DC3_SLP_SLOT_MASK 0xE000 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_SLOT_SHIFT 13 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_SLOT_WIDTH 3 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_MODE_MASK 0x0300 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_MODE_SHIFT 8 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_MODE_WIDTH 2 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_VSEL_MASK 0x007F /* DC3_SLP_VSEL - [6:0] */ +#define WM831X_DC3_SLP_VSEL_SHIFT 0 /* DC3_SLP_VSEL - [6:0] */ +#define WM831X_DC3_SLP_VSEL_WIDTH 7 /* DC3_SLP_VSEL - [6:0] */ + +/* + * R16484 (0x4064) - DC4 Control + */ +#define WM831X_DC4_ERR_ACT_MASK 0xC000 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_ERR_ACT_SHIFT 14 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_ERR_ACT_WIDTH 2 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_HWC_SRC_MASK 0x1800 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_SRC_SHIFT 11 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_SRC_WIDTH 2 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_MODE 0x0100 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_MASK 0x0100 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_SHIFT 8 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_WIDTH 1 /* DC4_HWC_MODE */ +#define WM831X_DC4_RANGE_MASK 0x000C /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_RANGE_SHIFT 2 /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_RANGE_WIDTH 2 /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_FBSRC 0x0001 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_MASK 0x0001 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_SHIFT 0 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_WIDTH 1 /* DC4_FBSRC */ + +/* + * R16485 
(0x4065) - DC4 SLEEP Control + */ +#define WM831X_DC4_SLPENA 0x0100 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_MASK 0x0100 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_SHIFT 8 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_WIDTH 1 /* DC4_SLPENA */ + +/* + * R16526 (0x408E) - Power Good Source 1 + */ +#define WM831X_DC4_OK 0x0008 /* DC4_OK */ +#define WM831X_DC4_OK_MASK 0x0008 /* DC4_OK */ +#define WM831X_DC4_OK_SHIFT 3 /* DC4_OK */ +#define WM831X_DC4_OK_WIDTH 1 /* DC4_OK */ +#define WM831X_DC3_OK 0x0004 /* DC3_OK */ +#define WM831X_DC3_OK_MASK 0x0004 /* DC3_OK */ +#define WM831X_DC3_OK_SHIFT 2 /* DC3_OK */ +#define WM831X_DC3_OK_WIDTH 1 /* DC3_OK */ +#define WM831X_DC2_OK 0x0002 /* DC2_OK */ +#define WM831X_DC2_OK_MASK 0x0002 /* DC2_OK */ +#define WM831X_DC2_OK_SHIFT 1 /* DC2_OK */ +#define WM831X_DC2_OK_WIDTH 1 /* DC2_OK */ +#define WM831X_DC1_OK 0x0001 /* DC1_OK */ +#define WM831X_DC1_OK_MASK 0x0001 /* DC1_OK */ +#define WM831X_DC1_OK_SHIFT 0 /* DC1_OK */ +#define WM831X_DC1_OK_WIDTH 1 /* DC1_OK */ + #define WM831X_ISINK_MAX_ISEL 56 extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; -- cgit v1.2.3-70-g09d2 From d1c6b4fe668b2ae02f490deee86eaab60822a362 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 28 Jul 2009 15:22:02 +0100 Subject: regulator: Add WM831x LDO support The WM831x series of devices provide three types of LDO: - General purpose LDOs supporting voltages from 0.9-3.3V - High performance analogue LDOs supporting voltages from 1-3.5V - Very low power consumption LDOs intended to support always on functionality. This patch adds support for all three kinds of LDO. Each regulator is probed as an individual platform device with resources used to provide the register map location of the regulator. Mixed hardware and software control of regulators is not current supported. Signed-off-by: Mark Brown Acked-by: Liam Girdwood Signed-off-by: Samuel Ortiz --- drivers/regulator/Makefile | 1 + drivers/regulator/wm831x-ldo.c | 852 +++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/regulator.h | 626 +++++++++++++++++++++++++ 3 files changed, 1479 insertions(+) create mode 100644 drivers/regulator/wm831x-ldo.c (limited to 'include') diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index b1d2b826f53..a0a635fdae8 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o obj-$(CONFIG_REGULATOR_TWL4030) += twl4030-regulator.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o +obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c new file mode 100644 index 00000000000..bb61aede480 --- /dev/null +++ b/drivers/regulator/wm831x-ldo.c @@ -0,0 +1,852 @@ +/* + * wm831x-ldo.c -- LDO driver for the WM831x series + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define WM831X_LDO_MAX_NAME 6 + +#define WM831X_LDO_CONTROL 0 +#define WM831X_LDO_ON_CONTROL 1 +#define WM831X_LDO_SLEEP_CONTROL 2 + +#define WM831X_ALIVE_LDO_ON_CONTROL 0 +#define WM831X_ALIVE_LDO_SLEEP_CONTROL 1 + +struct wm831x_ldo { + char name[WM831X_LDO_MAX_NAME]; + struct regulator_desc desc; + int base; + struct wm831x *wm831x; + struct regulator_dev *regulator; +}; + +/* + * Shared + */ + +static int wm831x_ldo_is_enabled(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int mask = 1 << rdev_get_id(rdev); + int reg; + + reg = wm831x_reg_read(wm831x, WM831X_LDO_ENABLE); + if (reg < 0) + return reg; + + if (reg & mask) + return 1; + else + return 0; +} + +static int wm831x_ldo_enable(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int mask = 1 << rdev_get_id(rdev); + + return wm831x_set_bits(wm831x, WM831X_LDO_ENABLE, mask, mask); +} + +static int wm831x_ldo_disable(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int mask = 1 << rdev_get_id(rdev); + + return wm831x_set_bits(wm831x, WM831X_LDO_ENABLE, mask, 0); +} + +static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data) +{ + struct wm831x_ldo *ldo = data; + + regulator_notifier_call_chain(ldo->regulator, + REGULATOR_EVENT_UNDER_VOLTAGE, + NULL); + + return IRQ_HANDLED; +} + +/* + * General purpose LDOs + */ + +#define WM831X_GP_LDO_SELECTOR_LOW 0xe +#define WM831X_GP_LDO_MAX_SELECTOR 0x1f + +static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + /* 0.9-1.6V in 50mV steps */ + if (selector <= WM831X_GP_LDO_SELECTOR_LOW) + return 900000 + (selector * 50000); + /* 1.7-3.3V in 50mV steps */ + if (selector <= WM831X_GP_LDO_MAX_SELECTOR) + return 1600000 + ((selector - WM831X_GP_LDO_SELECTOR_LOW) + * 100000); + return -EINVAL; +} + +static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg, + int min_uV, int max_uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int vsel, ret; + + if (min_uV < 900000) + vsel = 0; + else if (min_uV < 1700000) + vsel = ((min_uV - 900000) / 50000); + else + vsel = ((min_uV - 1700000) / 100000) + + WM831X_GP_LDO_SELECTOR_LOW + 1; + + ret = wm831x_gp_ldo_list_voltage(rdev, vsel); + if (ret < 0) + return ret; + if (ret < min_uV || ret > max_uV) + return -EINVAL; + + return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel); +} + +static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_LDO_ON_CONTROL; + + return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV); +} + +static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev, + int uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; + + return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV); +} + +static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int reg = ldo->base + WM831X_LDO_ON_CONTROL; + int ret; + + ret = wm831x_reg_read(wm831x, reg); + if (ret < 0) + return ret; + + ret &= 
WM831X_LDO1_ON_VSEL_MASK; + + return wm831x_gp_ldo_list_voltage(rdev, ret); +} + +static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int ctrl_reg = ldo->base + WM831X_LDO_CONTROL; + int on_reg = ldo->base + WM831X_LDO_ON_CONTROL; + unsigned int ret; + + ret = wm831x_reg_read(wm831x, on_reg); + if (ret < 0) + return 0; + + if (!(ret & WM831X_LDO1_ON_MODE)) + return REGULATOR_MODE_NORMAL; + + ret = wm831x_reg_read(wm831x, ctrl_reg); + if (ret < 0) + return 0; + + if (ret & WM831X_LDO1_LP_MODE) + return REGULATOR_MODE_STANDBY; + else + return REGULATOR_MODE_IDLE; +} + +static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int ctrl_reg = ldo->base + WM831X_LDO_CONTROL; + int on_reg = ldo->base + WM831X_LDO_ON_CONTROL; + int ret; + + + switch (mode) { + case REGULATOR_MODE_NORMAL: + ret = wm831x_set_bits(wm831x, on_reg, + WM831X_LDO1_ON_MODE, 0); + if (ret < 0) + return ret; + break; + + case REGULATOR_MODE_IDLE: + ret = wm831x_set_bits(wm831x, ctrl_reg, + WM831X_LDO1_LP_MODE, + WM831X_LDO1_LP_MODE); + if (ret < 0) + return ret; + + ret = wm831x_set_bits(wm831x, on_reg, + WM831X_LDO1_ON_MODE, + WM831X_LDO1_ON_MODE); + if (ret < 0) + return ret; + + case REGULATOR_MODE_STANDBY: + ret = wm831x_set_bits(wm831x, ctrl_reg, + WM831X_LDO1_LP_MODE, 0); + if (ret < 0) + return ret; + + ret = wm831x_set_bits(wm831x, on_reg, + WM831X_LDO1_ON_MODE, + WM831X_LDO1_ON_MODE); + if (ret < 0) + return ret; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int wm831x_gp_ldo_get_status(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int mask = 1 << rdev_get_id(rdev); + int ret; + + /* Is the regulator on? */ + ret = wm831x_reg_read(wm831x, WM831X_LDO_STATUS); + if (ret < 0) + return ret; + if (!(ret & mask)) + return REGULATOR_STATUS_OFF; + + /* Is it reporting under voltage? 
*/ + ret = wm831x_reg_read(wm831x, WM831X_LDO_UV_STATUS); + if (ret & mask) + return REGULATOR_STATUS_ERROR; + + ret = wm831x_gp_ldo_get_mode(rdev); + if (ret < 0) + return ret; + else + return regulator_mode_to_status(ret); +} + +static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev, + int input_uV, + int output_uV, int load_uA) +{ + if (load_uA < 20000) + return REGULATOR_MODE_STANDBY; + if (load_uA < 50000) + return REGULATOR_MODE_IDLE; + return REGULATOR_MODE_NORMAL; +} + + +static struct regulator_ops wm831x_gp_ldo_ops = { + .list_voltage = wm831x_gp_ldo_list_voltage, + .get_voltage = wm831x_gp_ldo_get_voltage, + .set_voltage = wm831x_gp_ldo_set_voltage, + .set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage, + .get_mode = wm831x_gp_ldo_get_mode, + .set_mode = wm831x_gp_ldo_set_mode, + .get_status = wm831x_gp_ldo_get_status, + .get_optimum_mode = wm831x_gp_ldo_get_optimum_mode, + + .is_enabled = wm831x_ldo_is_enabled, + .enable = wm831x_ldo_enable, + .disable = wm831x_ldo_disable, +}; + +static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + int id = pdev->id % ARRAY_SIZE(pdata->ldo); + struct wm831x_ldo *ldo; + struct resource *res; + int ret, irq; + + dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); + + if (pdata == NULL || pdata->ldo[id] == NULL) + return -ENODEV; + + ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL); + if (ldo == NULL) { + dev_err(&pdev->dev, "Unable to allocate private data\n"); + return -ENOMEM; + } + + ldo->wm831x = wm831x; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No I/O resource\n"); + ret = -EINVAL; + goto err; + } + ldo->base = res->start; + + snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1); + ldo->desc.name = ldo->name; + ldo->desc.id = id; + ldo->desc.type = REGULATOR_VOLTAGE; + ldo->desc.n_voltages = WM831X_GP_LDO_MAX_SELECTOR + 1; + ldo->desc.ops = &wm831x_gp_ldo_ops; + ldo->desc.owner = THIS_MODULE; + + ldo->regulator = regulator_register(&ldo->desc, &pdev->dev, + pdata->ldo[id], ldo); + if (IS_ERR(ldo->regulator)) { + ret = PTR_ERR(ldo->regulator); + dev_err(wm831x->dev, "Failed to register LDO%d: %d\n", + id + 1, ret); + goto err; + } + + irq = platform_get_irq_byname(pdev, "UV"); + ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq, + IRQF_TRIGGER_RISING, ldo->name, + ldo); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", + irq, ret); + goto err_regulator; + } + + platform_set_drvdata(pdev, ldo); + + return 0; + +err_regulator: + regulator_unregister(ldo->regulator); +err: + kfree(ldo); + return ret; +} + +static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev) +{ + struct wm831x_ldo *ldo = platform_get_drvdata(pdev); + struct wm831x *wm831x = ldo->wm831x; + + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); + regulator_unregister(ldo->regulator); + kfree(ldo); + + return 0; +} + +static struct platform_driver wm831x_gp_ldo_driver = { + .probe = wm831x_gp_ldo_probe, + .remove = __devexit_p(wm831x_gp_ldo_remove), + .driver = { + .name = "wm831x-ldo", + }, +}; + +/* + * Analogue LDOs + */ + + +#define WM831X_ALDO_SELECTOR_LOW 0xc +#define WM831X_ALDO_MAX_SELECTOR 0x1f + +static int wm831x_aldo_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + /* 1-1.6V in 50mV steps */ + if (selector <= WM831X_ALDO_SELECTOR_LOW) + return 1000000 + 
(selector * 50000); + /* 1.7-3.5V in 50mV steps */ + if (selector <= WM831X_ALDO_MAX_SELECTOR) + return 1600000 + ((selector - WM831X_ALDO_SELECTOR_LOW) + * 100000); + return -EINVAL; +} + +static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg, + int min_uV, int max_uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int vsel, ret; + + if (min_uV < 1000000) + vsel = 0; + else if (min_uV < 1700000) + vsel = ((min_uV - 1000000) / 50000); + else + vsel = ((min_uV - 1700000) / 100000) + + WM831X_ALDO_SELECTOR_LOW + 1; + + ret = wm831x_aldo_list_voltage(rdev, vsel); + if (ret < 0) + return ret; + if (ret < min_uV || ret > max_uV) + return -EINVAL; + + return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel); +} + +static int wm831x_aldo_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_LDO_ON_CONTROL; + + return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV); +} + +static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev, + int uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; + + return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV); +} + +static int wm831x_aldo_get_voltage(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int reg = ldo->base + WM831X_LDO_ON_CONTROL; + int ret; + + ret = wm831x_reg_read(wm831x, reg); + if (ret < 0) + return ret; + + ret &= WM831X_LDO7_ON_VSEL_MASK; + + return wm831x_aldo_list_voltage(rdev, ret); +} + +static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int on_reg = ldo->base + WM831X_LDO_ON_CONTROL; + unsigned int ret; + + ret = wm831x_reg_read(wm831x, on_reg); + if (ret < 0) + return 0; + + if (ret & WM831X_LDO7_ON_MODE) + return REGULATOR_MODE_IDLE; + else + return REGULATOR_MODE_NORMAL; +} + +static int wm831x_aldo_set_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int ctrl_reg = ldo->base + WM831X_LDO_CONTROL; + int on_reg = ldo->base + WM831X_LDO_ON_CONTROL; + int ret; + + + switch (mode) { + case REGULATOR_MODE_NORMAL: + ret = wm831x_set_bits(wm831x, on_reg, + WM831X_LDO7_ON_MODE, 0); + if (ret < 0) + return ret; + break; + + case REGULATOR_MODE_IDLE: + ret = wm831x_set_bits(wm831x, ctrl_reg, + WM831X_LDO7_ON_MODE, + WM831X_LDO7_ON_MODE); + if (ret < 0) + return ret; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int wm831x_aldo_get_status(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int mask = 1 << rdev_get_id(rdev); + int ret; + + /* Is the regulator on? */ + ret = wm831x_reg_read(wm831x, WM831X_LDO_STATUS); + if (ret < 0) + return ret; + if (!(ret & mask)) + return REGULATOR_STATUS_OFF; + + /* Is it reporting under voltage? 
*/ + ret = wm831x_reg_read(wm831x, WM831X_LDO_UV_STATUS); + if (ret & mask) + return REGULATOR_STATUS_ERROR; + + ret = wm831x_aldo_get_mode(rdev); + if (ret < 0) + return ret; + else + return regulator_mode_to_status(ret); +} + +static struct regulator_ops wm831x_aldo_ops = { + .list_voltage = wm831x_aldo_list_voltage, + .get_voltage = wm831x_aldo_get_voltage, + .set_voltage = wm831x_aldo_set_voltage, + .set_suspend_voltage = wm831x_aldo_set_suspend_voltage, + .get_mode = wm831x_aldo_get_mode, + .set_mode = wm831x_aldo_set_mode, + .get_status = wm831x_aldo_get_status, + + .is_enabled = wm831x_ldo_is_enabled, + .enable = wm831x_ldo_enable, + .disable = wm831x_ldo_disable, +}; + +static __devinit int wm831x_aldo_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + int id = pdev->id % ARRAY_SIZE(pdata->ldo); + struct wm831x_ldo *ldo; + struct resource *res; + int ret, irq; + + dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); + + if (pdata == NULL || pdata->ldo[id] == NULL) + return -ENODEV; + + ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL); + if (ldo == NULL) { + dev_err(&pdev->dev, "Unable to allocate private data\n"); + return -ENOMEM; + } + + ldo->wm831x = wm831x; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No I/O resource\n"); + ret = -EINVAL; + goto err; + } + ldo->base = res->start; + + snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1); + ldo->desc.name = ldo->name; + ldo->desc.id = id; + ldo->desc.type = REGULATOR_VOLTAGE; + ldo->desc.n_voltages = WM831X_ALDO_MAX_SELECTOR + 1; + ldo->desc.ops = &wm831x_aldo_ops; + ldo->desc.owner = THIS_MODULE; + + ldo->regulator = regulator_register(&ldo->desc, &pdev->dev, + pdata->ldo[id], ldo); + if (IS_ERR(ldo->regulator)) { + ret = PTR_ERR(ldo->regulator); + dev_err(wm831x->dev, "Failed to register LDO%d: %d\n", + id + 1, ret); + goto err; + } + + irq = platform_get_irq_byname(pdev, "UV"); + ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq, + IRQF_TRIGGER_RISING, ldo->name, + ldo); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", + irq, ret); + goto err_regulator; + } + + platform_set_drvdata(pdev, ldo); + + return 0; + +err_regulator: + regulator_unregister(ldo->regulator); +err: + kfree(ldo); + return ret; +} + +static __devexit int wm831x_aldo_remove(struct platform_device *pdev) +{ + struct wm831x_ldo *ldo = platform_get_drvdata(pdev); + struct wm831x *wm831x = ldo->wm831x; + + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); + regulator_unregister(ldo->regulator); + kfree(ldo); + + return 0; +} + +static struct platform_driver wm831x_aldo_driver = { + .probe = wm831x_aldo_probe, + .remove = __devexit_p(wm831x_aldo_remove), + .driver = { + .name = "wm831x-aldo", + }, +}; + +/* + * Alive LDO + */ + +#define WM831X_ALIVE_LDO_MAX_SELECTOR 0xf + +static int wm831x_alive_ldo_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + /* 0.8-1.55V in 50mV steps */ + if (selector <= WM831X_ALIVE_LDO_MAX_SELECTOR) + return 800000 + (selector * 50000); + return -EINVAL; +} + +static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev, + int reg, + int min_uV, int max_uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int vsel, ret; + + vsel = (min_uV - 800000) / 50000; + + ret = wm831x_alive_ldo_list_voltage(rdev, vsel); + if (ret < 0) + return ret; + if 
(ret < min_uV || ret > max_uV) + return -EINVAL; + + return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel); +} + +static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL; + + return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV); +} + +static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev, + int uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL; + + return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV); +} + +static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL; + int ret; + + ret = wm831x_reg_read(wm831x, reg); + if (ret < 0) + return ret; + + ret &= WM831X_LDO11_ON_VSEL_MASK; + + return wm831x_alive_ldo_list_voltage(rdev, ret); +} + +static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int mask = 1 << rdev_get_id(rdev); + int ret; + + /* Is the regulator on? */ + ret = wm831x_reg_read(wm831x, WM831X_LDO_STATUS); + if (ret < 0) + return ret; + if (ret & mask) + return REGULATOR_STATUS_ON; + else + return REGULATOR_STATUS_OFF; +} + +static struct regulator_ops wm831x_alive_ldo_ops = { + .list_voltage = wm831x_alive_ldo_list_voltage, + .get_voltage = wm831x_alive_ldo_get_voltage, + .set_voltage = wm831x_alive_ldo_set_voltage, + .set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage, + .get_status = wm831x_alive_ldo_get_status, + + .is_enabled = wm831x_ldo_is_enabled, + .enable = wm831x_ldo_enable, + .disable = wm831x_ldo_disable, +}; + +static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + int id = pdev->id % ARRAY_SIZE(pdata->ldo); + struct wm831x_ldo *ldo; + struct resource *res; + int ret; + + dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); + + if (pdata == NULL || pdata->ldo[id] == NULL) + return -ENODEV; + + ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL); + if (ldo == NULL) { + dev_err(&pdev->dev, "Unable to allocate private data\n"); + return -ENOMEM; + } + + ldo->wm831x = wm831x; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No I/O resource\n"); + ret = -EINVAL; + goto err; + } + ldo->base = res->start; + + snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1); + ldo->desc.name = ldo->name; + ldo->desc.id = id; + ldo->desc.type = REGULATOR_VOLTAGE; + ldo->desc.n_voltages = WM831X_ALIVE_LDO_MAX_SELECTOR + 1; + ldo->desc.ops = &wm831x_alive_ldo_ops; + ldo->desc.owner = THIS_MODULE; + + ldo->regulator = regulator_register(&ldo->desc, &pdev->dev, + pdata->ldo[id], ldo); + if (IS_ERR(ldo->regulator)) { + ret = PTR_ERR(ldo->regulator); + dev_err(wm831x->dev, "Failed to register LDO%d: %d\n", + id + 1, ret); + goto err; + } + + platform_set_drvdata(pdev, ldo); + + return 0; + +err: + kfree(ldo); + return ret; +} + +static __devexit int wm831x_alive_ldo_remove(struct platform_device *pdev) +{ + struct wm831x_ldo *ldo = platform_get_drvdata(pdev); + + regulator_unregister(ldo->regulator); + kfree(ldo); + + return 0; +} + +static struct platform_driver 
wm831x_alive_ldo_driver = { + .probe = wm831x_alive_ldo_probe, + .remove = __devexit_p(wm831x_alive_ldo_remove), + .driver = { + .name = "wm831x-alive-ldo", + }, +}; + +static int __init wm831x_ldo_init(void) +{ + int ret; + + ret = platform_driver_register(&wm831x_gp_ldo_driver); + if (ret != 0) + pr_err("Failed to register WM831x GP LDO driver: %d\n", ret); + + ret = platform_driver_register(&wm831x_aldo_driver); + if (ret != 0) + pr_err("Failed to register WM831x ALDO driver: %d\n", ret); + + ret = platform_driver_register(&wm831x_alive_ldo_driver); + if (ret != 0) + pr_err("Failed to register WM831x alive LDO driver: %d\n", + ret); + + return 0; +} +subsys_initcall(wm831x_ldo_init); + +static void __exit wm831x_ldo_exit(void) +{ + platform_driver_unregister(&wm831x_alive_ldo_driver); + platform_driver_unregister(&wm831x_aldo_driver); + platform_driver_unregister(&wm831x_gp_ldo_driver); +} +module_exit(wm831x_ldo_exit); + +/* Module information */ +MODULE_AUTHOR("Mark Brown "); +MODULE_DESCRIPTION("WM831x LDO driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-ldo"); +MODULE_ALIAS("platform:wm831x-aldo"); +MODULE_ALIAS("platform:wm831x-aliveldo"); diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h index c74d6aafdca..f95466343fb 100644 --- a/include/linux/mfd/wm831x/regulator.h +++ b/include/linux/mfd/wm831x/regulator.h @@ -566,6 +566,588 @@ #define WM831X_DC4_SLPENA_SHIFT 8 /* DC4_SLPENA */ #define WM831X_DC4_SLPENA_WIDTH 1 /* DC4_SLPENA */ +/* + * R16488 (0x4068) - LDO1 Control + */ +#define WM831X_LDO1_ERR_ACT_MASK 0xC000 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_ERR_ACT_SHIFT 14 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_ERR_ACT_WIDTH 2 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_HWC_SRC_MASK 0x1800 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_SRC_SHIFT 11 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_SRC_WIDTH 2 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_VSEL 0x0400 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_MASK 0x0400 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_SHIFT 10 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_WIDTH 1 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_MODE_MASK 0x0300 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_HWC_MODE_SHIFT 8 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_HWC_MODE_WIDTH 2 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_FLT 0x0080 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_MASK 0x0080 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_SHIFT 7 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_WIDTH 1 /* LDO1_FLT */ +#define WM831X_LDO1_SWI 0x0040 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_MASK 0x0040 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_SHIFT 6 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_WIDTH 1 /* LDO1_SWI */ +#define WM831X_LDO1_LP_MODE 0x0001 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_MASK 0x0001 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_SHIFT 0 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_WIDTH 1 /* LDO1_LP_MODE */ + +/* + * R16489 (0x4069) - LDO1 ON Control + */ +#define WM831X_LDO1_ON_SLOT_MASK 0xE000 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_SLOT_SHIFT 13 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_SLOT_WIDTH 3 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_MODE 0x0100 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_MASK 0x0100 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_SHIFT 8 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_WIDTH 1 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_VSEL_MASK 0x001F /* 
LDO1_ON_VSEL - [4:0] */ +#define WM831X_LDO1_ON_VSEL_SHIFT 0 /* LDO1_ON_VSEL - [4:0] */ +#define WM831X_LDO1_ON_VSEL_WIDTH 5 /* LDO1_ON_VSEL - [4:0] */ + +/* + * R16490 (0x406A) - LDO1 SLEEP Control + */ +#define WM831X_LDO1_SLP_SLOT_MASK 0xE000 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_SLOT_SHIFT 13 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_SLOT_WIDTH 3 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_MODE 0x0100 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_MASK 0x0100 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_SHIFT 8 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_WIDTH 1 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_VSEL_MASK 0x001F /* LDO1_SLP_VSEL - [4:0] */ +#define WM831X_LDO1_SLP_VSEL_SHIFT 0 /* LDO1_SLP_VSEL - [4:0] */ +#define WM831X_LDO1_SLP_VSEL_WIDTH 5 /* LDO1_SLP_VSEL - [4:0] */ + +/* + * R16491 (0x406B) - LDO2 Control + */ +#define WM831X_LDO2_ERR_ACT_MASK 0xC000 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_ERR_ACT_SHIFT 14 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_ERR_ACT_WIDTH 2 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_HWC_SRC_MASK 0x1800 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_SRC_SHIFT 11 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_SRC_WIDTH 2 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_VSEL 0x0400 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_MASK 0x0400 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_SHIFT 10 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_WIDTH 1 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_MODE_MASK 0x0300 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_HWC_MODE_SHIFT 8 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_HWC_MODE_WIDTH 2 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_FLT 0x0080 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_MASK 0x0080 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_SHIFT 7 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_WIDTH 1 /* LDO2_FLT */ +#define WM831X_LDO2_SWI 0x0040 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_MASK 0x0040 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_SHIFT 6 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_WIDTH 1 /* LDO2_SWI */ +#define WM831X_LDO2_LP_MODE 0x0001 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_MASK 0x0001 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_SHIFT 0 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_WIDTH 1 /* LDO2_LP_MODE */ + +/* + * R16492 (0x406C) - LDO2 ON Control + */ +#define WM831X_LDO2_ON_SLOT_MASK 0xE000 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_SLOT_SHIFT 13 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_SLOT_WIDTH 3 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_MODE 0x0100 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_MASK 0x0100 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_SHIFT 8 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_WIDTH 1 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_VSEL_MASK 0x001F /* LDO2_ON_VSEL - [4:0] */ +#define WM831X_LDO2_ON_VSEL_SHIFT 0 /* LDO2_ON_VSEL - [4:0] */ +#define WM831X_LDO2_ON_VSEL_WIDTH 5 /* LDO2_ON_VSEL - [4:0] */ + +/* + * R16493 (0x406D) - LDO2 SLEEP Control + */ +#define WM831X_LDO2_SLP_SLOT_MASK 0xE000 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_SLOT_SHIFT 13 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_SLOT_WIDTH 3 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_MODE 0x0100 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_MASK 0x0100 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_SHIFT 8 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_WIDTH 1 /* LDO2_SLP_MODE */ +#define 
WM831X_LDO2_SLP_VSEL_MASK 0x001F /* LDO2_SLP_VSEL - [4:0] */ +#define WM831X_LDO2_SLP_VSEL_SHIFT 0 /* LDO2_SLP_VSEL - [4:0] */ +#define WM831X_LDO2_SLP_VSEL_WIDTH 5 /* LDO2_SLP_VSEL - [4:0] */ + +/* + * R16494 (0x406E) - LDO3 Control + */ +#define WM831X_LDO3_ERR_ACT_MASK 0xC000 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_ERR_ACT_SHIFT 14 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_ERR_ACT_WIDTH 2 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_HWC_SRC_MASK 0x1800 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_SRC_SHIFT 11 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_SRC_WIDTH 2 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_VSEL 0x0400 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_MASK 0x0400 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_SHIFT 10 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_WIDTH 1 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_MODE_MASK 0x0300 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_HWC_MODE_SHIFT 8 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_HWC_MODE_WIDTH 2 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_FLT 0x0080 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_MASK 0x0080 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_SHIFT 7 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_WIDTH 1 /* LDO3_FLT */ +#define WM831X_LDO3_SWI 0x0040 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_MASK 0x0040 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_SHIFT 6 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_WIDTH 1 /* LDO3_SWI */ +#define WM831X_LDO3_LP_MODE 0x0001 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_MASK 0x0001 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_SHIFT 0 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_WIDTH 1 /* LDO3_LP_MODE */ + +/* + * R16495 (0x406F) - LDO3 ON Control + */ +#define WM831X_LDO3_ON_SLOT_MASK 0xE000 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_SLOT_SHIFT 13 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_SLOT_WIDTH 3 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_MODE 0x0100 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_MASK 0x0100 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_SHIFT 8 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_WIDTH 1 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_VSEL_MASK 0x001F /* LDO3_ON_VSEL - [4:0] */ +#define WM831X_LDO3_ON_VSEL_SHIFT 0 /* LDO3_ON_VSEL - [4:0] */ +#define WM831X_LDO3_ON_VSEL_WIDTH 5 /* LDO3_ON_VSEL - [4:0] */ + +/* + * R16496 (0x4070) - LDO3 SLEEP Control + */ +#define WM831X_LDO3_SLP_SLOT_MASK 0xE000 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_SLOT_SHIFT 13 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_SLOT_WIDTH 3 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_MODE 0x0100 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_MASK 0x0100 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_SHIFT 8 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_WIDTH 1 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_VSEL_MASK 0x001F /* LDO3_SLP_VSEL - [4:0] */ +#define WM831X_LDO3_SLP_VSEL_SHIFT 0 /* LDO3_SLP_VSEL - [4:0] */ +#define WM831X_LDO3_SLP_VSEL_WIDTH 5 /* LDO3_SLP_VSEL - [4:0] */ + +/* + * R16497 (0x4071) - LDO4 Control + */ +#define WM831X_LDO4_ERR_ACT_MASK 0xC000 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_ERR_ACT_SHIFT 14 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_ERR_ACT_WIDTH 2 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_HWC_SRC_MASK 0x1800 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_SRC_SHIFT 11 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_SRC_WIDTH 2 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_VSEL 
0x0400 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_MASK 0x0400 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_SHIFT 10 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_WIDTH 1 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_MODE_MASK 0x0300 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_HWC_MODE_SHIFT 8 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_HWC_MODE_WIDTH 2 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_FLT 0x0080 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_MASK 0x0080 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_SHIFT 7 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_WIDTH 1 /* LDO4_FLT */ +#define WM831X_LDO4_SWI 0x0040 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_MASK 0x0040 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_SHIFT 6 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_WIDTH 1 /* LDO4_SWI */ +#define WM831X_LDO4_LP_MODE 0x0001 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_MASK 0x0001 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_SHIFT 0 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_WIDTH 1 /* LDO4_LP_MODE */ + +/* + * R16498 (0x4072) - LDO4 ON Control + */ +#define WM831X_LDO4_ON_SLOT_MASK 0xE000 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_SLOT_SHIFT 13 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_SLOT_WIDTH 3 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_MODE 0x0100 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_MASK 0x0100 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_SHIFT 8 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_WIDTH 1 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_VSEL_MASK 0x001F /* LDO4_ON_VSEL - [4:0] */ +#define WM831X_LDO4_ON_VSEL_SHIFT 0 /* LDO4_ON_VSEL - [4:0] */ +#define WM831X_LDO4_ON_VSEL_WIDTH 5 /* LDO4_ON_VSEL - [4:0] */ + +/* + * R16499 (0x4073) - LDO4 SLEEP Control + */ +#define WM831X_LDO4_SLP_SLOT_MASK 0xE000 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_SLOT_SHIFT 13 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_SLOT_WIDTH 3 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_MODE 0x0100 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_MASK 0x0100 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_SHIFT 8 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_WIDTH 1 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_VSEL_MASK 0x001F /* LDO4_SLP_VSEL - [4:0] */ +#define WM831X_LDO4_SLP_VSEL_SHIFT 0 /* LDO4_SLP_VSEL - [4:0] */ +#define WM831X_LDO4_SLP_VSEL_WIDTH 5 /* LDO4_SLP_VSEL - [4:0] */ + +/* + * R16500 (0x4074) - LDO5 Control + */ +#define WM831X_LDO5_ERR_ACT_MASK 0xC000 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_ERR_ACT_SHIFT 14 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_ERR_ACT_WIDTH 2 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_HWC_SRC_MASK 0x1800 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_SRC_SHIFT 11 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_SRC_WIDTH 2 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_VSEL 0x0400 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_MASK 0x0400 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_SHIFT 10 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_WIDTH 1 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_MODE_MASK 0x0300 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_HWC_MODE_SHIFT 8 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_HWC_MODE_WIDTH 2 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_FLT 0x0080 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_MASK 0x0080 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_SHIFT 7 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_WIDTH 1 /* LDO5_FLT */ +#define WM831X_LDO5_SWI 0x0040 /* LDO5_SWI */ +#define 
WM831X_LDO5_SWI_MASK 0x0040 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_SHIFT 6 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_WIDTH 1 /* LDO5_SWI */ +#define WM831X_LDO5_LP_MODE 0x0001 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_MASK 0x0001 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_SHIFT 0 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_WIDTH 1 /* LDO5_LP_MODE */ + +/* + * R16501 (0x4075) - LDO5 ON Control + */ +#define WM831X_LDO5_ON_SLOT_MASK 0xE000 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_SLOT_SHIFT 13 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_SLOT_WIDTH 3 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_MODE 0x0100 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_MASK 0x0100 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_SHIFT 8 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_WIDTH 1 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_VSEL_MASK 0x001F /* LDO5_ON_VSEL - [4:0] */ +#define WM831X_LDO5_ON_VSEL_SHIFT 0 /* LDO5_ON_VSEL - [4:0] */ +#define WM831X_LDO5_ON_VSEL_WIDTH 5 /* LDO5_ON_VSEL - [4:0] */ + +/* + * R16502 (0x4076) - LDO5 SLEEP Control + */ +#define WM831X_LDO5_SLP_SLOT_MASK 0xE000 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_SLOT_SHIFT 13 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_SLOT_WIDTH 3 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_MODE 0x0100 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_MASK 0x0100 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_SHIFT 8 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_WIDTH 1 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_VSEL_MASK 0x001F /* LDO5_SLP_VSEL - [4:0] */ +#define WM831X_LDO5_SLP_VSEL_SHIFT 0 /* LDO5_SLP_VSEL - [4:0] */ +#define WM831X_LDO5_SLP_VSEL_WIDTH 5 /* LDO5_SLP_VSEL - [4:0] */ + +/* + * R16503 (0x4077) - LDO6 Control + */ +#define WM831X_LDO6_ERR_ACT_MASK 0xC000 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_ERR_ACT_SHIFT 14 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_ERR_ACT_WIDTH 2 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_HWC_SRC_MASK 0x1800 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_SRC_SHIFT 11 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_SRC_WIDTH 2 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_VSEL 0x0400 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_MASK 0x0400 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_SHIFT 10 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_WIDTH 1 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_MODE_MASK 0x0300 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_HWC_MODE_SHIFT 8 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_HWC_MODE_WIDTH 2 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_FLT 0x0080 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_MASK 0x0080 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_SHIFT 7 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_WIDTH 1 /* LDO6_FLT */ +#define WM831X_LDO6_SWI 0x0040 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_MASK 0x0040 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_SHIFT 6 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_WIDTH 1 /* LDO6_SWI */ +#define WM831X_LDO6_LP_MODE 0x0001 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_MASK 0x0001 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_SHIFT 0 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_WIDTH 1 /* LDO6_LP_MODE */ + +/* + * R16504 (0x4078) - LDO6 ON Control + */ +#define WM831X_LDO6_ON_SLOT_MASK 0xE000 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_SLOT_SHIFT 13 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_SLOT_WIDTH 3 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_MODE 0x0100 /* LDO6_ON_MODE */ 
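The 5-bit LDOx_ON_VSEL fields above are the selectors that wm831x_gp_ldo_list_voltage() in this series converts to microvolts over two linear ranges (note that the in-driver comment labels the upper range "50mV steps", but the arithmetic and the 0.9-3.3V range quoted in the commit message give 100mV steps). A standalone sketch of the same mapping, for reference only and not part of the patch:

static int gp_ldo_selector_to_uV(unsigned int selector)
{
	/* Selectors 0x0-0xe: 0.9V to 1.6V in 50mV steps */
	if (selector <= 0xe)
		return 900000 + selector * 50000;
	/* Selectors 0xf-0x1f: 1.7V to 3.3V in 100mV steps */
	if (selector <= 0x1f)
		return 1600000 + (selector - 0xe) * 100000;
	return -EINVAL;	/* selector out of range */
}

For example, selector 0x10 maps to 1.8V and the maximum selector 0x1f maps to 3.3V, matching the 0.9-3.3V span described for the general purpose LDOs.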
+#define WM831X_LDO6_ON_MODE_MASK 0x0100 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_SHIFT 8 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_WIDTH 1 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_VSEL_MASK 0x001F /* LDO6_ON_VSEL - [4:0] */ +#define WM831X_LDO6_ON_VSEL_SHIFT 0 /* LDO6_ON_VSEL - [4:0] */ +#define WM831X_LDO6_ON_VSEL_WIDTH 5 /* LDO6_ON_VSEL - [4:0] */ + +/* + * R16505 (0x4079) - LDO6 SLEEP Control + */ +#define WM831X_LDO6_SLP_SLOT_MASK 0xE000 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_SLOT_SHIFT 13 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_SLOT_WIDTH 3 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_MODE 0x0100 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_MASK 0x0100 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_SHIFT 8 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_WIDTH 1 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_VSEL_MASK 0x001F /* LDO6_SLP_VSEL - [4:0] */ +#define WM831X_LDO6_SLP_VSEL_SHIFT 0 /* LDO6_SLP_VSEL - [4:0] */ +#define WM831X_LDO6_SLP_VSEL_WIDTH 5 /* LDO6_SLP_VSEL - [4:0] */ + +/* + * R16506 (0x407A) - LDO7 Control + */ +#define WM831X_LDO7_ERR_ACT_MASK 0xC000 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_ERR_ACT_SHIFT 14 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_ERR_ACT_WIDTH 2 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_HWC_SRC_MASK 0x1800 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_SRC_SHIFT 11 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_SRC_WIDTH 2 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_VSEL 0x0400 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_MASK 0x0400 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_SHIFT 10 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_WIDTH 1 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_MODE_MASK 0x0300 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_HWC_MODE_SHIFT 8 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_HWC_MODE_WIDTH 2 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_FLT 0x0080 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_MASK 0x0080 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_SHIFT 7 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_WIDTH 1 /* LDO7_FLT */ +#define WM831X_LDO7_SWI 0x0040 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_MASK 0x0040 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_SHIFT 6 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_WIDTH 1 /* LDO7_SWI */ + +/* + * R16507 (0x407B) - LDO7 ON Control + */ +#define WM831X_LDO7_ON_SLOT_MASK 0xE000 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_SLOT_SHIFT 13 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_SLOT_WIDTH 3 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_MODE 0x0100 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_MASK 0x0100 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_SHIFT 8 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_WIDTH 1 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_VSEL_MASK 0x001F /* LDO7_ON_VSEL - [4:0] */ +#define WM831X_LDO7_ON_VSEL_SHIFT 0 /* LDO7_ON_VSEL - [4:0] */ +#define WM831X_LDO7_ON_VSEL_WIDTH 5 /* LDO7_ON_VSEL - [4:0] */ + +/* + * R16508 (0x407C) - LDO7 SLEEP Control + */ +#define WM831X_LDO7_SLP_SLOT_MASK 0xE000 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_SLOT_SHIFT 13 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_SLOT_WIDTH 3 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_MODE 0x0100 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_MASK 0x0100 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_SHIFT 8 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_WIDTH 1 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_VSEL_MASK 
0x001F /* LDO7_SLP_VSEL - [4:0] */ +#define WM831X_LDO7_SLP_VSEL_SHIFT 0 /* LDO7_SLP_VSEL - [4:0] */ +#define WM831X_LDO7_SLP_VSEL_WIDTH 5 /* LDO7_SLP_VSEL - [4:0] */ + +/* + * R16509 (0x407D) - LDO8 Control + */ +#define WM831X_LDO8_ERR_ACT_MASK 0xC000 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_ERR_ACT_SHIFT 14 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_ERR_ACT_WIDTH 2 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_HWC_SRC_MASK 0x1800 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_SRC_SHIFT 11 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_SRC_WIDTH 2 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_VSEL 0x0400 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_MASK 0x0400 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_SHIFT 10 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_WIDTH 1 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_MODE_MASK 0x0300 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_HWC_MODE_SHIFT 8 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_HWC_MODE_WIDTH 2 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_FLT 0x0080 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_MASK 0x0080 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_SHIFT 7 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_WIDTH 1 /* LDO8_FLT */ +#define WM831X_LDO8_SWI 0x0040 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_MASK 0x0040 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_SHIFT 6 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_WIDTH 1 /* LDO8_SWI */ + +/* + * R16510 (0x407E) - LDO8 ON Control + */ +#define WM831X_LDO8_ON_SLOT_MASK 0xE000 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_SLOT_SHIFT 13 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_SLOT_WIDTH 3 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_MODE 0x0100 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_MASK 0x0100 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_SHIFT 8 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_WIDTH 1 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_VSEL_MASK 0x001F /* LDO8_ON_VSEL - [4:0] */ +#define WM831X_LDO8_ON_VSEL_SHIFT 0 /* LDO8_ON_VSEL - [4:0] */ +#define WM831X_LDO8_ON_VSEL_WIDTH 5 /* LDO8_ON_VSEL - [4:0] */ + +/* + * R16511 (0x407F) - LDO8 SLEEP Control + */ +#define WM831X_LDO8_SLP_SLOT_MASK 0xE000 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_SLOT_SHIFT 13 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_SLOT_WIDTH 3 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_MODE 0x0100 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_MASK 0x0100 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_SHIFT 8 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_WIDTH 1 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_VSEL_MASK 0x001F /* LDO8_SLP_VSEL - [4:0] */ +#define WM831X_LDO8_SLP_VSEL_SHIFT 0 /* LDO8_SLP_VSEL - [4:0] */ +#define WM831X_LDO8_SLP_VSEL_WIDTH 5 /* LDO8_SLP_VSEL - [4:0] */ + +/* + * R16512 (0x4080) - LDO9 Control + */ +#define WM831X_LDO9_ERR_ACT_MASK 0xC000 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_ERR_ACT_SHIFT 14 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_ERR_ACT_WIDTH 2 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_HWC_SRC_MASK 0x1800 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_SRC_SHIFT 11 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_SRC_WIDTH 2 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_VSEL 0x0400 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_MASK 0x0400 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_SHIFT 10 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_WIDTH 1 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_MODE_MASK 0x0300 /* 
LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_HWC_MODE_SHIFT 8 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_HWC_MODE_WIDTH 2 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_FLT 0x0080 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_MASK 0x0080 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_SHIFT 7 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_WIDTH 1 /* LDO9_FLT */ +#define WM831X_LDO9_SWI 0x0040 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_MASK 0x0040 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_SHIFT 6 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_WIDTH 1 /* LDO9_SWI */ + +/* + * R16513 (0x4081) - LDO9 ON Control + */ +#define WM831X_LDO9_ON_SLOT_MASK 0xE000 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_SLOT_SHIFT 13 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_SLOT_WIDTH 3 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_MODE 0x0100 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_MASK 0x0100 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_SHIFT 8 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_WIDTH 1 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_VSEL_MASK 0x001F /* LDO9_ON_VSEL - [4:0] */ +#define WM831X_LDO9_ON_VSEL_SHIFT 0 /* LDO9_ON_VSEL - [4:0] */ +#define WM831X_LDO9_ON_VSEL_WIDTH 5 /* LDO9_ON_VSEL - [4:0] */ + +/* + * R16514 (0x4082) - LDO9 SLEEP Control + */ +#define WM831X_LDO9_SLP_SLOT_MASK 0xE000 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_SLOT_SHIFT 13 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_SLOT_WIDTH 3 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_MODE 0x0100 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_MASK 0x0100 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_SHIFT 8 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_WIDTH 1 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_VSEL_MASK 0x001F /* LDO9_SLP_VSEL - [4:0] */ +#define WM831X_LDO9_SLP_VSEL_SHIFT 0 /* LDO9_SLP_VSEL - [4:0] */ +#define WM831X_LDO9_SLP_VSEL_WIDTH 5 /* LDO9_SLP_VSEL - [4:0] */ + +/* + * R16515 (0x4083) - LDO10 Control + */ +#define WM831X_LDO10_ERR_ACT_MASK 0xC000 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_ERR_ACT_SHIFT 14 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_ERR_ACT_WIDTH 2 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_HWC_SRC_MASK 0x1800 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_SRC_SHIFT 11 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_SRC_WIDTH 2 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_VSEL 0x0400 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_MASK 0x0400 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_SHIFT 10 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_WIDTH 1 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_MODE_MASK 0x0300 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_HWC_MODE_SHIFT 8 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_HWC_MODE_WIDTH 2 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_FLT 0x0080 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_MASK 0x0080 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_SHIFT 7 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_WIDTH 1 /* LDO10_FLT */ +#define WM831X_LDO10_SWI 0x0040 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_MASK 0x0040 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_SHIFT 6 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_WIDTH 1 /* LDO10_SWI */ + +/* + * R16516 (0x4084) - LDO10 ON Control + */ +#define WM831X_LDO10_ON_SLOT_MASK 0xE000 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_SLOT_SHIFT 13 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_SLOT_WIDTH 3 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_MODE 0x0100 /* 
LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_MASK 0x0100 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_SHIFT 8 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_WIDTH 1 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_VSEL_MASK 0x001F /* LDO10_ON_VSEL - [4:0] */ +#define WM831X_LDO10_ON_VSEL_SHIFT 0 /* LDO10_ON_VSEL - [4:0] */ +#define WM831X_LDO10_ON_VSEL_WIDTH 5 /* LDO10_ON_VSEL - [4:0] */ + +/* + * R16517 (0x4085) - LDO10 SLEEP Control + */ +#define WM831X_LDO10_SLP_SLOT_MASK 0xE000 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_SLOT_SHIFT 13 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_SLOT_WIDTH 3 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_MODE 0x0100 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_MASK 0x0100 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_SHIFT 8 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_WIDTH 1 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_VSEL_MASK 0x001F /* LDO10_SLP_VSEL - [4:0] */ +#define WM831X_LDO10_SLP_VSEL_SHIFT 0 /* LDO10_SLP_VSEL - [4:0] */ +#define WM831X_LDO10_SLP_VSEL_WIDTH 5 /* LDO10_SLP_VSEL - [4:0] */ + +/* + * R16519 (0x4087) - LDO11 ON Control + */ +#define WM831X_LDO11_ON_SLOT_MASK 0xE000 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_ON_SLOT_SHIFT 13 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_ON_SLOT_WIDTH 3 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_OFFENA 0x1000 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_MASK 0x1000 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_SHIFT 12 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_WIDTH 1 /* LDO11_OFFENA */ +#define WM831X_LDO11_VSEL_SRC 0x0080 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_MASK 0x0080 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_SHIFT 7 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_WIDTH 1 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_ON_VSEL_MASK 0x000F /* LDO11_ON_VSEL - [3:0] */ +#define WM831X_LDO11_ON_VSEL_SHIFT 0 /* LDO11_ON_VSEL - [3:0] */ +#define WM831X_LDO11_ON_VSEL_WIDTH 4 /* LDO11_ON_VSEL - [3:0] */ + +/* + * R16520 (0x4088) - LDO11 SLEEP Control + */ +#define WM831X_LDO11_SLP_SLOT_MASK 0xE000 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_SLOT_SHIFT 13 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_SLOT_WIDTH 3 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_VSEL_MASK 0x000F /* LDO11_SLP_VSEL - [3:0] */ +#define WM831X_LDO11_SLP_VSEL_SHIFT 0 /* LDO11_SLP_VSEL - [3:0] */ +#define WM831X_LDO11_SLP_VSEL_WIDTH 4 /* LDO11_SLP_VSEL - [3:0] */ + /* * R16526 (0x408E) - Power Good Source 1 */ @@ -586,6 +1168,50 @@ #define WM831X_DC1_OK_SHIFT 0 /* DC1_OK */ #define WM831X_DC1_OK_WIDTH 1 /* DC1_OK */ +/* + * R16527 (0x408F) - Power Good Source 2 + */ +#define WM831X_LDO10_OK 0x0200 /* LDO10_OK */ +#define WM831X_LDO10_OK_MASK 0x0200 /* LDO10_OK */ +#define WM831X_LDO10_OK_SHIFT 9 /* LDO10_OK */ +#define WM831X_LDO10_OK_WIDTH 1 /* LDO10_OK */ +#define WM831X_LDO9_OK 0x0100 /* LDO9_OK */ +#define WM831X_LDO9_OK_MASK 0x0100 /* LDO9_OK */ +#define WM831X_LDO9_OK_SHIFT 8 /* LDO9_OK */ +#define WM831X_LDO9_OK_WIDTH 1 /* LDO9_OK */ +#define WM831X_LDO8_OK 0x0080 /* LDO8_OK */ +#define WM831X_LDO8_OK_MASK 0x0080 /* LDO8_OK */ +#define WM831X_LDO8_OK_SHIFT 7 /* LDO8_OK */ +#define WM831X_LDO8_OK_WIDTH 1 /* LDO8_OK */ +#define WM831X_LDO7_OK 0x0040 /* LDO7_OK */ +#define WM831X_LDO7_OK_MASK 0x0040 /* LDO7_OK */ +#define WM831X_LDO7_OK_SHIFT 6 /* LDO7_OK */ +#define WM831X_LDO7_OK_WIDTH 1 /* LDO7_OK */ +#define WM831X_LDO6_OK 0x0020 /* LDO6_OK */ +#define 
WM831X_LDO6_OK_MASK 0x0020 /* LDO6_OK */ +#define WM831X_LDO6_OK_SHIFT 5 /* LDO6_OK */ +#define WM831X_LDO6_OK_WIDTH 1 /* LDO6_OK */ +#define WM831X_LDO5_OK 0x0010 /* LDO5_OK */ +#define WM831X_LDO5_OK_MASK 0x0010 /* LDO5_OK */ +#define WM831X_LDO5_OK_SHIFT 4 /* LDO5_OK */ +#define WM831X_LDO5_OK_WIDTH 1 /* LDO5_OK */ +#define WM831X_LDO4_OK 0x0008 /* LDO4_OK */ +#define WM831X_LDO4_OK_MASK 0x0008 /* LDO4_OK */ +#define WM831X_LDO4_OK_SHIFT 3 /* LDO4_OK */ +#define WM831X_LDO4_OK_WIDTH 1 /* LDO4_OK */ +#define WM831X_LDO3_OK 0x0004 /* LDO3_OK */ +#define WM831X_LDO3_OK_MASK 0x0004 /* LDO3_OK */ +#define WM831X_LDO3_OK_SHIFT 2 /* LDO3_OK */ +#define WM831X_LDO3_OK_WIDTH 1 /* LDO3_OK */ +#define WM831X_LDO2_OK 0x0002 /* LDO2_OK */ +#define WM831X_LDO2_OK_MASK 0x0002 /* LDO2_OK */ +#define WM831X_LDO2_OK_SHIFT 1 /* LDO2_OK */ +#define WM831X_LDO2_OK_WIDTH 1 /* LDO2_OK */ +#define WM831X_LDO1_OK 0x0001 /* LDO1_OK */ +#define WM831X_LDO1_OK_MASK 0x0001 /* LDO1_OK */ +#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ +#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ + #define WM831X_ISINK_MAX_ISEL 56 extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; -- cgit v1.2.3-70-g09d2 From 956f25a6778a2510d52973ab8a3ac2e03e2c3704 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 13 Aug 2009 11:49:23 +0200 Subject: mfd: AB3100 accessor function cleanups This adds the _interruptible suffix to the AB3100 accessor functions on par with mutex_lock_interruptible() that's used for blocking simultaneous calls to the AB3100 acessor functions. Since these accesses are slow on a 100kHz I2C bus and may line up waiting for the mutex, we need to handle interruption by system shutdown or kill signals and may just as well denote that in the function names. Signed-off-by: Linus Walleij Signed-off-by: Samuel Ortiz --- drivers/mfd/ab3100-core.c | 43 ++++++++++++++++++++++++------------------- include/linux/mfd/ab3100.h | 8 ++++---- 2 files changed, 28 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 8ff10cb77ca..ffe4b641546 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -77,7 +77,7 @@ u8 ab3100_get_chip_type(struct ab3100 *ab3100) } EXPORT_SYMBOL(ab3100_get_chip_type); -int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval) +int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) { u8 regandval[2] = {reg, regval}; int err; @@ -109,7 +109,8 @@ int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval) mutex_unlock(&ab3100->access_mutex); return 0; } -EXPORT_SYMBOL(ab3100_set_register); +EXPORT_SYMBOL(ab3100_set_register_interruptible); + /* * The test registers exist at an I2C bus address up one @@ -118,7 +119,7 @@ EXPORT_SYMBOL(ab3100_set_register); * anyway. It's currently only used from this file so declare * it static and do not export. 
*/ -static int ab3100_set_test_register(struct ab3100 *ab3100, +static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) { u8 regandval[2] = {reg, regval}; @@ -148,7 +149,8 @@ static int ab3100_set_test_register(struct ab3100 *ab3100, return err; } -int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval) + +int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval) { int err; @@ -202,9 +204,10 @@ int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval) mutex_unlock(&ab3100->access_mutex); return err; } -EXPORT_SYMBOL(ab3100_get_register); +EXPORT_SYMBOL(ab3100_get_register_interruptible); + -int ab3100_get_register_page(struct ab3100 *ab3100, +int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, u8 first_reg, u8 *regvals, u8 numregs) { int err; @@ -258,9 +261,10 @@ int ab3100_get_register_page(struct ab3100 *ab3100, mutex_unlock(&ab3100->access_mutex); return err; } -EXPORT_SYMBOL(ab3100_get_register_page); +EXPORT_SYMBOL(ab3100_get_register_page_interruptible); -int ab3100_mask_and_set_register(struct ab3100 *ab3100, + +int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 andmask, u8 ormask) { u8 regandval[2] = {reg, 0}; @@ -328,7 +332,8 @@ int ab3100_mask_and_set_register(struct ab3100 *ab3100, mutex_unlock(&ab3100->access_mutex); return err; } -EXPORT_SYMBOL(ab3100_mask_and_set_register); +EXPORT_SYMBOL(ab3100_mask_and_set_register_interruptible); + /* * Register a simple callback for handling any AB3100 events. @@ -371,7 +376,7 @@ static void ab3100_work(struct work_struct *work) u32 fatevent; int err; - err = ab3100_get_register_page(ab3100, AB3100_EVENTA1, + err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1, event_regs, 3); if (err) goto err_event_wq; @@ -435,7 +440,7 @@ static int ab3100_registers_print(struct seq_file *s, void *p) seq_printf(s, "AB3100 registers:\n"); for (reg = 0; reg < 0xff; reg++) { - ab3100_get_register(ab3100, reg, &value); + ab3100_get_register_interruptible(ab3100, reg, &value); seq_printf(s, "[0x%x]: 0x%x\n", reg, value); } return 0; @@ -515,7 +520,7 @@ static ssize_t ab3100_get_set_reg(struct file *file, u8 reg = (u8) user_reg; u8 regvalue; - ab3100_get_register(ab3100, reg, ®value); + ab3100_get_register_interruptible(ab3100, reg, ®value); dev_info(ab3100->dev, "debug read AB3100 reg[0x%02x]: 0x%02x\n", @@ -547,8 +552,8 @@ static ssize_t ab3100_get_set_reg(struct file *file, return -EINVAL; value = (u8) user_value; - ab3100_set_register(ab3100, reg, value); - ab3100_get_register(ab3100, reg, ®value); + ab3100_set_register_interruptible(ab3100, reg, value); + ab3100_get_register_interruptible(ab3100, reg, ®value); dev_info(ab3100->dev, "debug write reg[0x%02x] with 0x%02x, " @@ -696,7 +701,7 @@ static int __init ab3100_setup(struct ab3100 *ab3100) int i; for (i = 0; i < ARRAY_SIZE(ab3100_init_settings); i++) { - err = ab3100_set_register(ab3100, + err = ab3100_set_register_interruptible(ab3100, ab3100_init_settings[i].abreg, ab3100_init_settings[i].setting); if (err) @@ -705,14 +710,14 @@ static int __init ab3100_setup(struct ab3100 *ab3100) /* * Special trick to make the AB3100 use the 32kHz clock (RTC) - * bit 3 in test registe 0x02 is a special, undocumented test + * bit 3 in test register 0x02 is a special, undocumented test * register bit that only exist in AB3100 P1E */ if (ab3100->chip_id == 0xc4) { dev_warn(ab3100->dev, "AB3100 P1E variant detected, " "forcing chip to 32KHz\n"); - err = 
ab3100_set_test_register(ab3100, 0x02, 0x08); + err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08); } exit_no_setup: @@ -852,8 +857,8 @@ static int __init ab3100_probe(struct i2c_client *client, i2c_set_clientdata(client, ab3100); /* Read chip ID register */ - err = ab3100_get_register(ab3100, AB3100_CID, - &ab3100->chip_id); + err = ab3100_get_register_interruptible(ab3100, AB3100_CID, + &ab3100->chip_id); if (err) { dev_err(&client->dev, "could not communicate with the AB3100 analog " diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h index 7a3f316e384..56343b8013b 100644 --- a/include/linux/mfd/ab3100.h +++ b/include/linux/mfd/ab3100.h @@ -86,11 +86,11 @@ struct ab3100 { bool startup_events_read; }; -int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval); -int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval); -int ab3100_get_register_page(struct ab3100 *ab3100, +int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval); +int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval); +int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, u8 first_reg, u8 *regvals, u8 numregs); -int ab3100_mask_and_set_register(struct ab3100 *ab3100, +int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 andmask, u8 ormask); u8 ab3100_get_chip_type(struct ab3100 *ab3100); int ab3100_event_register(struct ab3100 *ab3100, -- cgit v1.2.3-70-g09d2 From 8238addcc52c94c59b10c3c1e9850d3a7921f825 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Wed, 19 Aug 2009 01:40:28 +0200 Subject: mfd: Add Freescale MC13783 driver This driver provides the core Freescale MC13783 support. It registers the client platform_devices and provides access to the A/D converter. Signed-off-by: Sascha Hauer Signed-off-by: Samuel Ortiz --- drivers/mfd/Kconfig | 10 + drivers/mfd/Makefile | 2 + drivers/mfd/mc13783-core.c | 427 ++++++++++++++++++++++++++++++++++++ include/linux/mfd/mc13783-private.h | 396 +++++++++++++++++++++++++++++++++ include/linux/mfd/mc13783.h | 84 +++++++ 5 files changed, 919 insertions(+) create mode 100644 drivers/mfd/mc13783-core.c create mode 100644 include/linux/mfd/mc13783-private.h create mode 100644 include/linux/mfd/mc13783.h (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 0273456af77..a4f3dff30ba 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -238,6 +238,16 @@ config MFD_PCF50633 facilities, and registers devices for the various functions so that function-specific drivers can bind to them. +config MFD_MC13783 + tristate "Support Freescale MC13783" + depends on SPI_MASTER + select MFD_CORE + help + Support for the Freescale (Atlas) MC13783 PMIC and audio CODEC. + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the + functionality of the device. 
+ config PCF50633_ADC tristate "Support for NXP PCF50633 ADC" depends on MFD_PCF50633 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 285cb320e6a..7fec04fb5f4 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -26,6 +26,8 @@ obj-$(CONFIG_MENELAUS) += menelaus.o obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o +obj-$(CONFIG_MFD_MC13783) += mc13783-core.o + obj-$(CONFIG_MFD_CORE) += mfd-core.o obj-$(CONFIG_EZX_PCAP) += ezx-pcap.o diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c new file mode 100644 index 00000000000..e354d2912ef --- /dev/null +++ b/drivers/mfd/mc13783-core.c @@ -0,0 +1,427 @@ +/* + * Copyright 2009 Pengutronix, Sascha Hauer + * + * This code is in parts based on wm8350-core.c and pcf50633-core.c + * + * Initial development of this code was funded by + * Phytec Messtechnik GmbH, http://www.phytec.de + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MC13783_MAX_REG_NUM 0x3f +#define MC13783_FRAME_MASK 0x00ffffff +#define MC13783_MAX_REG_NUM 0x3f +#define MC13783_REG_NUM_SHIFT 0x19 +#define MC13783_WRITE_BIT_SHIFT 31 + +static inline int spi_rw(struct spi_device *spi, u8 * buf, size_t len) +{ + struct spi_transfer t = { + .tx_buf = (const void *)buf, + .rx_buf = buf, + .len = len, + .cs_change = 0, + .delay_usecs = 0, + }; + struct spi_message m; + + spi_message_init(&m); + spi_message_add_tail(&t, &m); + if (spi_sync(spi, &m) != 0 || m.status != 0) + return -EINVAL; + return len - m.actual_length; +} + +static int mc13783_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val) +{ + unsigned int frame = 0; + int ret = 0; + + if (reg_num > MC13783_MAX_REG_NUM) + return -EINVAL; + + frame |= reg_num << MC13783_REG_NUM_SHIFT; + + ret = spi_rw(mc13783->spi_device, (u8 *)&frame, 4); + + *reg_val = frame & MC13783_FRAME_MASK; + + return ret; +} + +static int mc13783_write(struct mc13783 *mc13783, int reg_num, u32 reg_val) +{ + unsigned int frame = 0; + + if (reg_num > MC13783_MAX_REG_NUM) + return -EINVAL; + + frame |= (1 << MC13783_WRITE_BIT_SHIFT); + frame |= reg_num << MC13783_REG_NUM_SHIFT; + frame |= reg_val & MC13783_FRAME_MASK; + + return spi_rw(mc13783->spi_device, (u8 *)&frame, 4); +} + +int mc13783_reg_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val) +{ + int ret; + + mutex_lock(&mc13783->io_lock); + ret = mc13783_read(mc13783, reg_num, reg_val); + mutex_unlock(&mc13783->io_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mc13783_reg_read); + +int mc13783_reg_write(struct mc13783 *mc13783, int reg_num, u32 reg_val) +{ + int ret; + + mutex_lock(&mc13783->io_lock); + ret = mc13783_write(mc13783, reg_num, reg_val); + mutex_unlock(&mc13783->io_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mc13783_reg_write); + +/** + * 
mc13783_set_bits - Bitmask write + * + * @mc13783: Pointer to mc13783 control structure + * @reg: Register to access + * @mask: Mask of bits to change + * @val: Value to set for masked bits + */ +int mc13783_set_bits(struct mc13783 *mc13783, int reg, u32 mask, u32 val) +{ + u32 tmp; + int ret; + + mutex_lock(&mc13783->io_lock); + + ret = mc13783_read(mc13783, reg, &tmp); + tmp = (tmp & ~mask) | val; + if (ret == 0) + ret = mc13783_write(mc13783, reg, tmp); + + mutex_unlock(&mc13783->io_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mc13783_set_bits); + +int mc13783_register_irq(struct mc13783 *mc13783, int irq, + void (*handler) (int, void *), void *data) +{ + if (irq < 0 || irq > MC13783_NUM_IRQ || !handler) + return -EINVAL; + + if (WARN_ON(mc13783->irq_handler[irq].handler)) + return -EBUSY; + + mutex_lock(&mc13783->io_lock); + mc13783->irq_handler[irq].handler = handler; + mc13783->irq_handler[irq].data = data; + mutex_unlock(&mc13783->io_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(mc13783_register_irq); + +int mc13783_free_irq(struct mc13783 *mc13783, int irq) +{ + if (irq < 0 || irq > MC13783_NUM_IRQ) + return -EINVAL; + + mutex_lock(&mc13783->io_lock); + mc13783->irq_handler[irq].handler = NULL; + mutex_unlock(&mc13783->io_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(mc13783_free_irq); + +static void mc13783_irq_work(struct work_struct *work) +{ + struct mc13783 *mc13783 = container_of(work, struct mc13783, work); + int i; + unsigned int adc_sts; + + /* check if the adc has finished any completion */ + mc13783_reg_read(mc13783, MC13783_REG_INTERRUPT_STATUS_0, &adc_sts); + mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0, + adc_sts & MC13783_INT_STAT_ADCDONEI); + + if (adc_sts & MC13783_INT_STAT_ADCDONEI) + complete_all(&mc13783->adc_done); + + for (i = 0; i < MC13783_NUM_IRQ; i++) + if (mc13783->irq_handler[i].handler) + mc13783->irq_handler[i].handler(i, + mc13783->irq_handler[i].data); + enable_irq(mc13783->irq); +} + +static irqreturn_t mc13783_interrupt(int irq, void *dev_id) +{ + struct mc13783 *mc13783 = dev_id; + + disable_irq_nosync(irq); + + schedule_work(&mc13783->work); + return IRQ_HANDLED; +} + +/* set adc to ts interrupt mode, which generates touchscreen wakeup interrupt */ +static inline void mc13783_adc_set_ts_irq_mode(struct mc13783 *mc13783) +{ + unsigned int reg_adc0, reg_adc1; + + reg_adc0 = MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE + | MC13783_ADC0_TSMOD0; + reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN; + + mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0); + mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1); +} + +int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode, + unsigned int channel, unsigned int *sample) +{ + unsigned int reg_adc0, reg_adc1; + int i; + + mutex_lock(&mc13783->adc_conv_lock); + + /* set up auto incrementing anyway to make quick read */ + reg_adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2; + /* enable the adc, ignore external triggering and set ASC to trigger + * conversion */ + reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN + | MC13783_ADC1_ASC; + + /* setup channel number */ + if (channel > 7) + reg_adc1 |= MC13783_ADC1_ADSEL; + + switch (mode) { + case MC13783_ADC_MODE_TS: + /* enables touch screen reference mode and set touchscreen mode + * to position mode */ + reg_adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE + | MC13783_ADC0_TSMOD0 | MC13783_ADC0_TSMOD1; + reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT; + break; + case MC13783_ADC_MODE_SINGLE_CHAN: + reg_adc1 |= (channel & 
0x7) << MC13783_ADC1_CHAN0_SHIFT; + reg_adc1 |= MC13783_ADC1_RAND; + break; + case MC13783_ADC_MODE_MULT_CHAN: + reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT; + break; + default: + return -EINVAL; + } + + mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0); + mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1); + + wait_for_completion_interruptible(&mc13783->adc_done); + + for (i = 0; i < 4; i++) + mc13783_reg_read(mc13783, MC13783_REG_ADC_2, &sample[i]); + + if (mc13783->ts_active) + mc13783_adc_set_ts_irq_mode(mc13783); + + mutex_unlock(&mc13783->adc_conv_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion); + +void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status) +{ + mc13783->ts_active = status; +} +EXPORT_SYMBOL_GPL(mc13783_adc_set_ts_status); + +static int mc13783_check_revision(struct mc13783 *mc13783) +{ + u32 rev_id, rev1, rev2, finid, icid; + + mc13783_read(mc13783, MC13783_REG_REVISION, &rev_id); + + rev1 = (rev_id & 0x018) >> 3; + rev2 = (rev_id & 0x007); + icid = (rev_id & 0x01C0) >> 6; + finid = (rev_id & 0x01E00) >> 9; + + /* Ver 0.2 is actually 3.2a. Report as 3.2 */ + if ((rev1 == 0) && (rev2 == 2)) + rev1 = 3; + + if (rev1 == 0 || icid != 2) { + dev_err(mc13783->dev, "No MC13783 detected.\n"); + return -ENODEV; + } + + mc13783->revision = ((rev1 * 10) + rev2); + dev_info(mc13783->dev, "MC13783 Rev %d.%d FinVer %x detected\n", rev1, + rev2, finid); + + return 0; +} + +/* + * Register a client device. This is non-fatal since there is no need to + * fail the entire device init due to a single platform device failing. + */ +static void mc13783_client_dev_register(struct mc13783 *mc13783, + const char *name) +{ + struct mfd_cell cell = {}; + + cell.name = name; + + mfd_add_devices(mc13783->dev, -1, &cell, 1, NULL, 0); +} + +static int __devinit mc13783_probe(struct spi_device *spi) +{ + struct mc13783 *mc13783; + struct mc13783_platform_data *pdata = spi->dev.platform_data; + int ret; + + mc13783 = kzalloc(sizeof(struct mc13783), GFP_KERNEL); + if (!mc13783) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, mc13783); + spi->mode = SPI_MODE_0 | SPI_CS_HIGH; + spi->bits_per_word = 32; + spi_setup(spi); + + mc13783->spi_device = spi; + mc13783->dev = &spi->dev; + mc13783->irq = spi->irq; + + INIT_WORK(&mc13783->work, mc13783_irq_work); + mutex_init(&mc13783->io_lock); + mutex_init(&mc13783->adc_conv_lock); + init_completion(&mc13783->adc_done); + + if (pdata) { + mc13783->flags = pdata->flags; + mc13783->regulators = pdata->regulators; + mc13783->num_regulators = pdata->num_regulators; + } + + if (mc13783_check_revision(mc13783)) { + ret = -ENODEV; + goto err_out; + } + + /* clear and mask all interrupts */ + mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0, 0x00ffffff); + mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_0, 0x00ffffff); + mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_1, 0x00ffffff); + mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_1, 0x00ffffff); + + /* unmask adcdone interrupts */ + mc13783_set_bits(mc13783, MC13783_REG_INTERRUPT_MASK_0, + MC13783_INT_MASK_ADCDONEM, 0); + + ret = request_irq(mc13783->irq, mc13783_interrupt, + IRQF_DISABLED | IRQF_TRIGGER_HIGH, "mc13783", + mc13783); + if (ret) + goto err_out; + + if (mc13783->flags & MC13783_USE_CODEC) + mc13783_client_dev_register(mc13783, "mc13783-codec"); + if (mc13783->flags & MC13783_USE_ADC) + mc13783_client_dev_register(mc13783, "mc13783-adc"); + if (mc13783->flags & MC13783_USE_RTC) + mc13783_client_dev_register(mc13783, 
"mc13783-rtc"); + if (mc13783->flags & MC13783_USE_REGULATOR) + mc13783_client_dev_register(mc13783, "mc13783-regulator"); + if (mc13783->flags & MC13783_USE_TOUCHSCREEN) + mc13783_client_dev_register(mc13783, "mc13783-ts"); + + return 0; + +err_out: + kfree(mc13783); + return ret; +} + +static int __devexit mc13783_remove(struct spi_device *spi) +{ + struct mc13783 *mc13783; + + mc13783 = dev_get_drvdata(&spi->dev); + + free_irq(mc13783->irq, mc13783); + + mfd_remove_devices(&spi->dev); + + return 0; +} + +static struct spi_driver pmic_driver = { + .driver = { + .name = "mc13783", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = mc13783_probe, + .remove = __devexit_p(mc13783_remove), +}; + +static int __init pmic_init(void) +{ + return spi_register_driver(&pmic_driver); +} +subsys_initcall(pmic_init); + +static void __exit pmic_exit(void) +{ + spi_unregister_driver(&pmic_driver); +} +module_exit(pmic_exit); + +MODULE_DESCRIPTION("Core/Protocol driver for Freescale MC13783 PMIC"); +MODULE_AUTHOR("Sascha Hauer "); +MODULE_LICENSE("GPL"); + diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h new file mode 100644 index 00000000000..47e698cb0f1 --- /dev/null +++ b/include/linux/mfd/mc13783-private.h @@ -0,0 +1,396 @@ +/* + * Copyright 2009 Pengutronix, Sascha Hauer + * + * Initial development of this code was funded by + * Phytec Messtechnik GmbH, http://www.phytec.de + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __LINUX_MFD_MC13783_PRIV_H +#define __LINUX_MFD_MC13783_PRIV_H + +#include +#include +#include +#include + +struct mc13783_irq { + void (*handler)(int, void *); + void *data; +}; + +#define MC13783_NUM_IRQ 2 +#define MC13783_IRQ_TS 0 +#define MC13783_IRQ_REGULATOR 1 + +#define MC13783_ADC_MODE_TS 1 +#define MC13783_ADC_MODE_SINGLE_CHAN 2 +#define MC13783_ADC_MODE_MULT_CHAN 3 + +struct mc13783 { + int revision; + struct device *dev; + struct spi_device *spi_device; + + int (*read_dev)(void *data, char reg, int count, u32 *dst); + int (*write_dev)(void *data, char reg, int count, const u32 *src); + + struct mutex io_lock; + void *io_data; + int irq; + unsigned int flags; + + struct mc13783_irq irq_handler[MC13783_NUM_IRQ]; + struct work_struct work; + struct completion adc_done; + unsigned int ts_active; + struct mutex adc_conv_lock; + + struct mc13783_regulator_init_data *regulators; + int num_regulators; +}; + +int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *); +int mc13783_reg_write(struct mc13783 *, int, u32); +int mc13783_set_bits(struct mc13783 *, int, u32, u32); +int mc13783_free_irq(struct mc13783 *mc13783, int irq); +int mc13783_register_irq(struct mc13783 *mc13783, int irq, + void (*handler) (int, void *), void *data); + +#define MC13783_REG_INTERRUPT_STATUS_0 0 +#define MC13783_REG_INTERRUPT_MASK_0 1 +#define MC13783_REG_INTERRUPT_SENSE_0 2 +#define MC13783_REG_INTERRUPT_STATUS_1 3 +#define MC13783_REG_INTERRUPT_MASK_1 4 +#define MC13783_REG_INTERRUPT_SENSE_1 5 +#define MC13783_REG_POWER_UP_MODE_SENSE 6 +#define MC13783_REG_REVISION 7 +#define MC13783_REG_SEMAPHORE 8 +#define MC13783_REG_ARBITRATION_PERIPHERAL_AUDIO 9 +#define MC13783_REG_ARBITRATION_SWITCHERS 10 +#define MC13783_REG_ARBITRATION_REGULATORS_0 11 +#define MC13783_REG_ARBITRATION_REGULATORS_1 12 +#define MC13783_REG_POWER_CONTROL_0 13 +#define MC13783_REG_POWER_CONTROL_1 14 +#define MC13783_REG_POWER_CONTROL_2 15 +#define MC13783_REG_REGEN_ASSIGNMENT 16 +#define MC13783_REG_CONTROL_SPARE 17 +#define MC13783_REG_MEMORY_A 18 +#define MC13783_REG_MEMORY_B 19 +#define MC13783_REG_RTC_TIME 20 +#define MC13783_REG_RTC_ALARM 21 +#define MC13783_REG_RTC_DAY 22 +#define MC13783_REG_RTC_DAY_ALARM 23 +#define MC13783_REG_SWITCHERS_0 24 +#define MC13783_REG_SWITCHERS_1 25 +#define MC13783_REG_SWITCHERS_2 26 +#define MC13783_REG_SWITCHERS_3 27 +#define MC13783_REG_SWITCHERS_4 28 +#define MC13783_REG_SWITCHERS_5 29 +#define MC13783_REG_REGULATOR_SETTING_0 30 +#define MC13783_REG_REGULATOR_SETTING_1 31 +#define MC13783_REG_REGULATOR_MODE_0 32 +#define MC13783_REG_REGULATOR_MODE_1 33 +#define MC13783_REG_POWER_MISCELLANEOUS 34 +#define MC13783_REG_POWER_SPARE 35 +#define MC13783_REG_AUDIO_RX_0 36 +#define MC13783_REG_AUDIO_RX_1 37 +#define MC13783_REG_AUDIO_TX 38 +#define MC13783_REG_AUDIO_SSI_NETWORK 39 +#define MC13783_REG_AUDIO_CODEC 40 +#define MC13783_REG_AUDIO_STEREO_DAC 41 +#define MC13783_REG_AUDIO_SPARE 42 +#define MC13783_REG_ADC_0 43 +#define MC13783_REG_ADC_1 44 +#define MC13783_REG_ADC_2 45 +#define MC13783_REG_ADC_3 46 +#define MC13783_REG_ADC_4 47 +#define MC13783_REG_CHARGER 48 +#define MC13783_REG_USB 49 +#define MC13783_REG_CHARGE_USB_SPARE 50 +#define MC13783_REG_LED_CONTROL_0 51 +#define MC13783_REG_LED_CONTROL_1 52 +#define MC13783_REG_LED_CONTROL_2 53 +#define MC13783_REG_LED_CONTROL_3 54 +#define MC13783_REG_LED_CONTROL_4 55 +#define MC13783_REG_LED_CONTROL_5 56 +#define MC13783_REG_SPARE 57 +#define MC13783_REG_TRIM_0 58 +#define MC13783_REG_TRIM_1 59 +#define MC13783_REG_TEST_0 60 
+#define MC13783_REG_TEST_1 61 +#define MC13783_REG_TEST_2 62 +#define MC13783_REG_TEST_3 63 +#define MC13783_REG_NB 64 + + +/* + * Interrupt Status + */ +#define MC13783_INT_STAT_ADCDONEI (1 << 0) +#define MC13783_INT_STAT_ADCBISDONEI (1 << 1) +#define MC13783_INT_STAT_TSI (1 << 2) +#define MC13783_INT_STAT_WHIGHI (1 << 3) +#define MC13783_INT_STAT_WLOWI (1 << 4) +#define MC13783_INT_STAT_CHGDETI (1 << 6) +#define MC13783_INT_STAT_CHGOVI (1 << 7) +#define MC13783_INT_STAT_CHGREVI (1 << 8) +#define MC13783_INT_STAT_CHGSHORTI (1 << 9) +#define MC13783_INT_STAT_CCCVI (1 << 10) +#define MC13783_INT_STAT_CHGCURRI (1 << 11) +#define MC13783_INT_STAT_BPONI (1 << 12) +#define MC13783_INT_STAT_LOBATLI (1 << 13) +#define MC13783_INT_STAT_LOBATHI (1 << 14) +#define MC13783_INT_STAT_UDPI (1 << 15) +#define MC13783_INT_STAT_USBI (1 << 16) +#define MC13783_INT_STAT_IDI (1 << 19) +#define MC13783_INT_STAT_Unused (1 << 20) +#define MC13783_INT_STAT_SE1I (1 << 21) +#define MC13783_INT_STAT_CKDETI (1 << 22) +#define MC13783_INT_STAT_UDMI (1 << 23) + +/* + * Interrupt Mask + */ +#define MC13783_INT_MASK_ADCDONEM (1 << 0) +#define MC13783_INT_MASK_ADCBISDONEM (1 << 1) +#define MC13783_INT_MASK_TSM (1 << 2) +#define MC13783_INT_MASK_WHIGHM (1 << 3) +#define MC13783_INT_MASK_WLOWM (1 << 4) +#define MC13783_INT_MASK_CHGDETM (1 << 6) +#define MC13783_INT_MASK_CHGOVM (1 << 7) +#define MC13783_INT_MASK_CHGREVM (1 << 8) +#define MC13783_INT_MASK_CHGSHORTM (1 << 9) +#define MC13783_INT_MASK_CCCVM (1 << 10) +#define MC13783_INT_MASK_CHGCURRM (1 << 11) +#define MC13783_INT_MASK_BPONM (1 << 12) +#define MC13783_INT_MASK_LOBATLM (1 << 13) +#define MC13783_INT_MASK_LOBATHM (1 << 14) +#define MC13783_INT_MASK_UDPM (1 << 15) +#define MC13783_INT_MASK_USBM (1 << 16) +#define MC13783_INT_MASK_IDM (1 << 19) +#define MC13783_INT_MASK_SE1M (1 << 21) +#define MC13783_INT_MASK_CKDETM (1 << 22) + +/* + * Reg Regulator Mode 0 + */ +#define MC13783_REGCTRL_VAUDIO_EN (1 << 0) +#define MC13783_REGCTRL_VAUDIO_STBY (1 << 1) +#define MC13783_REGCTRL_VAUDIO_MODE (1 << 2) +#define MC13783_REGCTRL_VIOHI_EN (1 << 3) +#define MC13783_REGCTRL_VIOHI_STBY (1 << 4) +#define MC13783_REGCTRL_VIOHI_MODE (1 << 5) +#define MC13783_REGCTRL_VIOLO_EN (1 << 6) +#define MC13783_REGCTRL_VIOLO_STBY (1 << 7) +#define MC13783_REGCTRL_VIOLO_MODE (1 << 8) +#define MC13783_REGCTRL_VDIG_EN (1 << 9) +#define MC13783_REGCTRL_VDIG_STBY (1 << 10) +#define MC13783_REGCTRL_VDIG_MODE (1 << 11) +#define MC13783_REGCTRL_VGEN_EN (1 << 12) +#define MC13783_REGCTRL_VGEN_STBY (1 << 13) +#define MC13783_REGCTRL_VGEN_MODE (1 << 14) +#define MC13783_REGCTRL_VRFDIG_EN (1 << 15) +#define MC13783_REGCTRL_VRFDIG_STBY (1 << 16) +#define MC13783_REGCTRL_VRFDIG_MODE (1 << 17) +#define MC13783_REGCTRL_VRFREF_EN (1 << 18) +#define MC13783_REGCTRL_VRFREF_STBY (1 << 19) +#define MC13783_REGCTRL_VRFREF_MODE (1 << 20) +#define MC13783_REGCTRL_VRFCP_EN (1 << 21) +#define MC13783_REGCTRL_VRFCP_STBY (1 << 22) +#define MC13783_REGCTRL_VRFCP_MODE (1 << 23) + +/* + * Reg Regulator Mode 1 + */ +#define MC13783_REGCTRL_VSIM_EN (1 << 0) +#define MC13783_REGCTRL_VSIM_STBY (1 << 1) +#define MC13783_REGCTRL_VSIM_MODE (1 << 2) +#define MC13783_REGCTRL_VESIM_EN (1 << 3) +#define MC13783_REGCTRL_VESIM_STBY (1 << 4) +#define MC13783_REGCTRL_VESIM_MODE (1 << 5) +#define MC13783_REGCTRL_VCAM_EN (1 << 6) +#define MC13783_REGCTRL_VCAM_STBY (1 << 7) +#define MC13783_REGCTRL_VCAM_MODE (1 << 8) +#define MC13783_REGCTRL_VRFBG_EN (1 << 9) +#define MC13783_REGCTRL_VRFBG_STBY (1 << 10) +#define MC13783_REGCTRL_VVIB_EN 
(1 << 11) +#define MC13783_REGCTRL_VRF1_EN (1 << 12) +#define MC13783_REGCTRL_VRF1_STBY (1 << 13) +#define MC13783_REGCTRL_VRF1_MODE (1 << 14) +#define MC13783_REGCTRL_VRF2_EN (1 << 15) +#define MC13783_REGCTRL_VRF2_STBY (1 << 16) +#define MC13783_REGCTRL_VRF2_MODE (1 << 17) +#define MC13783_REGCTRL_VMMC1_EN (1 << 18) +#define MC13783_REGCTRL_VMMC1_STBY (1 << 19) +#define MC13783_REGCTRL_VMMC1_MODE (1 << 20) +#define MC13783_REGCTRL_VMMC2_EN (1 << 21) +#define MC13783_REGCTRL_VMMC2_STBY (1 << 22) +#define MC13783_REGCTRL_VMMC2_MODE (1 << 23) + +/* + * Reg Regulator Misc. + */ +#define MC13783_REGCTRL_GPO1_EN (1 << 6) +#define MC13783_REGCTRL_GPO2_EN (1 << 8) +#define MC13783_REGCTRL_GPO3_EN (1 << 10) +#define MC13783_REGCTRL_GPO4_EN (1 << 12) +#define MC13783_REGCTRL_VIBPINCTRL (1 << 14) + +/* + * Reg Switcher 4 + */ +#define MC13783_SWCTRL_SW1A_MODE (1 << 0) +#define MC13783_SWCTRL_SW1A_STBY_MODE (1 << 2) +#define MC13783_SWCTRL_SW1A_DVS_SPEED (1 << 6) +#define MC13783_SWCTRL_SW1A_PANIC_MODE (1 << 8) +#define MC13783_SWCTRL_SW1A_SOFTSTART (1 << 9) +#define MC13783_SWCTRL_SW1B_MODE (1 << 10) +#define MC13783_SWCTRL_SW1B_STBY_MODE (1 << 12) +#define MC13783_SWCTRL_SW1B_DVS_SPEED (1 << 14) +#define MC13783_SWCTRL_SW1B_PANIC_MODE (1 << 16) +#define MC13783_SWCTRL_SW1B_SOFTSTART (1 << 17) +#define MC13783_SWCTRL_PLL_EN (1 << 18) +#define MC13783_SWCTRL_PLL_FACTOR (1 << 19) + +/* + * Reg Switcher 5 + */ +#define MC13783_SWCTRL_SW2A_MODE (1 << 0) +#define MC13783_SWCTRL_SW2A_STBY_MODE (1 << 2) +#define MC13783_SWCTRL_SW2A_DVS_SPEED (1 << 6) +#define MC13783_SWCTRL_SW2A_PANIC_MODE (1 << 8) +#define MC13783_SWCTRL_SW2A_SOFTSTART (1 << 9) +#define MC13783_SWCTRL_SW2B_MODE (1 << 10) +#define MC13783_SWCTRL_SW2B_STBY_MODE (1 << 12) +#define MC13783_SWCTRL_SW2B_DVS_SPEED (1 << 14) +#define MC13783_SWCTRL_SW2B_PANIC_MODE (1 << 16) +#define MC13783_SWCTRL_SW2B_SOFTSTART (1 << 17) +#define MC13783_SWSET_SW3 (1 << 18) +#define MC13783_SWCTRL_SW3_EN (1 << 20) +#define MC13783_SWCTRL_SW3_STBY (1 << 21) +#define MC13783_SWCTRL_SW3_MODE (1 << 22) + +/* + * ADC/Touch + */ +#define MC13783_ADC0_LICELLCON (1 << 0) +#define MC13783_ADC0_CHRGICON (1 << 1) +#define MC13783_ADC0_BATICON (1 << 2) +#define MC13783_ADC0_RTHEN (1 << 3) +#define MC13783_ADC0_DTHEN (1 << 4) +#define MC13783_ADC0_UIDEN (1 << 5) +#define MC13783_ADC0_ADOUTEN (1 << 6) +#define MC13783_ADC0_ADOUTPER (1 << 7) +#define MC13783_ADC0_ADREFEN (1 << 10) +#define MC13783_ADC0_ADREFMODE (1 << 11) +#define MC13783_ADC0_TSMOD0 (1 << 12) +#define MC13783_ADC0_TSMOD1 (1 << 13) +#define MC13783_ADC0_TSMOD2 (1 << 14) +#define MC13783_ADC0_CHRGRAWDIV (1 << 15) +#define MC13783_ADC0_ADINC1 (1 << 16) +#define MC13783_ADC0_ADINC2 (1 << 17) +#define MC13783_ADC0_WCOMP (1 << 18) +#define MC13783_ADC0_ADCBIS0 (1 << 23) + +#define MC13783_ADC1_ADEN (1 << 0) +#define MC13783_ADC1_RAND (1 << 1) +#define MC13783_ADC1_ADSEL (1 << 3) +#define MC13783_ADC1_TRIGMASK (1 << 4) +#define MC13783_ADC1_ADA10 (1 << 5) +#define MC13783_ADC1_ADA11 (1 << 6) +#define MC13783_ADC1_ADA12 (1 << 7) +#define MC13783_ADC1_ADA20 (1 << 8) +#define MC13783_ADC1_ADA21 (1 << 9) +#define MC13783_ADC1_ADA22 (1 << 10) +#define MC13783_ADC1_ATO0 (1 << 11) +#define MC13783_ADC1_ATO1 (1 << 12) +#define MC13783_ADC1_ATO2 (1 << 13) +#define MC13783_ADC1_ATO3 (1 << 14) +#define MC13783_ADC1_ATO4 (1 << 15) +#define MC13783_ADC1_ATO5 (1 << 16) +#define MC13783_ADC1_ATO6 (1 << 17) +#define MC13783_ADC1_ATO7 (1 << 18) +#define MC13783_ADC1_ATOX (1 << 19) +#define MC13783_ADC1_ASC (1 << 20) +#define 
MC13783_ADC1_ADTRIGIGN (1 << 21) +#define MC13783_ADC1_ADONESHOT (1 << 22) +#define MC13783_ADC1_ADCBIS1 (1 << 23) + +#define MC13783_ADC1_CHAN0_SHIFT 5 +#define MC13783_ADC1_CHAN1_SHIFT 8 + +#define MC13783_ADC2_ADD10 (1 << 2) +#define MC13783_ADC2_ADD11 (1 << 3) +#define MC13783_ADC2_ADD12 (1 << 4) +#define MC13783_ADC2_ADD13 (1 << 5) +#define MC13783_ADC2_ADD14 (1 << 6) +#define MC13783_ADC2_ADD15 (1 << 7) +#define MC13783_ADC2_ADD16 (1 << 8) +#define MC13783_ADC2_ADD17 (1 << 9) +#define MC13783_ADC2_ADD18 (1 << 10) +#define MC13783_ADC2_ADD19 (1 << 11) +#define MC13783_ADC2_ADD20 (1 << 14) +#define MC13783_ADC2_ADD21 (1 << 15) +#define MC13783_ADC2_ADD22 (1 << 16) +#define MC13783_ADC2_ADD23 (1 << 17) +#define MC13783_ADC2_ADD24 (1 << 18) +#define MC13783_ADC2_ADD25 (1 << 19) +#define MC13783_ADC2_ADD26 (1 << 20) +#define MC13783_ADC2_ADD27 (1 << 21) +#define MC13783_ADC2_ADD28 (1 << 22) +#define MC13783_ADC2_ADD29 (1 << 23) + +#define MC13783_ADC3_WHIGH0 (1 << 0) +#define MC13783_ADC3_WHIGH1 (1 << 1) +#define MC13783_ADC3_WHIGH2 (1 << 2) +#define MC13783_ADC3_WHIGH3 (1 << 3) +#define MC13783_ADC3_WHIGH4 (1 << 4) +#define MC13783_ADC3_WHIGH5 (1 << 5) +#define MC13783_ADC3_ICID0 (1 << 6) +#define MC13783_ADC3_ICID1 (1 << 7) +#define MC13783_ADC3_ICID2 (1 << 8) +#define MC13783_ADC3_WLOW0 (1 << 9) +#define MC13783_ADC3_WLOW1 (1 << 10) +#define MC13783_ADC3_WLOW2 (1 << 11) +#define MC13783_ADC3_WLOW3 (1 << 12) +#define MC13783_ADC3_WLOW4 (1 << 13) +#define MC13783_ADC3_WLOW5 (1 << 14) +#define MC13783_ADC3_ADCBIS2 (1 << 23) + +#define MC13783_ADC4_ADDBIS10 (1 << 2) +#define MC13783_ADC4_ADDBIS11 (1 << 3) +#define MC13783_ADC4_ADDBIS12 (1 << 4) +#define MC13783_ADC4_ADDBIS13 (1 << 5) +#define MC13783_ADC4_ADDBIS14 (1 << 6) +#define MC13783_ADC4_ADDBIS15 (1 << 7) +#define MC13783_ADC4_ADDBIS16 (1 << 8) +#define MC13783_ADC4_ADDBIS17 (1 << 9) +#define MC13783_ADC4_ADDBIS18 (1 << 10) +#define MC13783_ADC4_ADDBIS19 (1 << 11) +#define MC13783_ADC4_ADDBIS20 (1 << 14) +#define MC13783_ADC4_ADDBIS21 (1 << 15) +#define MC13783_ADC4_ADDBIS22 (1 << 16) +#define MC13783_ADC4_ADDBIS23 (1 << 17) +#define MC13783_ADC4_ADDBIS24 (1 << 18) +#define MC13783_ADC4_ADDBIS25 (1 << 19) +#define MC13783_ADC4_ADDBIS26 (1 << 20) +#define MC13783_ADC4_ADDBIS27 (1 << 21) +#define MC13783_ADC4_ADDBIS28 (1 << 22) +#define MC13783_ADC4_ADDBIS29 (1 << 23) + +#endif /* __LINUX_MFD_MC13783_PRIV_H */ + diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h new file mode 100644 index 00000000000..b3a2a724357 --- /dev/null +++ b/include/linux/mfd/mc13783.h @@ -0,0 +1,84 @@ +/* + * Copyright 2009 Pengutronix, Sascha Hauer + * + * Initial development of this code was funded by + * Phytec Messtechnik GmbH, http://www.phytec.de + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __INCLUDE_LINUX_MFD_MC13783_H +#define __INCLUDE_LINUX_MFD_MC13783_H + +struct mc13783; +struct regulator_init_data; + +struct mc13783_regulator_init_data { + int id; + struct regulator_init_data *init_data; +}; + +struct mc13783_platform_data { + struct mc13783_regulator_init_data *regulators; + int num_regulators; + unsigned int flags; +}; + +/* mc13783_platform_data flags */ +#define MC13783_USE_TOUCHSCREEN (1 << 0) +#define MC13783_USE_CODEC (1 << 1) +#define MC13783_USE_ADC (1 << 2) +#define MC13783_USE_RTC (1 << 3) +#define MC13783_USE_REGULATOR (1 << 4) + +int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode, + unsigned int channel, unsigned int *sample); + +void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status); + +#define MC13783_SW_SW1A 0 +#define MC13783_SW_SW1B 1 +#define MC13783_SW_SW2A 2 +#define MC13783_SW_SW2B 3 +#define MC13783_SW_SW3 4 +#define MC13783_SW_PLL 5 +#define MC13783_REGU_VAUDIO 6 +#define MC13783_REGU_VIOHI 7 +#define MC13783_REGU_VIOLO 8 +#define MC13783_REGU_VDIG 9 +#define MC13783_REGU_VGEN 10 +#define MC13783_REGU_VRFDIG 11 +#define MC13783_REGU_VRFREF 12 +#define MC13783_REGU_VRFCP 13 +#define MC13783_REGU_VSIM 14 +#define MC13783_REGU_VESIM 15 +#define MC13783_REGU_VCAM 16 +#define MC13783_REGU_VRFBG 17 +#define MC13783_REGU_VVIB 18 +#define MC13783_REGU_VRF1 19 +#define MC13783_REGU_VRF2 20 +#define MC13783_REGU_VMMC1 21 +#define MC13783_REGU_VMMC2 22 +#define MC13783_REGU_GPO1 23 +#define MC13783_REGU_GPO2 24 +#define MC13783_REGU_GPO3 25 +#define MC13783_REGU_GPO4 26 +#define MC13783_REGU_V1 27 +#define MC13783_REGU_V2 28 +#define MC13783_REGU_V3 29 +#define MC13783_REGU_V4 30 + +#endif /* __INCLUDE_LINUX_MFD_MC13783_H */ + -- cgit v1.2.3-70-g09d2 From ebf0bd366ed8161e6fbc919705d878ccbfd51624 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 31 Aug 2009 18:32:18 +0200 Subject: mfd: Add support for TWL4030/5030 dynamic power switching The TWL4030/5030 family of multifunction devices allows board-specific control of the various regulators, clock and reset lines through 'scripts' that are loaded into its memory. This allows for Dynamic Power Switching (DPS). Implement board-independent core support for DPS that is then used by board-specific code to load custom DPS scripts. Signed-off-by: Amit Kucheria Signed-off-by: Samuel Ortiz --- drivers/mfd/Kconfig | 13 ++ drivers/mfd/Makefile | 1 + drivers/mfd/twl4030-core.c | 10 + drivers/mfd/twl4030-power.c | 466 ++++++++++++++++++++++++++++++++++++ include/linux/i2c/twl4030.h | 94 ++++++++- 5 files changed, 574 insertions(+), 10 deletions(-) create mode 100644 drivers/mfd/twl4030-power.c (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 4fd4f913c36..570be139f9d 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -108,6 +108,19 @@ config TWL4030_CORE high speed USB OTG transceiver, an audio codec (on most versions) and many other features. +config TWL4030_POWER + bool "Support power resources on TWL4030 family chips" + depends on TWL4030_CORE && ARM + help + Say yes here if you want to use the power resources on the + TWL4030 family chips. Most of these resources are regulators, + which have a separate driver; some are control signals, such + as clock request handshaking. + + This driver uses board-specific data to initialize the resources + and load scripts controlling which resources are switched off/on + or reset when a sleep, wakeup or warm reset event occurs.
+ config MFD_TMIO bool default n diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 6a5d8cb545f..f3b277b90d4 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_TPS65010) += tps65010.o obj-$(CONFIG_MENELAUS) += menelaus.o obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o +obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o obj-$(CONFIG_MFD_MC13783) += mc13783-core.o diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index 1fd2620819d..e424cf6d8e9 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c @@ -89,6 +89,12 @@ #define twl_has_madc() false #endif +#ifdef CONFIG_TWL4030_POWER +#define twl_has_power() true +#else +#define twl_has_power() false +#endif + #if defined(CONFIG_RTC_DRV_TWL4030) || defined(CONFIG_RTC_DRV_TWL4030_MODULE) #define twl_has_rtc() true #else @@ -801,6 +807,10 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) /* setup clock framework */ clocks_init(&client->dev); + /* load power event scripts */ + if (twl_has_power() && pdata->power) + twl4030_power_init(pdata->power); + /* Maybe init the T2 Interrupt subsystem */ if (client->irq && pdata->irq_base diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c new file mode 100644 index 00000000000..e7688b04126 --- /dev/null +++ b/drivers/mfd/twl4030-power.c @@ -0,0 +1,466 @@ +/* + * linux/drivers/i2c/chips/twl4030-power.c + * + * Handle TWL4030 Power initialization + * + * Copyright (C) 2008 Nokia Corporation + * Copyright (C) 2006 Texas Instruments, Inc + * + * Written by Kalle Jokiniemi + * Peter De Schrijver + * Several fixes by Amit Kucheria + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include + +#include + +static u8 twl4030_start_script_address = 0x2b; + +#define PWR_P1_SW_EVENTS 0x10 +#define PWR_DEVOFF (1<<0) + +#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36) +#define PHY_TO_OFF_PM_RECEIVER(p) (p - 0x5b) + +/* resource - hfclk */ +#define R_HFCLKOUT_DEV_GRP PHY_TO_OFF_PM_RECEIVER(0xe6) + +/* PM events */ +#define R_P1_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x46) +#define R_P2_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x47) +#define R_P3_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x48) +#define R_CFG_P1_TRANSITION PHY_TO_OFF_PM_MASTER(0x36) +#define R_CFG_P2_TRANSITION PHY_TO_OFF_PM_MASTER(0x37) +#define R_CFG_P3_TRANSITION PHY_TO_OFF_PM_MASTER(0x38) + +#define LVL_WAKEUP 0x08 + +#define ENABLE_WARMRESET (1<<4) + +#define END_OF_SCRIPT 0x3f + +#define R_SEQ_ADD_A2S PHY_TO_OFF_PM_MASTER(0x55) +#define R_SEQ_ADD_S2A12 PHY_TO_OFF_PM_MASTER(0x56) +#define R_SEQ_ADD_S2A3 PHY_TO_OFF_PM_MASTER(0x57) +#define R_SEQ_ADD_WARM PHY_TO_OFF_PM_MASTER(0x58) +#define R_MEMORY_ADDRESS PHY_TO_OFF_PM_MASTER(0x59) +#define R_MEMORY_DATA PHY_TO_OFF_PM_MASTER(0x5a) + +#define R_PROTECT_KEY 0x0E +#define KEY_1 0xC0 +#define KEY_2 0x0C + +/* resource configuration registers */ + +#define DEVGROUP_OFFSET 0 +#define TYPE_OFFSET 1 + +/* Bit positions */ +#define DEVGROUP_SHIFT 5 +#define DEVGROUP_MASK (7 << DEVGROUP_SHIFT) +#define TYPE_SHIFT 0 +#define TYPE_MASK (7 << TYPE_SHIFT) +#define TYPE2_SHIFT 3 +#define TYPE2_MASK (3 << TYPE2_SHIFT) + +static u8 res_config_addrs[] = { + [RES_VAUX1] = 0x17, + [RES_VAUX2] = 0x1b, + [RES_VAUX3] = 0x1f, + [RES_VAUX4] = 0x23, + [RES_VMMC1] = 0x27, + [RES_VMMC2] = 0x2b, + [RES_VPLL1] = 0x2f, + [RES_VPLL2] = 0x33, + [RES_VSIM] = 0x37, + [RES_VDAC] = 0x3b, + [RES_VINTANA1] = 0x3f, + [RES_VINTANA2] = 0x43, + [RES_VINTDIG] = 0x47, + [RES_VIO] = 0x4b, + [RES_VDD1] = 0x55, + [RES_VDD2] = 0x63, + [RES_VUSB_1V5] = 0x71, + [RES_VUSB_1V8] = 0x74, + [RES_VUSB_3V1] = 0x77, + [RES_VUSBCP] = 0x7a, + [RES_REGEN] = 0x7f, + [RES_NRES_PWRON] = 0x82, + [RES_CLKEN] = 0x85, + [RES_SYSEN] = 0x88, + [RES_HFCLKOUT] = 0x8b, + [RES_32KCLKOUT] = 0x8e, + [RES_RESET] = 0x91, + [RES_Main_Ref] = 0x94, +}; + +static int __init twl4030_write_script_byte(u8 address, u8 byte) +{ + int err; + + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + R_MEMORY_ADDRESS); + if (err) + goto out; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte, + R_MEMORY_DATA); +out: + return err; +} + +static int __init twl4030_write_script_ins(u8 address, u16 pmb_message, + u8 delay, u8 next) +{ + int err; + + address *= 4; + err = twl4030_write_script_byte(address++, pmb_message >> 8); + if (err) + goto out; + err = twl4030_write_script_byte(address++, pmb_message & 0xff); + if (err) + goto out; + err = twl4030_write_script_byte(address++, delay); + if (err) + goto out; + err = twl4030_write_script_byte(address++, next); +out: + return err; +} + +static int __init twl4030_write_script(u8 address, struct twl4030_ins *script, + int len) +{ + int err; + + for (; len; len--, address++, script++) { + if (len == 1) { + err = twl4030_write_script_ins(address, + script->pmb_message, + script->delay, + END_OF_SCRIPT); + if (err) + break; + } else { + err = twl4030_write_script_ins(address, + script->pmb_message, + script->delay, + address + 1); + if (err) + break; + } + } + return err; +} 
+ +static int __init twl4030_config_wakeup3_sequence(u8 address) +{ + int err; + u8 data; + + /* Set SLEEP to ACTIVE SEQ address for P3 */ + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + R_SEQ_ADD_S2A3); + if (err) + goto out; + + /* P3 LVL_WAKEUP should be on LEVEL */ + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + R_P3_SW_EVENTS); + if (err) + goto out; + data |= LVL_WAKEUP; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, + R_P3_SW_EVENTS); +out: + if (err) + pr_err("TWL4030 wakeup sequence for P3 config error\n"); + return err; +} + +static int __init twl4030_config_wakeup12_sequence(u8 address) +{ + int err = 0; + u8 data; + + /* Set SLEEP to ACTIVE SEQ address for P1 and P2 */ + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + R_SEQ_ADD_S2A12); + if (err) + goto out; + + /* P1/P2 LVL_WAKEUP should be on LEVEL */ + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + R_P1_SW_EVENTS); + if (err) + goto out; + + data |= LVL_WAKEUP; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, + R_P1_SW_EVENTS); + if (err) + goto out; + + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + R_P2_SW_EVENTS); + if (err) + goto out; + + data |= LVL_WAKEUP; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, + R_P2_SW_EVENTS); + if (err) + goto out; + + if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) { + /* Disabling AC charger effect on sleep-active transitions */ + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + R_CFG_P1_TRANSITION); + if (err) + goto out; + data &= ~(1<<1); + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data , + R_CFG_P1_TRANSITION); + if (err) + goto out; + } + +out: + if (err) + pr_err("TWL4030 wakeup sequence for P1 and P2" \ + "config error\n"); + return err; +} + +static int __init twl4030_config_sleep_sequence(u8 address) +{ + int err; + + /* Set ACTIVE to SLEEP SEQ address in T2 memory*/ + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + R_SEQ_ADD_A2S); + + if (err) + pr_err("TWL4030 sleep sequence config error\n"); + + return err; +} + +static int __init twl4030_config_warmreset_sequence(u8 address) +{ + int err; + u8 rd_data; + + /* Set WARM RESET SEQ address for P1 */ + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + R_SEQ_ADD_WARM); + if (err) + goto out; + + /* P1/P2/P3 enable WARMRESET */ + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, + R_P1_SW_EVENTS); + if (err) + goto out; + + rd_data |= ENABLE_WARMRESET; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, + R_P1_SW_EVENTS); + if (err) + goto out; + + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, + R_P2_SW_EVENTS); + if (err) + goto out; + + rd_data |= ENABLE_WARMRESET; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, + R_P2_SW_EVENTS); + if (err) + goto out; + + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, + R_P3_SW_EVENTS); + if (err) + goto out; + + rd_data |= ENABLE_WARMRESET; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, + R_P3_SW_EVENTS); +out: + if (err) + pr_err("TWL4030 warmreset seq config error\n"); + return err; +} + +static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) +{ + int rconfig_addr; + int err; + u8 type; + u8 grp; + + if (rconfig->resource > TOTAL_RESOURCES) { + pr_err("TWL4030 Resource %d does not exist\n", + rconfig->resource); + return -EINVAL; + } + + rconfig_addr = res_config_addrs[rconfig->resource]; + + /* Set 
resource group */ + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp, + rconfig_addr + DEVGROUP_OFFSET); + if (err) { + pr_err("TWL4030 Resource %d group could not be read\n", + rconfig->resource); + return err; + } + + if (rconfig->devgroup >= 0) { + grp &= ~DEVGROUP_MASK; + grp |= rconfig->devgroup << DEVGROUP_SHIFT; + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + grp, rconfig_addr + DEVGROUP_OFFSET); + if (err < 0) { + pr_err("TWL4030 failed to program devgroup\n"); + return err; + } + } + + /* Set resource types */ + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type, + rconfig_addr + TYPE_OFFSET); + if (err < 0) { + pr_err("TWL4030 Resource %d type could not be read\n", + rconfig->resource); + return err; + } + + if (rconfig->type >= 0) { + type &= ~TYPE_MASK; + type |= rconfig->type << TYPE_SHIFT; + } + + if (rconfig->type2 >= 0) { + type &= ~TYPE2_MASK; + type |= rconfig->type2 << TYPE2_SHIFT; + } + + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + type, rconfig_addr + TYPE_OFFSET); + if (err < 0) { + pr_err("TWL4030 failed to program resource type\n"); + return err; + } + + return 0; +} + +static int __init load_twl4030_script(struct twl4030_script *tscript, + u8 address) +{ + int err; + + /* Make sure the script isn't going beyond last valid address (0x3f) */ + if ((address + tscript->size) > END_OF_SCRIPT) { + pr_err("TWL4030 scripts too big error\n"); + return -EINVAL; + } + + err = twl4030_write_script(address, tscript->script, tscript->size); + if (err) + goto out; + + if (tscript->flags & TWL4030_WRST_SCRIPT) { + err = twl4030_config_warmreset_sequence(address); + if (err) + goto out; + } + if (tscript->flags & TWL4030_WAKEUP12_SCRIPT) { + err = twl4030_config_wakeup12_sequence(address); + if (err) + goto out; + } + if (tscript->flags & TWL4030_WAKEUP3_SCRIPT) { + err = twl4030_config_wakeup3_sequence(address); + if (err) + goto out; + } + if (tscript->flags & TWL4030_SLEEP_SCRIPT) + err = twl4030_config_sleep_sequence(address); +out: + return err; +} + +void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts) +{ + int err = 0; + int i; + struct twl4030_resconfig *resconfig; + u8 address = twl4030_start_script_address; + + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_1, + R_PROTECT_KEY); + if (err) + goto unlock; + + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_2, + R_PROTECT_KEY); + if (err) + goto unlock; + + for (i = 0; i < twl4030_scripts->num; i++) { + err = load_twl4030_script(twl4030_scripts->scripts[i], address); + if (err) + goto load; + address += twl4030_scripts->scripts[i]->size; + } + + resconfig = twl4030_scripts->resource_config; + if (resconfig) { + while (resconfig->resource) { + err = twl4030_configure_resource(resconfig); + if (err) + goto resource; + resconfig++; + + } + } + + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY); + if (err) + pr_err("TWL4030 Unable to relock registers\n"); + return; + +unlock: + if (err) + pr_err("TWL4030 Unable to unlock registers\n"); + return; +load: + if (err) + pr_err("TWL4030 failed to load scripts\n"); + return; +resource: + if (err) + pr_err("TWL4030 failed to configure resource\n"); + return; +} diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 3fd21d7cb6b..2d02dfd7076 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -223,19 +223,28 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); /* Power bus message definitions */ -#define 
DEV_GRP_NULL 0x0 -#define DEV_GRP_P1 0x1 -#define DEV_GRP_P2 0x2 -#define DEV_GRP_P3 0x4 +/* The TWL4030/5030 splits its power-management resources (the various + * regulators, clock and reset lines) into 3 processor groups - P1, P2 and + * P3. These groups can then be configured to transition between sleep, wait-on + * and active states by sending messages to the power bus. See Section 5.4.2 + * Power Resources of TWL4030 TRM + */ -#define RES_GRP_RES 0x0 -#define RES_GRP_PP 0x1 -#define RES_GRP_RC 0x2 +/* Processor groups */ +#define DEV_GRP_NULL 0x0 +#define DEV_GRP_P1 0x1 /* P1: all OMAP devices */ +#define DEV_GRP_P2 0x2 /* P2: all Modem devices */ +#define DEV_GRP_P3 0x4 /* P3: all peripheral devices */ + +/* Resource groups */ +#define RES_GRP_RES 0x0 /* Reserved */ +#define RES_GRP_PP 0x1 /* Power providers */ +#define RES_GRP_RC 0x2 /* Reset and control */ #define RES_GRP_PP_RC 0x3 -#define RES_GRP_PR 0x4 +#define RES_GRP_PR 0x4 /* Power references */ #define RES_GRP_PP_PR 0x5 #define RES_GRP_RC_PR 0x6 -#define RES_GRP_ALL 0x7 +#define RES_GRP_ALL 0x7 /* All resource groups */ #define RES_TYPE2_R0 0x0 @@ -246,6 +255,41 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); #define RES_STATE_SLEEP 0x8 #define RES_STATE_OFF 0x0 +/* Power resources */ + +/* Power providers */ +#define RES_VAUX1 1 +#define RES_VAUX2 2 +#define RES_VAUX3 3 +#define RES_VAUX4 4 +#define RES_VMMC1 5 +#define RES_VMMC2 6 +#define RES_VPLL1 7 +#define RES_VPLL2 8 +#define RES_VSIM 9 +#define RES_VDAC 10 +#define RES_VINTANA1 11 +#define RES_VINTANA2 12 +#define RES_VINTDIG 13 +#define RES_VIO 14 +#define RES_VDD1 15 +#define RES_VDD2 16 +#define RES_VUSB_1V5 17 +#define RES_VUSB_1V8 18 +#define RES_VUSB_3V1 19 +#define RES_VUSBCP 20 +#define RES_REGEN 21 +/* Reset and control */ +#define RES_NRES_PWRON 22 +#define RES_CLKEN 23 +#define RES_SYSEN 24 +#define RES_HFCLKOUT 25 +#define RES_32KCLKOUT 26 +#define RES_RESET 27 +/* Power Reference */ +#define RES_Main_Ref 28 + +#define TOTAL_RESOURCES 28 /* * Power Bus Message Format ... 
these can be sent individually by Linux, * but are usually part of downloaded scripts that are run when various @@ -327,6 +371,36 @@ struct twl4030_usb_data { enum twl4030_usb_mode usb_mode; }; +struct twl4030_ins { + u16 pmb_message; + u8 delay; +}; + +struct twl4030_script { + struct twl4030_ins *script; + unsigned size; + u8 flags; +#define TWL4030_WRST_SCRIPT (1<<0) +#define TWL4030_WAKEUP12_SCRIPT (1<<1) +#define TWL4030_WAKEUP3_SCRIPT (1<<2) +#define TWL4030_SLEEP_SCRIPT (1<<3) +}; + +struct twl4030_resconfig { + u8 resource; + u8 devgroup; /* Processor group that Power resource belongs to */ + u8 type; /* Power resource addressed, 6 / broadcast message */ + u8 type2; /* Power resource addressed, 3 / broadcast message */ +}; + +struct twl4030_power_data { + struct twl4030_script **scripts; + unsigned num; + struct twl4030_resconfig *resource_config; +}; + +extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); + struct twl4030_platform_data { unsigned irq_base, irq_end; struct twl4030_bci_platform_data *bci; @@ -334,6 +408,7 @@ struct twl4030_platform_data { struct twl4030_madc_platform_data *madc; struct twl4030_keypad_data *keypad; struct twl4030_usb_data *usb; + struct twl4030_power_data *power; /* LDO regulators */ struct regulator_init_data *vdac; @@ -364,7 +439,6 @@ int twl4030_sih_setup(int module); #define TWL4030_VAUX3_DEV_GRP 0x1F #define TWL4030_VAUX3_DEDICATED 0x22 - #if defined(CONFIG_TWL4030_BCI_BATTERY) || \ defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) extern int twl4030charger_usb_en(int enable); -- cgit v1.2.3-70-g09d2 From 8aba721b23917bc6d374ad42bf80bde5058710e2 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Thu, 27 Aug 2009 20:49:08 +0200 Subject: mfd: Fix ab3100-otp build failure ab3100.h should include linux/workqueue.h for otp to build properly. Signed-off-by: Samuel Ortiz --- drivers/mfd/ab3100-core.c | 1 - include/linux/mfd/ab3100.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 1d8ac1a1e30..a848df77514 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h index 56343b8013b..3ead5cfb638 100644 --- a/include/linux/mfd/ab3100.h +++ b/include/linux/mfd/ab3100.h @@ -6,6 +6,7 @@ */ #include +#include #ifndef MFD_AB3100_H #define MFD_AB3100_H -- cgit v1.2.3-70-g09d2 From d619bc143e311a738113dbbe7792bd032403939f Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 9 Sep 2009 11:31:00 +0200 Subject: regulator: AB3100 support This adds support for the regulators found in the AB3100 Mixed-Signal IC. It further also defines platform data for the ST-Ericsson U300 platform and extends the AB3100 MFD driver so that platform/board data with regulation constraints and an init function can be passed down all the way from the board to the regulators. 
Signed-off-by: Linus Walleij Acked-by: Mark Brown Signed-off-by: Liam Girdwood Signed-off-by: Samuel Ortiz --- drivers/mfd/ab3100-core.c | 4 + drivers/regulator/Kconfig | 9 + drivers/regulator/Makefile | 1 + drivers/regulator/ab3100.c | 694 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/ab3100.h | 28 ++ 5 files changed, 736 insertions(+) create mode 100644 drivers/regulator/ab3100.c (limited to 'include') diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index a848df77514..c533f86ff5e 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -837,6 +837,8 @@ static int __init ab3100_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ab3100 *ab3100; + struct ab3100_platform_data *ab3100_plf_data = + client->dev.platform_data; int err; int i; @@ -920,6 +922,8 @@ static int __init ab3100_probe(struct i2c_client *client, for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) { ab3100_platform_devs[i]->dev.parent = &client->dev; + ab3100_platform_devs[i]->dev.platform_data = + ab3100_plf_data; platform_set_drvdata(ab3100_platform_devs[i], ab3100); } diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index c505714b0a9..2dc42bbf6fe 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -138,5 +138,14 @@ config REGULATOR_MC13783 Say y here to support the regulators found on the Freescale MC13783 PMIC. +config REGULATOR_AB3100 + tristate "ST-Ericsson AB3100 Regulator functions" + depends on AB3100_CORE + default y if AB3100_CORE + help + These regulators correspond to functionality in the + AB3100 analog baseband dealing with power regulators + for the system. + endif diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 5034830a395..768b3316d6e 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -21,5 +21,6 @@ obj-$(CONFIG_REGULATOR_DA903X) += da903x.o obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o obj-$(CONFIG_REGULATOR_MC13783) += mc13783.o +obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c new file mode 100644 index 00000000000..156979d1f41 --- /dev/null +++ b/drivers/regulator/ab3100.c @@ -0,0 +1,694 @@ +/* + * drivers/regulator/ab3100.c + * + * Copyright (C) 2008-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * Low-level control of the AB3100 IC Low Dropout (LDO) + * regulators, external regulator and buck converter + * Author: Mattias Wallin + * Author: Linus Walleij + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* LDO registers and some handy masking definitions for AB3100 */ +#define AB3100_LDO_A 0x40 +#define AB3100_LDO_C 0x41 +#define AB3100_LDO_D 0x42 +#define AB3100_LDO_E 0x43 +#define AB3100_LDO_E_SLEEP 0x44 +#define AB3100_LDO_F 0x45 +#define AB3100_LDO_G 0x46 +#define AB3100_LDO_H 0x47 +#define AB3100_LDO_H_SLEEP_MODE 0 +#define AB3100_LDO_H_SLEEP_EN 2 +#define AB3100_LDO_ON 4 +#define AB3100_LDO_H_VSEL_AC 5 +#define AB3100_LDO_K 0x48 +#define AB3100_LDO_EXT 0x49 +#define AB3100_BUCK 0x4A +#define AB3100_BUCK_SLEEP 0x4B +#define AB3100_REG_ON_MASK 0x10 + +/** + * struct ab3100_regulator + * A struct passed around the individual regulator functions + * @platform_device: platform device holding this regulator + * @ab3100: handle to the AB3100 parent chip + * @plfdata: AB3100 platform data 
passed in at probe time + * @regreg: regulator register number in the AB3100 + * @fixed_voltage: a fixed voltage for this regulator, if this + * 0 the voltages array is used instead. + * @typ_voltages: an array of available typical voltages for + * this regulator + * @voltages_len: length of the array of available voltages + */ +struct ab3100_regulator { + struct regulator_dev *rdev; + struct ab3100 *ab3100; + struct ab3100_platform_data *plfdata; + u8 regreg; + int fixed_voltage; + int const *typ_voltages; + u8 voltages_len; +}; + +/* The order in which registers are initialized */ +static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = { + AB3100_LDO_A, + AB3100_LDO_C, + AB3100_LDO_E, + AB3100_LDO_E_SLEEP, + AB3100_LDO_F, + AB3100_LDO_G, + AB3100_LDO_H, + AB3100_LDO_K, + AB3100_LDO_EXT, + AB3100_BUCK, + AB3100_BUCK_SLEEP, + AB3100_LDO_D, +}; + +/* Preset (hardware defined) voltages for these regulators */ +#define LDO_A_VOLTAGE 2750000 +#define LDO_C_VOLTAGE 2650000 +#define LDO_D_VOLTAGE 2650000 + +static const int const ldo_e_buck_typ_voltages[] = { + 1800000, + 1400000, + 1300000, + 1200000, + 1100000, + 1050000, + 900000, +}; + +static const int const ldo_f_typ_voltages[] = { + 1800000, + 1400000, + 1300000, + 1200000, + 1100000, + 1050000, + 2500000, + 2650000, +}; + +static const int const ldo_g_typ_voltages[] = { + 2850000, + 2750000, + 1800000, + 1500000, +}; + +static const int const ldo_h_typ_voltages[] = { + 2750000, + 1800000, + 1500000, + 1200000, +}; + +static const int const ldo_k_typ_voltages[] = { + 2750000, + 1800000, +}; + + +/* The regulator devices */ +static struct ab3100_regulator +ab3100_regulators[AB3100_NUM_REGULATORS] = { + { + .regreg = AB3100_LDO_A, + .fixed_voltage = LDO_A_VOLTAGE, + }, + { + .regreg = AB3100_LDO_C, + .fixed_voltage = LDO_C_VOLTAGE, + }, + { + .regreg = AB3100_LDO_D, + .fixed_voltage = LDO_D_VOLTAGE, + }, + { + .regreg = AB3100_LDO_E, + .typ_voltages = ldo_e_buck_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages), + }, + { + .regreg = AB3100_LDO_F, + .typ_voltages = ldo_f_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_f_typ_voltages), + }, + { + .regreg = AB3100_LDO_G, + .typ_voltages = ldo_g_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_g_typ_voltages), + }, + { + .regreg = AB3100_LDO_H, + .typ_voltages = ldo_h_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_h_typ_voltages), + }, + { + .regreg = AB3100_LDO_K, + .typ_voltages = ldo_k_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_k_typ_voltages), + }, + { + .regreg = AB3100_LDO_EXT, + /* No voltages for the external regulator */ + }, + { + .regreg = AB3100_BUCK, + .typ_voltages = ldo_e_buck_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages), + }, +}; + +/* + * General functions for enable, disable and is_enabled used for + * LDO: A,C,E,F,G,H,K,EXT and BUCK + */ +static int ab3100_enable_regulator(struct regulator_dev *reg) +{ + struct ab3100_regulator *abreg = reg->reg_data; + int err; + u8 regval; + + err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + ®val); + if (err) { + dev_warn(®->dev, "failed to get regid %d value\n", + abreg->regreg); + return err; + } + + /* The regulator is already on, no reason to go further */ + if (regval & AB3100_REG_ON_MASK) + return 0; + + regval |= AB3100_REG_ON_MASK; + + err = ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, + regval); + if (err) { + dev_warn(®->dev, "failed to set regid %d value\n", + abreg->regreg); + return err; + } + + /* Per-regulator power on delay from 
spec */ + switch (abreg->regreg) { + case AB3100_LDO_A: /* Fallthrough */ + case AB3100_LDO_C: /* Fallthrough */ + case AB3100_LDO_D: /* Fallthrough */ + case AB3100_LDO_E: /* Fallthrough */ + case AB3100_LDO_H: /* Fallthrough */ + case AB3100_LDO_K: + udelay(200); + break; + case AB3100_LDO_F: + udelay(600); + break; + case AB3100_LDO_G: + udelay(400); + break; + case AB3100_BUCK: + mdelay(1); + break; + default: + break; + } + + return 0; +} + +static int ab3100_disable_regulator(struct regulator_dev *reg) +{ + struct ab3100_regulator *abreg = reg->reg_data; + int err; + u8 regval; + + /* + * LDO D is a special regulator. When it is disabled, the entire + * system is shut down. So this is handled specially. + */ + if (abreg->regreg == AB3100_LDO_D) { + int i; + + dev_info(®->dev, "disabling LDO D - shut down system\n"); + /* + * Set regulators to default values, ignore any errors, + * we're going DOWN + */ + for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) { + (void) ab3100_set_register_interruptible(abreg->ab3100, + ab3100_reg_init_order[i], + abreg->plfdata->reg_initvals[i]); + } + + /* Setting LDO D to 0x00 cuts the power to the SoC */ + return ab3100_set_register_interruptible(abreg->ab3100, + AB3100_LDO_D, 0x00U); + + } + + /* + * All other regulators are handled here + */ + err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + ®val); + if (err) { + dev_err(®->dev, "unable to get register 0x%x\n", + abreg->regreg); + return err; + } + regval &= ~AB3100_REG_ON_MASK; + return ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, + regval); +} + +static int ab3100_is_enabled_regulator(struct regulator_dev *reg) +{ + struct ab3100_regulator *abreg = reg->reg_data; + u8 regval; + int err; + + err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + ®val); + if (err) { + dev_err(®->dev, "unable to get register 0x%x\n", + abreg->regreg); + return err; + } + + return regval & AB3100_REG_ON_MASK; +} + +static int ab3100_list_voltage_regulator(struct regulator_dev *reg, + unsigned selector) +{ + struct ab3100_regulator *abreg = reg->reg_data; + + if (selector > abreg->voltages_len) + return -EINVAL; + return abreg->typ_voltages[selector]; +} + +static int ab3100_get_voltage_regulator(struct regulator_dev *reg) +{ + struct ab3100_regulator *abreg = reg->reg_data; + u8 regval; + int err; + + /* Return the voltage for fixed regulators immediately */ + if (abreg->fixed_voltage) + return abreg->fixed_voltage; + + /* + * For variable types, read out setting and index into + * supplied voltage list. + */ + err = ab3100_get_register_interruptible(abreg->ab3100, + abreg->regreg, ®val); + if (err) { + dev_warn(®->dev, + "failed to get regulator value in register %02x\n", + abreg->regreg); + return err; + } + + /* The 3 highest bits index voltages */ + regval &= 0xE0; + regval >>= 5; + + if (regval > abreg->voltages_len) { + dev_err(®->dev, + "regulator register %02x contains an illegal voltage setting\n", + abreg->regreg); + return -EINVAL; + } + + return abreg->typ_voltages[regval]; +} + +static int ab3100_get_best_voltage_index(struct regulator_dev *reg, + int min_uV, int max_uV) +{ + struct ab3100_regulator *abreg = reg->reg_data; + int i; + int bestmatch; + int bestindex; + + /* + * Locate the minimum voltage fitting the criteria on + * this regulator. The switchable voltages are not + * in strict falling order so we need to check them + * all for the best match. 
+ */ + bestmatch = INT_MAX; + bestindex = -1; + for (i = 0; i < abreg->voltages_len; i++) { + if (abreg->typ_voltages[i] <= max_uV && + abreg->typ_voltages[i] >= min_uV && + abreg->typ_voltages[i] < bestmatch) { + bestmatch = abreg->typ_voltages[i]; + bestindex = i; + } + } + + if (bestindex < 0) { + dev_warn(®->dev, "requested %d<=x<=%d uV, out of range!\n", + min_uV, max_uV); + return -EINVAL; + } + return bestindex; +} + +static int ab3100_set_voltage_regulator(struct regulator_dev *reg, + int min_uV, int max_uV) +{ + struct ab3100_regulator *abreg = reg->reg_data; + u8 regval; + int err; + int bestindex; + + bestindex = ab3100_get_best_voltage_index(reg, min_uV, max_uV); + if (bestindex < 0) + return bestindex; + + err = ab3100_get_register_interruptible(abreg->ab3100, + abreg->regreg, ®val); + if (err) { + dev_warn(®->dev, + "failed to get regulator register %02x\n", + abreg->regreg); + return err; + } + + /* The highest three bits control the variable regulators */ + regval &= ~0xE0; + regval |= (bestindex << 5); + + err = ab3100_set_register_interruptible(abreg->ab3100, + abreg->regreg, regval); + if (err) + dev_warn(®->dev, "failed to set regulator register %02x\n", + abreg->regreg); + + return err; +} + +static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg, + int uV) +{ + struct ab3100_regulator *abreg = reg->reg_data; + u8 regval; + int err; + int bestindex; + u8 targetreg; + + if (abreg->regreg == AB3100_LDO_E) + targetreg = AB3100_LDO_E_SLEEP; + else if (abreg->regreg == AB3100_BUCK) + targetreg = AB3100_BUCK_SLEEP; + else + return -EINVAL; + + /* LDO E and BUCK have special suspend voltages you can set */ + bestindex = ab3100_get_best_voltage_index(reg, uV, uV); + + err = ab3100_get_register_interruptible(abreg->ab3100, + targetreg, ®val); + if (err) { + dev_warn(®->dev, + "failed to get regulator register %02x\n", + targetreg); + return err; + } + + /* The highest three bits control the variable regulators */ + regval &= ~0xE0; + regval |= (bestindex << 5); + + err = ab3100_set_register_interruptible(abreg->ab3100, + targetreg, regval); + if (err) + dev_warn(®->dev, "failed to set regulator register %02x\n", + abreg->regreg); + + return err; +} + +/* + * The external regulator can just define a fixed voltage. 
+ */ +static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg) +{ + struct ab3100_regulator *abreg = reg->reg_data; + + return abreg->plfdata->external_voltage; +} + +static struct regulator_ops regulator_ops_fixed = { + .enable = ab3100_enable_regulator, + .disable = ab3100_disable_regulator, + .is_enabled = ab3100_is_enabled_regulator, + .get_voltage = ab3100_get_voltage_regulator, +}; + +static struct regulator_ops regulator_ops_variable = { + .enable = ab3100_enable_regulator, + .disable = ab3100_disable_regulator, + .is_enabled = ab3100_is_enabled_regulator, + .get_voltage = ab3100_get_voltage_regulator, + .set_voltage = ab3100_set_voltage_regulator, + .list_voltage = ab3100_list_voltage_regulator, +}; + +static struct regulator_ops regulator_ops_variable_sleepable = { + .enable = ab3100_enable_regulator, + .disable = ab3100_disable_regulator, + .is_enabled = ab3100_is_enabled_regulator, + .get_voltage = ab3100_get_voltage_regulator, + .set_voltage = ab3100_set_voltage_regulator, + .set_suspend_voltage = ab3100_set_suspend_voltage_regulator, + .list_voltage = ab3100_list_voltage_regulator, +}; + +/* + * LDO EXT is an external regulator so it is really + * not possible to set any voltage locally here, AB3100 + * is an on/off switch plain an simple. The external + * voltage is defined in the board set-up if any. + */ +static struct regulator_ops regulator_ops_external = { + .enable = ab3100_enable_regulator, + .disable = ab3100_disable_regulator, + .is_enabled = ab3100_is_enabled_regulator, + .get_voltage = ab3100_get_voltage_regulator_external, +}; + +static struct regulator_desc +ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { + { + .name = "LDO_A", + .id = AB3100_LDO_A, + .ops = ®ulator_ops_fixed, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_C", + .id = AB3100_LDO_C, + .ops = ®ulator_ops_fixed, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_D", + .id = AB3100_LDO_D, + .ops = ®ulator_ops_fixed, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_E", + .id = AB3100_LDO_E, + .ops = ®ulator_ops_variable_sleepable, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_F", + .id = AB3100_LDO_F, + .ops = ®ulator_ops_variable, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_G", + .id = AB3100_LDO_G, + .ops = ®ulator_ops_variable, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_H", + .id = AB3100_LDO_H, + .ops = ®ulator_ops_variable, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_K", + .id = AB3100_LDO_K, + .ops = ®ulator_ops_variable, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "LDO_EXT", + .id = AB3100_LDO_EXT, + .ops = ®ulator_ops_external, + .type = REGULATOR_VOLTAGE, + }, + { + .name = "BUCK", + .id = AB3100_BUCK, + .ops = ®ulator_ops_variable_sleepable, + .type = REGULATOR_VOLTAGE, + }, +}; + +/* + * NOTE: the following functions are regulators pluralis - it is the + * binding to the AB3100 core driver and the parent platform device + * for all the different regulators. 
+ */ + +static int __init ab3100_regulators_probe(struct platform_device *pdev) +{ + struct ab3100_platform_data *plfdata = pdev->dev.platform_data; + struct ab3100 *ab3100 = platform_get_drvdata(pdev); + int err = 0; + u8 data; + int i; + + /* Check chip state */ + err = ab3100_get_register_interruptible(ab3100, + AB3100_LDO_D, &data); + if (err) { + dev_err(&pdev->dev, "could not read initial status of LDO_D\n"); + return err; + } + if (data & 0x10) + dev_notice(&pdev->dev, + "chip is already in active mode (Warm start)\n"); + else + dev_notice(&pdev->dev, + "chip is in inactive mode (Cold start)\n"); + + /* Set up regulators */ + for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) { + err = ab3100_set_register_interruptible(ab3100, + ab3100_reg_init_order[i], + plfdata->reg_initvals[i]); + if (err) { + dev_err(&pdev->dev, "regulator initialization failed with error %d\n", + err); + return err; + } + } + + if (err) { + dev_err(&pdev->dev, + "LDO D regulator initialization failed with error %d\n", + err); + return err; + } + + /* Register the regulators */ + for (i = 0; i < AB3100_NUM_REGULATORS; i++) { + struct ab3100_regulator *reg = &ab3100_regulators[i]; + struct regulator_dev *rdev; + + /* + * Initialize per-regulator struct. + * Inherit platform data, this comes down from the + * i2c boarddata, from the machine. So if you want to + * see what it looks like for a certain machine, go + * into the machine I2C setup. + */ + reg->ab3100 = ab3100; + reg->plfdata = plfdata; + + /* + * Register the regulator, pass around + * the ab3100_regulator struct + */ + rdev = regulator_register(&ab3100_regulator_desc[i], + &pdev->dev, + &plfdata->reg_constraints[i], + reg); + + if (IS_ERR(rdev)) { + err = PTR_ERR(rdev); + dev_err(&pdev->dev, + "%s: failed to register regulator %s err %d\n", + __func__, ab3100_regulator_desc[i].name, + err); + i--; + /* remove the already registered regulators */ + while (i > 0) { + regulator_unregister(ab3100_regulators[i].rdev); + i--; + } + return err; + } + + /* Then set a pointer back to the registered regulator */ + reg->rdev = rdev; + } + + return 0; +} + +static int __exit ab3100_regulators_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < AB3100_NUM_REGULATORS; i++) { + struct ab3100_regulator *reg = &ab3100_regulators[i]; + + regulator_unregister(reg->rdev); + } + return 0; +} + +static struct platform_driver ab3100_regulators_driver = { + .driver = { + .name = "ab3100-regulators", + .owner = THIS_MODULE, + }, + .probe = ab3100_regulators_probe, + .remove = __exit_p(ab3100_regulators_remove), +}; + +static __init int ab3100_regulators_init(void) +{ + return platform_driver_register(&ab3100_regulators_driver); +} + +static __exit void ab3100_regulators_exit(void) +{ + platform_driver_register(&ab3100_regulators_driver); +} + +subsys_initcall(ab3100_regulators_init); +module_exit(ab3100_regulators_exit); + +MODULE_AUTHOR("Mattias Wallin "); +MODULE_DESCRIPTION("AB3100 Regulator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ab3100-regulators"); diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h index 3ead5cfb638..e9aa4c9d749 100644 --- a/include/linux/mfd/ab3100.h +++ b/include/linux/mfd/ab3100.h @@ -7,6 +7,7 @@ #include #include +#include #ifndef MFD_AB3100_H #define MFD_AB3100_H @@ -57,6 +58,14 @@ #define AB3100_STR_BATT_REMOVAL (0x40) #define AB3100_STR_VBUS (0x80) +/* + * AB3100 contains 8 regulators, one external regulator controller + * and a buck converter, further the LDO E and buck converter can + * 
have separate settings if they are in sleep mode, this is + * modeled as a separate regulator. + */ +#define AB3100_NUM_REGULATORS 10 + /** * struct ab3100 * @access_mutex: lock out concurrent accesses to the AB3100 registers @@ -87,6 +96,25 @@ struct ab3100 { bool startup_events_read; }; +/** + * struct ab3100_platform_data + * Data supplied to initialize board connections to the AB3100 + * @reg_constraints: regulator constraints for target board + * the order of these constraints are: LDO A, C, D, E, + * F, G, H, K, EXT and BUCK. + * @reg_initvals: initial values for the regulator registers + * plus two sleep settings for LDO E and the BUCK converter. + * exactly AB3100_NUM_REGULATORS+2 values must be sent in. + * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK, + * BUCK sleep, LDO D. (LDO D need to be initialized last.) + * @external_voltage: voltage level of the external regulator. + */ +struct ab3100_platform_data { + struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS]; + u8 reg_initvals[AB3100_NUM_REGULATORS+2]; + int external_voltage; +}; + int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval); int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval); int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, -- cgit v1.2.3-70-g09d2 From c4c259bcc27c4242b012106afdba183622b1735f Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 15 Sep 2009 16:27:45 +0200 Subject: HID: consolidate connect and disconnect into core code HID core registers input, hidraw and hiddev devices, but leaves unregistering it up to the individual driver, which is not really nice. Let's move all the logic to the core. Reported-by: Marcel Holtmann Reported-by: Brian Rogers Acked-by: Marcel Holtmann Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 11 +++++++++++ drivers/hid/usbhid/hid-core.c | 16 +++++----------- include/linux/hid.h | 3 +++ net/bluetooth/hidp/core.c | 7 ------- 4 files changed, 19 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index ca9bb26c207..be34d32906b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1237,6 +1237,17 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) } EXPORT_SYMBOL_GPL(hid_connect); +void hid_disconnect(struct hid_device *hdev) +{ + if (hdev->claimed & HID_CLAIMED_INPUT) + hidinput_disconnect(hdev); + if (hdev->claimed & HID_CLAIMED_HIDDEV) + hdev->hiddev_disconnect(hdev); + if (hdev->claimed & HID_CLAIMED_HIDRAW) + hidraw_disconnect(hdev); +} +EXPORT_SYMBOL_GPL(hid_disconnect); + /* a list of devices for which there is a specialized driver on HID bus */ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 1b0e07a67d6..03bd703255a 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -1041,13 +1041,6 @@ static void usbhid_stop(struct hid_device *hid) hid_cancel_delayed_stuff(usbhid); - if (hid->claimed & HID_CLAIMED_INPUT) - hidinput_disconnect(hid); - if (hid->claimed & HID_CLAIMED_HIDDEV) - hiddev_disconnect(hid); - if (hid->claimed & HID_CLAIMED_HIDRAW) - hidraw_disconnect(hid); - hid->claimed = 0; usb_free_urb(usbhid->urbin); @@ -1085,7 +1078,7 @@ static struct hid_ll_driver usb_hid_driver = { .hidinput_input_event = usb_hidinput_input_event, }; -static int hid_probe(struct usb_interface *intf, const 
struct usb_device_id *id) +static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_host_interface *interface = intf->cur_altsetting; struct usb_device *dev = interface_to_usbdev(intf); @@ -1117,6 +1110,7 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id) hid->ff_init = hid_pidff_init; #ifdef CONFIG_USB_HIDDEV hid->hiddev_connect = hiddev_connect; + hid->hiddev_disconnect = hiddev_disconnect; hid->hiddev_hid_event = hiddev_hid_event; hid->hiddev_report_event = hiddev_report_event; #endif @@ -1177,7 +1171,7 @@ err: return ret; } -static void hid_disconnect(struct usb_interface *intf) +static void usbhid_disconnect(struct usb_interface *intf) { struct hid_device *hid = usb_get_intfdata(intf); struct usbhid_device *usbhid; @@ -1359,8 +1353,8 @@ MODULE_DEVICE_TABLE (usb, hid_usb_ids); static struct usb_driver hid_driver = { .name = "usbhid", - .probe = hid_probe, - .disconnect = hid_disconnect, + .probe = usbhid_probe, + .disconnect = usbhid_disconnect, #ifdef CONFIG_PM .suspend = hid_suspend, .resume = hid_resume, diff --git a/include/linux/hid.h b/include/linux/hid.h index a0ebdace7ba..10f62841674 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -494,6 +494,7 @@ struct hid_device { /* device report descriptor */ /* hiddev event handler */ int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, struct hid_usage *, __s32); void (*hiddev_report_event) (struct hid_device *, struct hid_report *); @@ -691,6 +692,7 @@ struct hid_device *hid_allocate_device(void); int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); +void hid_disconnect(struct hid_device *hid); /** * hid_map_usage - map usage input bits @@ -800,6 +802,7 @@ static inline int __must_check hid_hw_start(struct hid_device *hdev, */ static inline void hid_hw_stop(struct hid_device *hdev) { + hid_disconnect(hdev); hdev->ll_driver->stop(hdev); } diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 09bedeb5579..49d8495d69b 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -577,11 +577,6 @@ static int hidp_session(void *arg) } if (session->hid) { - if (session->hid->claimed & HID_CLAIMED_INPUT) - hidinput_disconnect(session->hid); - if (session->hid->claimed & HID_CLAIMED_HIDRAW) - hidraw_disconnect(session->hid); - hid_destroy_device(session->hid); session->hid = NULL; } @@ -747,8 +742,6 @@ static void hidp_stop(struct hid_device *hid) skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); - if (hid->claimed & HID_CLAIMED_INPUT) - hidinput_disconnect(hid); hid->claimed = 0; } -- cgit v1.2.3-70-g09d2 From 37bce07077b0c335d8747f1ddb27ad585434a47e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 16 Sep 2009 19:07:32 +0100 Subject: mfd: Convert WM8350 to use request_threaded_irq() Instead of hand rolling our own variant. 
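For comparison, the generic shape of the idiom being adopted looks like this (hypothetical "foo" driver, not WM8350 code): a NULL hard handler plus IRQF_ONESHOT keeps the interrupt line masked until the thread function returns, so the handler may sleep on I2C/SPI without the disable_irq_nosync()/schedule_work()/enable_irq() dance.

	static irqreturn_t foo_irq_thread(int irq, void *data)
	{
		struct foo_chip *chip = data;

		/* Runs in a kernel thread: sleeping bus I/O is fine here. */
		foo_read_status_and_dispatch(chip);

		return IRQ_HANDLED;
	}

	/* in probe, once the chip struct is set up: */
	ret = request_threaded_irq(chip->irq, NULL, foo_irq_thread,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "foo", chip);
	if (ret)
		dev_err(chip->dev, "failed to request IRQ: %d\n", ret);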
Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm8350-core.c | 24 ++++++------------------ include/linux/mfd/wm8350/core.h | 1 - 2 files changed, 6 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index 9d662a576a4..ba27c9dc1ad 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c @@ -353,15 +353,15 @@ static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq) } /* - * wm8350_irq_worker actually handles the interrupts. Since all + * This is a threaded IRQ handler so can access I2C/SPI. Since all * interrupts are clear on read the IRQ line will be reasserted and * the physical IRQ will be handled again if another interrupt is * asserted while we run - in the normal course of events this is a * rare occurrence so we save I2C/SPI reads. */ -static void wm8350_irq_worker(struct work_struct *work) +static irqreturn_t wm8350_irq(int irq, void *data) { - struct wm8350 *wm8350 = container_of(work, struct wm8350, irq_work); + struct wm8350 *wm8350 = data; u16 level_one, status1, status2, comp; /* TODO: Use block reads to improve performance? */ @@ -552,16 +552,6 @@ static void wm8350_irq_worker(struct work_struct *work) } } - enable_irq(wm8350->chip_irq); -} - -static irqreturn_t wm8350_irq(int irq, void *data) -{ - struct wm8350 *wm8350 = data; - - disable_irq_nosync(irq); - schedule_work(&wm8350->irq_work); - return IRQ_HANDLED; } @@ -1428,9 +1418,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq, mutex_init(&wm8350->auxadc_mutex); mutex_init(&wm8350->irq_mutex); - INIT_WORK(&wm8350->irq_work, wm8350_irq_worker); if (irq) { - int flags = 0; + int flags = IRQF_ONESHOT; if (pdata && pdata->irq_high) { flags |= IRQF_TRIGGER_HIGH; @@ -1444,8 +1433,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq, WM8350_IRQ_POL); } - ret = request_irq(irq, wm8350_irq, flags, - "wm8350", wm8350); + ret = request_threaded_irq(irq, NULL, wm8350_irq, flags, + "wm8350", wm8350); if (ret != 0) { dev_err(wm8350->dev, "Failed to request IRQ: %d\n", ret); @@ -1505,7 +1494,6 @@ void wm8350_device_exit(struct wm8350 *wm8350) platform_device_unregister(wm8350->codec.pdev); free_irq(wm8350->chip_irq, wm8350); - flush_work(&wm8350->irq_work); kfree(wm8350->reg_cache); } EXPORT_SYMBOL_GPL(wm8350_device_exit); diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 969b0b55615..1d595de6a05 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -626,7 +626,6 @@ struct wm8350 { struct mutex auxadc_mutex; /* Interrupt handling */ - struct work_struct irq_work; struct mutex irq_mutex; /* IRQ table mutex */ struct wm8350_irq irq[WM8350_NUM_IRQ]; int chip_irq; -- cgit v1.2.3-70-g09d2 From c0826574ddc0df486ecfc2d655e08904c6513209 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 17 Sep 2009 17:03:06 +1000 Subject: nfsd: return success for non-NFS4 nfs4_state_start Today's linux-next build (sparc64_defconfig) failed like this: In file included from arch/sparc/kernel/sys_sparc32.c:32: include/linux/nfsd/nfsd.h: In function 'nfs4_state_start': include/linux/nfsd/nfsd.h:177: error: no return statement in function returning non-void Caused by commit 29ab23cc5d351658d01a4327d55e9106a73fd04f ("nfsd4: allow nfs4 state startup to fail"). Please, if you add code that depends on a CONFIG option, build with that option enabled and disabled. Signed-off-by: Stephen Rothwell Signed-off-by: J. 
Bruce Fields --- include/linux/nfsd/nfsd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 24fdf89cea8..03bbe903910 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -174,7 +174,7 @@ int nfs4_reset_recoverydir(char *recdir); #else static inline int nfs4_state_init(void) { return 0; } static inline void nfsd4_free_slabs(void) { } -static inline int nfs4_state_start(void) { } +static inline int nfs4_state_start(void) { return 0; } static inline void nfs4_state_shutdown(void) { } static inline time_t nfs4_lease_time(void) { return 0; } static inline void nfs4_reset_lease(time_t leasetime) { } -- cgit v1.2.3-70-g09d2 From e2d4304b7d2b85c45de89ec420037d6b9261a12d Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 17 Sep 2009 09:40:31 -0700 Subject: PCI: fix VGA arbiter header file Remove reference to vgaarb.c and replace it with a comment about the arbiter itself. Reported-by: Tiago Vignatti Signed-off-by: Jesse Barnes --- include/linux/vgaarb.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index e81c64af80c..a1fa1da8dc1 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -1,5 +1,6 @@ /* - * vgaarb.c + * The VGA aribiter manages VGA space routing and VGA resource decode to + * allow multiple VGA devices to be used in a system in a safe way. * * (C) Copyright 2005 Benjamin Herrenschmidt * (C) Copyright 2007 Paulo R. Zanoni -- cgit v1.2.3-70-g09d2 From 2667de81f3256c944b06abdf2c56c2f192fcb724 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 17 Sep 2009 19:01:10 +0200 Subject: perf_counter: Allow for a wakeup watermark Currently we wake the mmap() consumer once every PAGE_SIZE of data and/or once event wakeup_events when specified. For high speed sampling this results in too many wakeups wrt. the buffer size, hence change this. We move the default wakeup limit to 1/4-th the buffer size, and provide for means to manually specify this limit. 
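From userspace the new knob would be used roughly as follows; this is a sketch, with sys_perf_counter_open() standing in for a raw syscall(2) wrapper and the event/sample settings chosen arbitrarily.

	struct perf_counter_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;

	/*
	 * Wake the reader once 64KB of new data sits in the mmap buffer,
	 * rather than relying on the default of 1/4 of the buffer size.
	 */
	attr.watermark = 1;
	attr.wakeup_watermark = 64 * 1024;

	fd = sys_perf_counter_open(&attr, pid, cpu, -1, 0);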
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 10 ++++++++-- kernel/perf_counter.c | 32 +++++++++++++++++++------------- 2 files changed, 27 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 972f90d7a32..6c1ef72ea50 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -199,10 +199,14 @@ struct perf_counter_attr { inherit_stat : 1, /* per task counts */ enable_on_exec : 1, /* next exec enables */ task : 1, /* trace fork/exit */ + watermark : 1, /* wakeup_watermark */ - __reserved_1 : 50; + __reserved_1 : 49; - __u32 wakeup_events; /* wakeup every n events */ + union { + __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_watermark; /* bytes before wakeup */ + }; __u32 __reserved_2; __u64 __reserved_3; @@ -521,6 +525,8 @@ struct perf_mmap_data { atomic_t wakeup; /* needs a wakeup */ atomic_t lost; /* nr records lost */ + long watermark; /* wakeup watermark */ + struct perf_counter_mmap_page *user_page; void *data_pages[0]; }; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index fe0d1adde80..29b73b6e814 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2176,6 +2176,13 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) data->nr_pages = nr_pages; atomic_set(&data->lock, -1); + if (counter->attr.watermark) { + data->watermark = min_t(long, PAGE_SIZE * nr_pages, + counter->attr.wakeup_watermark); + } + if (!data->watermark) + data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); + rcu_assign_pointer(counter->data, data); return 0; @@ -2517,23 +2524,15 @@ struct perf_output_handle { unsigned long flags; }; -static bool perf_output_space(struct perf_mmap_data *data, - unsigned int offset, unsigned int head) +static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, + unsigned long offset, unsigned long head) { - unsigned long tail; unsigned long mask; if (!data->writable) return true; mask = (data->nr_pages << PAGE_SHIFT) - 1; - /* - * Userspace could choose to issue a mb() before updating the tail - * pointer. So that all reads will be completed before the write is - * issued. - */ - tail = ACCESS_ONCE(data->user_page->data_tail); - smp_rmb(); offset = (offset - tail) & mask; head = (head - tail) & mask; @@ -2679,7 +2678,7 @@ static int perf_output_begin(struct perf_output_handle *handle, { struct perf_counter *output_counter; struct perf_mmap_data *data; - unsigned int offset, head; + unsigned long tail, offset, head; int have_lost; struct { struct perf_event_header header; @@ -2717,16 +2716,23 @@ static int perf_output_begin(struct perf_output_handle *handle, perf_output_lock(handle); do { + /* + * Userspace could choose to issue a mb() before updating the + * tail pointer. So that all reads will be completed before the + * write is issued. 
+ */ + tail = ACCESS_ONCE(data->user_page->data_tail); + smp_rmb(); offset = head = atomic_long_read(&data->head); head += size; - if (unlikely(!perf_output_space(data, offset, head))) + if (unlikely(!perf_output_space(data, tail, offset, head))) goto fail; } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); handle->offset = offset; handle->head = head; - if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT)) + if (head - tail > data->watermark) atomic_set(&data->wakeup, 1); if (have_lost) { -- cgit v1.2.3-70-g09d2 From 3ef94daae7530b4ebcd2e5f48f1028cd2d2470ba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Sep 2009 16:50:29 +0100 Subject: drm/i915: Add ioctl to set 'purgeability' of objects Similar to the madvise() concept, the application may wish to mark some data as volatile. That is in the event of memory pressure the kernel is free to discard such buffers safe in the knowledge that the application can recreate them on demand, and is simply using these as a cache. Signed-off-by: Chris Wilson Signed-off-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 7 ++++ drivers/gpu/drm/i915/i915_gem.c | 81 +++++++++++++++++++++++++++++++++++++---- include/drm/i915_drm.h | 18 +++++++++ 4 files changed, 99 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f47adb4aa59..250999cdf81 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1616,6 +1616,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), + DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bbcf5fc7266..2f89c2136be 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -566,6 +566,11 @@ struct drm_i915_gem_object { * in an execbuffer object list. */ int in_execbuffer; + + /** + * Advice: are the backing pages purgeable? 
+ */ + int madv; }; /** @@ -705,6 +710,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2fff2e0a976..2ab30f25194 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1436,13 +1436,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) if (obj_priv->tiling_mode != I915_TILING_NONE) i915_gem_object_save_bit_17_swizzle(obj); - for (i = 0; i < page_count; i++) - if (obj_priv->pages[i] != NULL) { - if (obj_priv->dirty) - set_page_dirty(obj_priv->pages[i]); - mark_page_accessed(obj_priv->pages[i]); - page_cache_release(obj_priv->pages[i]); - } + if (obj_priv->madv == I915_MADV_DONTNEED) + obj_priv->dirty = 0; + + for (i = 0; i < page_count; i++) { + if (obj_priv->pages[i] == NULL) + break; + + if (obj_priv->dirty) + set_page_dirty(obj_priv->pages[i]); + + if (obj_priv->madv == I915_MADV_WILLNEED) + mark_page_accessed(obj_priv->pages[i]); + + page_cache_release(obj_priv->pages[i]); + } obj_priv->dirty = 0; drm_free_large(obj_priv->pages); @@ -2412,6 +2420,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) if (dev_priv->mm.suspended) return -EBUSY; + + if (obj_priv->madv == I915_MADV_DONTNEED) { + DRM_ERROR("Attempting to bind a purgeable object\n"); + return -EINVAL; + } + if (alignment == 0) alignment = i915_gem_get_gtt_alignment(obj); if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { @@ -3679,6 +3693,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } obj_priv = obj->driver_private; + if (obj_priv->madv == I915_MADV_DONTNEED) { + DRM_ERROR("Attempting to pin a I915_MADV_DONTNEED buffer\n"); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); @@ -3791,6 +3812,49 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, return i915_gem_ring_throttle(dev, file_priv); } +int +i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_madvise *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + switch (args->madv) { + case I915_MADV_DONTNEED: + case I915_MADV_WILLNEED: + break; + default: + return -EINVAL; + } + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", + args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + obj_priv = obj->driver_private; + + if (obj_priv->pin_count) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); + return -EINVAL; + } + + obj_priv->madv = args->madv; + args->retained = obj_priv->gtt_space != NULL; + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + int i915_gem_init_object(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv; @@ -3815,6 +3879,7 @@ int i915_gem_init_object(struct 
drm_gem_object *obj) obj_priv->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj_priv->list); INIT_LIST_HEAD(&obj_priv->fence_list); + obj_priv->madv = I915_MADV_WILLNEED; return 0; } @@ -4506,7 +4571,7 @@ i915_gem_object_truncate(struct drm_gem_object *obj) static inline int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) { - return !obj_priv->dirty; + return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED; } static int diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 8e1e92583fb..607c9da061e 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -185,6 +185,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_GET_APERTURE 0x23 #define DRM_I915_GEM_MMAP_GTT 0x24 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 +#define DRM_I915_GEM_MADVISE 0x26 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -221,6 +222,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) +#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -667,4 +669,20 @@ struct drm_i915_get_pipe_from_crtc_id { __u32 pipe; }; +#define I915_MADV_WILLNEED 0 +#define I915_MADV_DONTNEED 1 + +struct drm_i915_gem_madvise { + /** Handle of the buffer to change the backing store advice */ + __u32 handle; + + /* Advice: either the buffer will be needed again in the near future, + * or wont be and could be discarded under memory pressure. + */ + __u32 madv; + + /** Whether the backing store still exists. */ + __u32 retained; +}; + #endif /* _I915_DRM_H_ */ -- cgit v1.2.3-70-g09d2 From 1a133e0c9dabda23e6693cabfdc1d5106dca5fc2 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 15 Sep 2009 16:57:24 -0700 Subject: ACPI: make ACPI button funcs no-ops if not built in Yakui pointed out that we don't properly no-op the ACPI button routines if the button driver isn't built in. This will cause problems if ACPI is disabled, so provide stub functions in that case. 
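With the stubs in place a consumer can use the lid API unconditionally; a hypothetical driver (not part of this patch) would simply do:

	#include <linux/notifier.h>
	#include <acpi/button.h>

	static int foo_lid_notify(struct notifier_block *nb,
				  unsigned long val, void *unused)
	{
		/* react to lid open/close events here */
		return NOTIFY_OK;
	}

	static struct notifier_block foo_lid_nb = {
		.notifier_call = foo_lid_notify,
	};

	static int foo_setup_lid(void)
	{
		/*
		 * With CONFIG_ACPI_BUTTON disabled these calls compile to
		 * no-ops and acpi_lid_open() reports the lid as open.
		 */
		acpi_lid_notifier_register(&foo_lid_nb);
		return acpi_lid_open() ? 0 : -ENODEV;
	}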
Reported-by: ykzhao Signed-off-by: Jesse Barnes --- include/acpi/button.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/acpi/button.h b/include/acpi/button.h index bb643a79d65..97eea0e4c01 100644 --- a/include/acpi/button.h +++ b/include/acpi/button.h @@ -3,8 +3,23 @@ #include +#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) extern int acpi_lid_notifier_register(struct notifier_block *nb); extern int acpi_lid_notifier_unregister(struct notifier_block *nb); extern int acpi_lid_open(void); +#else +static inline int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_open(void) +{ + return 1; +} +#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ #endif /* ACPI_BUTTON_H */ -- cgit v1.2.3-70-g09d2 From c3422bea5f09b0e85704f51f2b01271630b8940b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 13 Sep 2009 09:15:10 -0700 Subject: rcu: Simplify rcu_read_unlock_special() quiescent-state accounting The earlier approach required two scheduling-clock ticks to note an preemptable-RCU quiescent state in the situation in which the scheduling-clock interrupt is unlucky enough to always interrupt an RCU read-side critical section. With this change, the quiescent state is instead noted by the outermost rcu_read_unlock() immediately following the first scheduling-clock tick, or, alternatively, by the first subsequent context switch. Therefore, this change also speeds up grace periods. Suggested-by: Josh Triplett Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <12528585111945-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 - kernel/rcutree.c | 15 ++++++-------- kernel/rcutree_plugin.h | 54 ++++++++++++++++++++++++------------------------- 3 files changed, 32 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index f3d74bd04d1..c62a9f84d61 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1740,7 +1740,6 @@ extern cputime_t task_gtime(struct task_struct *p); #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ -#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. 
*/ static inline void rcu_copy_process(struct task_struct *p) { diff --git a/kernel/rcutree.c b/kernel/rcutree.c index e9a4ae94647..6c99553e9f1 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -107,27 +107,23 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, */ void rcu_sched_qs(int cpu) { - unsigned long flags; struct rcu_data *rdp; - local_irq_save(flags); rdp = &per_cpu(rcu_sched_data, cpu); - rdp->passed_quiesc = 1; rdp->passed_quiesc_completed = rdp->completed; - rcu_preempt_qs(cpu); - local_irq_restore(flags); + barrier(); + rdp->passed_quiesc = 1; + rcu_preempt_note_context_switch(cpu); } void rcu_bh_qs(int cpu) { - unsigned long flags; struct rcu_data *rdp; - local_irq_save(flags); rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; rdp->passed_quiesc_completed = rdp->completed; - local_irq_restore(flags); + barrier(); + rdp->passed_quiesc = 1; } #ifdef CONFIG_NO_HZ @@ -615,6 +611,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) /* Advance to a new grace period and initialize state. */ rsp->gpnum++; + WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT); rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; record_gp_stall_check_time(rsp); diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index b8e4b0384f0..c9616e48379 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -64,34 +64,42 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); * not in a quiescent state. There might be any number of tasks blocked * while in an RCU read-side critical section. */ -static void rcu_preempt_qs_record(int cpu) +static void rcu_preempt_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); - rdp->passed_quiesc = 1; rdp->passed_quiesc_completed = rdp->completed; + barrier(); + rdp->passed_quiesc = 1; } /* - * We have entered the scheduler or are between softirqs in ksoftirqd. - * If we are in an RCU read-side critical section, we need to reflect - * that in the state of the rcu_node structure corresponding to this CPU. - * Caller must disable hardirqs. + * We have entered the scheduler, and the current task might soon be + * context-switched away from. If this task is in an RCU read-side + * critical section, we will no longer be able to rely on the CPU to + * record that fact, so we enqueue the task on the appropriate entry + * of the blocked_tasks[] array. The task will dequeue itself when + * it exits the outermost enclosing RCU read-side critical section. + * Therefore, the current grace period cannot be permitted to complete + * until the blocked_tasks[] entry indexed by the low-order bit of + * rnp->gpnum empties. + * + * Caller must disable preemption. */ -static void rcu_preempt_qs(int cpu) +static void rcu_preempt_note_context_switch(int cpu) { struct task_struct *t = current; + unsigned long flags; int phase; struct rcu_data *rdp; struct rcu_node *rnp; if (t->rcu_read_lock_nesting && (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { - WARN_ON_ONCE(cpu != smp_processor_id()); /* Possibly blocking in an RCU read-side critical section. 
*/ rdp = rcu_preempt_state.rda[cpu]; rnp = rdp->mynode; - spin_lock(&rnp->lock); + spin_lock_irqsave(&rnp->lock, flags); t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; t->rcu_blocked_node = rnp; @@ -112,7 +120,7 @@ static void rcu_preempt_qs(int cpu) phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1); list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); smp_mb(); /* Ensure later ctxt swtch seen after above. */ - spin_unlock(&rnp->lock); + spin_unlock_irqrestore(&rnp->lock, flags); } /* @@ -124,9 +132,8 @@ static void rcu_preempt_qs(int cpu) * grace period, then the fact that the task has been enqueued * means that we continue to block the current grace period. */ - rcu_preempt_qs_record(cpu); - t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS | - RCU_READ_UNLOCK_GOT_QS); + rcu_preempt_qs(cpu); + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; } /* @@ -162,7 +169,7 @@ static void rcu_read_unlock_special(struct task_struct *t) special = t->rcu_read_unlock_special; if (special & RCU_READ_UNLOCK_NEED_QS) { t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; - t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS; + rcu_preempt_qs(smp_processor_id()); } /* Hardware IRQ handlers cannot block. */ @@ -199,9 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t) */ if (!empty && rnp->qsmask == 0 && list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) { - t->rcu_read_unlock_special &= - ~(RCU_READ_UNLOCK_NEED_QS | - RCU_READ_UNLOCK_GOT_QS); + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; if (rnp->parent == NULL) { /* Only one rcu_node in the tree. */ cpu_quiet_msk_finish(&rcu_preempt_state, flags); @@ -352,19 +357,12 @@ static void rcu_preempt_check_callbacks(int cpu) struct task_struct *t = current; if (t->rcu_read_lock_nesting == 0) { - t->rcu_read_unlock_special &= - ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS); - rcu_preempt_qs_record(cpu); + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; + rcu_preempt_qs(cpu); return; } if (per_cpu(rcu_preempt_data, cpu).qs_pending) { - if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) { - rcu_preempt_qs_record(cpu); - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS; - } else if (!(t->rcu_read_unlock_special & - RCU_READ_UNLOCK_NEED_QS)) { - t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; - } + t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; } } @@ -451,7 +449,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); * Because preemptable RCU does not exist, we never have to check for * CPUs being in quiescent states. */ -static void rcu_preempt_qs(int cpu) +static void rcu_preempt_note_context_switch(int cpu) { } -- cgit v1.2.3-70-g09d2 From 16e3081191837a6a04733de5cd5d1d1b303140d4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 13 Sep 2009 09:15:11 -0700 Subject: rcu: Fix synchronize_rcu() for TREE_PREEMPT_RCU The redirection of synchronize_sched() to synchronize_rcu() was appropriate for TREE_RCU, but not for TREE_PREEMPT_RCU. Fix this by creating an underlying synchronize_sched(). TREE_RCU then redirects synchronize_rcu() to synchronize_sched(), while TREE_PREEMPT_RCU has its own version of synchronize_rcu(). Signed-off-by: Paul E. 
McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <12528585111916-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/rcupdate.h | 23 +++++------------------ include/linux/rcutree.h | 4 ++-- kernel/rcupdate.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 50 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 95e0615f4d7..39dce83c486 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -52,8 +52,13 @@ struct rcu_head { }; /* Exported common interfaces */ +#ifdef CONFIG_TREE_PREEMPT_RCU extern void synchronize_rcu(void); +#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#define synchronize_rcu synchronize_sched +#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ extern void synchronize_rcu_bh(void); +extern void synchronize_sched(void); extern void rcu_barrier(void); extern void rcu_barrier_bh(void); extern void rcu_barrier_sched(void); @@ -261,24 +266,6 @@ struct rcu_synchronize { extern void wakeme_after_rcu(struct rcu_head *head); -/** - * synchronize_sched - block until all CPUs have exited any non-preemptive - * kernel code sequences. - * - * This means that all preempt_disable code sequences, including NMI and - * hardware-interrupt handlers, in progress on entry will have completed - * before this primitive returns. However, this does not guarantee that - * softirq handlers will have completed, since in some kernels, these - * handlers can run in process context, and can block. - * - * This primitive provides the guarantees made by the (now removed) - * synchronize_kernel() API. In contrast, synchronize_rcu() only - * guarantees that rcu_read_lock() sections will have completed. - * In "classic RCU", these two guarantees happen to be one and - * the same, but can differ in realtime RCU implementations. - */ -#define synchronize_sched() __synchronize_sched() - /** * call_rcu - Queue an RCU callback for invocation after a grace period. * @head: structure to be used for queueing the RCU updates. diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index a8930771782..00d08c0cbcc 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -53,6 +53,8 @@ static inline void __rcu_read_unlock(void) preempt_enable(); } +#define __synchronize_sched() synchronize_rcu() + static inline void exit_rcu(void) { } @@ -68,8 +70,6 @@ static inline void __rcu_read_unlock_bh(void) local_bh_enable(); } -#define __synchronize_sched() synchronize_rcu() - extern void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index bd5d5c8e514..28d2f24e787 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -74,6 +74,8 @@ void wakeme_after_rcu(struct rcu_head *head) complete(&rcu->completion); } +#ifdef CONFIG_TREE_PREEMPT_RCU + /** * synchronize_rcu - wait until a grace period has elapsed. * @@ -87,7 +89,7 @@ void synchronize_rcu(void) { struct rcu_synchronize rcu; - if (rcu_blocking_is_gp()) + if (!rcu_scheduler_active) return; init_completion(&rcu.completion); @@ -98,6 +100,46 @@ void synchronize_rcu(void) } EXPORT_SYMBOL_GPL(synchronize_rcu); +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + +/** + * synchronize_sched - wait until an rcu-sched grace period has elapsed. 
+ * + * Control will return to the caller some time after a full rcu-sched + * grace period has elapsed, in other words after all currently executing + * rcu-sched read-side critical sections have completed. These read-side + * critical sections are delimited by rcu_read_lock_sched() and + * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), + * local_irq_disable(), and so on may be used in place of + * rcu_read_lock_sched(). + * + * This means that all preempt_disable code sequences, including NMI and + * hardware-interrupt handlers, in progress on entry will have completed + * before this primitive returns. However, this does not guarantee that + * softirq handlers will have completed, since in some kernels, these + * handlers can run in process context, and can block. + * + * This primitive provides the guarantees made by the (now removed) + * synchronize_kernel() API. In contrast, synchronize_rcu() only + * guarantees that rcu_read_lock() sections will have completed. + * In "classic RCU", these two guarantees happen to be one and + * the same, but can differ in realtime RCU implementations. + */ +void synchronize_sched(void) +{ + struct rcu_synchronize rcu; + + if (rcu_blocking_is_gp()) + return; + + init_completion(&rcu.completion); + /* Will wake me after RCU finished. */ + call_rcu_sched(&rcu.head, wakeme_after_rcu); + /* Wait for it. */ + wait_for_completion(&rcu.completion); +} +EXPORT_SYMBOL_GPL(synchronize_sched); + /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * -- cgit v1.2.3-70-g09d2 From e5e25cf47b0bdd1f7e9b8bb6368ee48e16de0c87 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 18 Sep 2009 00:54:43 +0200 Subject: tracing: Factorize the events profile accounting Factorize the events enabling accounting in a common tracing core helper. This reduces the size of the profile_enable() and profile_disable() callbacks for each trace events. 
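The helpers keep the existing counting convention: profile_count starts at -1, the first enable takes it to 0 and registers the tracepoint, and the last disable takes it back to -1 and unregisters it. A minimal userspace model of that accounting, for illustration only:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int profile_count = -1;

	static void do_register(void)   { puts("register tracepoint"); }
	static void do_unregister(void) { puts("unregister tracepoint"); }

	static void event_profile_enable(void)
	{
		/* old value -1 -> new value 0: we are the first user */
		if (atomic_fetch_add(&profile_count, 1) == -1)
			do_register();
	}

	static void event_profile_disable(void)
	{
		/* old value 0 -> new value -1: we were the last user */
		if (atomic_fetch_add(&profile_count, -1) == 0)
			do_unregister();
	}

	int main(void)
	{
		event_profile_enable();   /* registers */
		event_profile_enable();   /* refcount only */
		event_profile_disable();  /* refcount only */
		event_profile_disable();  /* unregisters */
		return 0;
	}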
Signed-off-by: Frederic Weisbecker Acked-by: Steven Rostedt Acked-by: Li Zefan Cc: Peter Zijlstra Cc: Jason Baron Cc: Masami Hiramatsu Cc: Ingo Molnar --- include/linux/ftrace_event.h | 4 ++-- include/linux/syscalls.h | 24 ++++++++---------------- include/trace/ftrace.h | 28 ++++++++-------------------- kernel/trace/trace_event_profile.c | 20 ++++++++++++++++++-- 4 files changed, 36 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index bd099ba82cc..bc103d7b1ca 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -130,8 +130,8 @@ struct ftrace_event_call { void *data; atomic_t profile_count; - int (*profile_enable)(struct ftrace_event_call *); - void (*profile_disable)(struct ftrace_event_call *); + int (*profile_enable)(void); + void (*profile_disable)(void); }; #define MAX_FILTER_PRED 32 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a8e37821cc6..7d9803cbb20 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -100,33 +100,25 @@ struct perf_counter_attr; #ifdef CONFIG_EVENT_PROFILE #define TRACE_SYS_ENTER_PROFILE(sname) \ -static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \ +static int prof_sysenter_enable_##sname(void) \ { \ - int ret = 0; \ - if (!atomic_inc_return(&event_enter_##sname.profile_count)) \ - ret = reg_prof_syscall_enter("sys"#sname); \ - return ret; \ + return reg_prof_syscall_enter("sys"#sname); \ } \ \ -static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\ +static void prof_sysenter_disable_##sname(void) \ { \ - if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \ - unreg_prof_syscall_enter("sys"#sname); \ + unreg_prof_syscall_enter("sys"#sname); \ } #define TRACE_SYS_EXIT_PROFILE(sname) \ -static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \ +static int prof_sysexit_enable_##sname(void) \ { \ - int ret = 0; \ - if (!atomic_inc_return(&event_exit_##sname.profile_count)) \ - ret = reg_prof_syscall_exit("sys"#sname); \ - return ret; \ + return reg_prof_syscall_exit("sys"#sname); \ } \ \ -static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \ +static void prof_sysexit_disable_##sname(void) \ { \ - if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \ - unreg_prof_syscall_exit("sys"#sname); \ + unreg_prof_syscall_exit("sys"#sname); \ } #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 72a3b437b82..a822087857e 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -382,20 +382,14 @@ static inline int ftrace_get_offsets_##call( \ * * NOTE: The insertion profile callback (ftrace_profile_) is defined later * - * static int ftrace_profile_enable_(struct ftrace_event_call *event_call) + * static int ftrace_profile_enable_(void) * { - * int ret = 0; - * - * if (!atomic_inc_return(&event_call->profile_count)) - * ret = register_trace_(ftrace_profile_); - * - * return ret; + * return register_trace_(ftrace_profile_); * } * - * static void ftrace_profile_disable_(struct ftrace_event_call *event_call) + * static void ftrace_profile_disable_(void) * { - * if (atomic_add_negative(-1, &event->call->profile_count)) - * unregister_trace_(ftrace_profile_); + * unregister_trace_(ftrace_profile_); * } * */ @@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \ \ static void ftrace_profile_##call(proto); \ \ -static int 
ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \ +static int ftrace_profile_enable_##call(void) \ { \ - int ret = 0; \ - \ - if (!atomic_inc_return(&event_call->profile_count)) \ - ret = register_trace_##call(ftrace_profile_##call); \ - \ - return ret; \ + return register_trace_##call(ftrace_profile_##call); \ } \ \ -static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ +static void ftrace_profile_disable_##call(void) \ { \ - if (atomic_add_negative(-1, &event_call->profile_count)) \ - unregister_trace_##call(ftrace_profile_##call); \ + unregister_trace_##call(ftrace_profile_##call); \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 55a25c933d1..df4a74efd50 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -8,6 +8,14 @@ #include #include "trace.h" +static int ftrace_profile_enable_event(struct ftrace_event_call *event) +{ + if (atomic_inc_return(&event->profile_count)) + return 0; + + return event->profile_enable(); +} + int ftrace_profile_enable(int event_id) { struct ftrace_event_call *event; @@ -17,7 +25,7 @@ int ftrace_profile_enable(int event_id) list_for_each_entry(event, &ftrace_events, list) { if (event->id == event_id && event->profile_enable && try_module_get(event->mod)) { - ret = event->profile_enable(event); + ret = ftrace_profile_enable_event(event); break; } } @@ -26,6 +34,14 @@ int ftrace_profile_enable(int event_id) return ret; } +static void ftrace_profile_disable_event(struct ftrace_event_call *event) +{ + if (!atomic_add_negative(-1, &event->profile_count)) + return; + + event->profile_disable(); +} + void ftrace_profile_disable(int event_id) { struct ftrace_event_call *event; @@ -33,7 +49,7 @@ void ftrace_profile_disable(int event_id) mutex_lock(&event_mutex); list_for_each_entry(event, &ftrace_events, list) { if (event->id == event_id) { - event->profile_disable(event); + ftrace_profile_disable_event(event); module_put(event->mod); break; } -- cgit v1.2.3-70-g09d2 From 20ab4425a77a1f34028cc6ce57053c22c184ba5f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 18 Sep 2009 06:10:28 +0200 Subject: tracing: Allocate the ftrace event profile buffer dynamically Currently the trace event profile buffer is allocated in the stack. But this may be too much for the stack, as the events can have large statically defined field size and can also grow with dynamic arrays. Allocate two per cpu buffer for all profiled events. The first cpu buffer is used to host every non-nmi context traces. It is protected by disabling the interrupts while writing and committing the trace. The second buffer is reserved for nmi. So that there is no race between them and the first buffer. The whole write/commit section is rcu protected because we release these buffers while deactivating the last profiling trace event. v2: Move the buffers from trace_event to be global, as pointed by Steven Rostedt. v3: Fix the syscall events to handle the profiling buffer races by disabling interrupts, now that the buffers are globals. 
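The subtle part of this scheme is how the irq-disabled write path pairs with the buffer teardown, so a condensed sketch of the lifecycle follows. It is illustrative only: my_buf, my_write_sample() and my_release_buf() are placeholder names, the include list is approximate, and the sample filling is elided; the authoritative code is in the hunks below.

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/irqflags.h>

static char *my_buf;	/* per-cpu area published through an RCU-visible pointer */

static void my_write_sample(void)
{
	unsigned long flags;
	char *raw;

	local_irq_save(flags);	/* the irq-off section doubles as the rcu-sched read side */
	raw = rcu_dereference(my_buf);
	if (raw) {
		raw = per_cpu_ptr(raw, smp_processor_id());
		/* ... fill the per-cpu slot and submit it to perf ... */
	}
	local_irq_restore(flags);
}

static void my_release_buf(void)
{
	char *old = my_buf;

	rcu_assign_pointer(my_buf, NULL);	/* new writers now see NULL and bail out */
	synchronize_sched();			/* wait until every irq-off writer has finished */
	free_percpu(old);			/* only then is the per-cpu area safe to free */
}

This is also why the patch uses synchronize_sched() rather than synchronize_rcu(): the writers never call rcu_read_lock(), they rely on interrupts being disabled, which is precisely what an rcu-sched grace period waits for.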
Suggested-by: Steven Rostedt Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Peter Zijlstra Cc: Li Zefan Cc: Jason Baron Cc: Masami Hiramatsu Cc: Ingo Molnar --- include/linux/ftrace_event.h | 6 +++ include/trace/ftrace.h | 83 +++++++++++++++++++++----------- kernel/trace/trace_event_profile.c | 61 +++++++++++++++++++++++- kernel/trace/trace_syscalls.c | 97 ++++++++++++++++++++++++++++++-------- 4 files changed, 199 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index bc103d7b1ca..4ec5e67e18c 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -4,6 +4,7 @@ #include #include #include +#include struct trace_array; struct tracer; @@ -134,6 +135,11 @@ struct ftrace_event_call { void (*profile_disable)(void); }; +#define FTRACE_MAX_PROFILE_SIZE 2048 + +extern char *trace_profile_buf; +extern char *trace_profile_buf_nmi; + #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index a822087857e..a0361cb6976 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -648,11 +648,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * struct ftrace_raw_##call *entry; * u64 __addr = 0, __count = 1; * unsigned long irq_flags; + * struct trace_entry *ent; * int __entry_size; * int __data_size; + * int __cpu * int pc; * - * local_save_flags(irq_flags); * pc = preempt_count(); * * __data_size = ftrace_get_offsets_(&__data_offsets, args); @@ -663,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * sizeof(u64)); * __entry_size -= sizeof(u32); * - * do { - * char raw_data[__entry_size]; <- allocate our sample in the stack - * struct trace_entry *ent; + * // Protect the non nmi buffer + * // This also protects the rcu read side + * local_irq_save(irq_flags); + * __cpu = smp_processor_id(); + * + * if (in_nmi()) + * raw_data = rcu_dereference(trace_profile_buf_nmi); + * else + * raw_data = rcu_dereference(trace_profile_buf); + * + * if (!raw_data) + * goto end; * - * zero dead bytes from alignment to avoid stack leak to userspace: + * raw_data = per_cpu_ptr(raw_data, __cpu); * - * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; - * entry = (struct ftrace_raw_ *)raw_data; - * ent = &entry->ent; - * tracing_generic_entry_update(ent, irq_flags, pc); - * ent->type = event_call->id; + * //zero dead bytes from alignment to avoid stack leak to userspace: + * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; + * entry = (struct ftrace_raw_ *)raw_data; + * ent = &entry->ent; + * tracing_generic_entry_update(ent, irq_flags, pc); + * ent->type = event_call->id; * - * <- do some jobs with dynamic arrays + * <- do some jobs with dynamic arrays * - * <- affect our values + * <- affect our values * - * perf_tpcounter_event(event_call->id, __addr, __count, entry, - * __entry_size); <- submit them to perf counter - * } while (0); + * perf_tpcounter_event(event_call->id, __addr, __count, entry, + * __entry_size); <- submit them to perf counter * * } */ @@ -704,11 +714,13 @@ static void ftrace_profile_##call(proto) \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ + struct trace_entry *ent; \ int __entry_size; \ int __data_size; \ + char *raw_data; \ + int __cpu; \ int pc; \ \ - local_save_flags(irq_flags); \ pc = preempt_count(); \ \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ @@ 
-716,23 +728,38 @@ static void ftrace_profile_##call(proto) \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - do { \ - char raw_data[__entry_size]; \ - struct trace_entry *ent; \ + if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ + "profile buffer not large enough")) \ + return; \ + \ + local_irq_save(irq_flags); \ + __cpu = smp_processor_id(); \ \ - *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ - entry = (struct ftrace_raw_##call *)raw_data; \ - ent = &entry->ent; \ - tracing_generic_entry_update(ent, irq_flags, pc); \ - ent->type = event_call->id; \ + if (in_nmi()) \ + raw_data = rcu_dereference(trace_profile_buf_nmi); \ + else \ + raw_data = rcu_dereference(trace_profile_buf); \ \ - tstruct \ + if (!raw_data) \ + goto end; \ \ - { assign; } \ + raw_data = per_cpu_ptr(raw_data, __cpu); \ \ - perf_tpcounter_event(event_call->id, __addr, __count, entry,\ + *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ + entry = (struct ftrace_raw_##call *)raw_data; \ + ent = &entry->ent; \ + tracing_generic_entry_update(ent, irq_flags, pc); \ + ent->type = event_call->id; \ + \ + tstruct \ + \ + { assign; } \ + \ + perf_tpcounter_event(event_call->id, __addr, __count, entry, \ __entry_size); \ - } while (0); \ + \ +end: \ + local_irq_restore(irq_flags); \ \ } diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index df4a74efd50..3aaa77c3309 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -8,12 +8,52 @@ #include #include "trace.h" +/* + * We can't use a size but a type in alloc_percpu() + * So let's create a dummy type that matches the desired size + */ +typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t; + +char *trace_profile_buf; +char *trace_profile_buf_nmi; + +/* Count the events in use (per event id, not per instance) */ +static int total_profile_count; + static int ftrace_profile_enable_event(struct ftrace_event_call *event) { + char *buf; + int ret = -ENOMEM; + if (atomic_inc_return(&event->profile_count)) return 0; - return event->profile_enable(); + if (!total_profile_count++) { + buf = (char *)alloc_percpu(profile_buf_t); + if (!buf) + goto fail_buf; + + rcu_assign_pointer(trace_profile_buf, buf); + + buf = (char *)alloc_percpu(profile_buf_t); + if (!buf) + goto fail_buf_nmi; + + rcu_assign_pointer(trace_profile_buf_nmi, buf); + } + + ret = event->profile_enable(); + if (!ret) + return 0; + + kfree(trace_profile_buf_nmi); +fail_buf_nmi: + kfree(trace_profile_buf); +fail_buf: + total_profile_count--; + atomic_dec(&event->profile_count); + + return ret; } int ftrace_profile_enable(int event_id) @@ -36,10 +76,29 @@ int ftrace_profile_enable(int event_id) static void ftrace_profile_disable_event(struct ftrace_event_call *event) { + char *buf, *nmi_buf; + if (!atomic_add_negative(-1, &event->profile_count)) return; event->profile_disable(); + + if (!--total_profile_count) { + buf = trace_profile_buf; + rcu_assign_pointer(trace_profile_buf, NULL); + + nmi_buf = trace_profile_buf_nmi; + rcu_assign_pointer(trace_profile_buf_nmi, NULL); + + /* + * Ensure every events in profiling have finished before + * releasing the buffers + */ + synchronize_sched(); + + free_percpu(buf); + free_percpu(nmi_buf); + } } void ftrace_profile_disable(int event_id) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 8712ce3c6a0..7a3550cf259 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -384,10 +384,13 @@ static int 
sys_prof_refcount_exit; static void prof_syscall_enter(struct pt_regs *regs, long id) { - struct syscall_trace_enter *rec; struct syscall_metadata *sys_data; + struct syscall_trace_enter *rec; + unsigned long flags; + char *raw_data; int syscall_nr; int size; + int cpu; syscall_nr = syscall_get_nr(current, regs); if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) @@ -402,20 +405,38 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - do { - char raw_data[size]; + if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + "profile buffer not large enough")) + return; + + /* Protect the per cpu buffer, begin the rcu read side */ + local_irq_save(flags); - /* zero the dead bytes from align to not leak stack to user */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + cpu = smp_processor_id(); + + if (in_nmi()) + raw_data = rcu_dereference(trace_profile_buf_nmi); + else + raw_data = rcu_dereference(trace_profile_buf); + + if (!raw_data) + goto end; - rec = (struct syscall_trace_enter *) raw_data; - tracing_generic_entry_update(&rec->ent, 0, 0); - rec->ent.type = sys_data->enter_id; - rec->nr = syscall_nr; - syscall_get_arguments(current, regs, 0, sys_data->nb_args, - (unsigned long *)&rec->args); - perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); - } while(0); + raw_data = per_cpu_ptr(raw_data, cpu); + + /* zero the dead bytes from align to not leak stack to user */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + + rec = (struct syscall_trace_enter *) raw_data; + tracing_generic_entry_update(&rec->ent, 0, 0); + rec->ent.type = sys_data->enter_id; + rec->nr = syscall_nr; + syscall_get_arguments(current, regs, 0, sys_data->nb_args, + (unsigned long *)&rec->args); + perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); + +end: + local_irq_restore(flags); } int reg_prof_syscall_enter(char *name) @@ -460,8 +481,12 @@ void unreg_prof_syscall_enter(char *name) static void prof_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; - struct syscall_trace_exit rec; + struct syscall_trace_exit *rec; + unsigned long flags; int syscall_nr; + char *raw_data; + int size; + int cpu; syscall_nr = syscall_get_nr(current, regs); if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) @@ -471,12 +496,46 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) if (!sys_data) return; - tracing_generic_entry_update(&rec.ent, 0, 0); - rec.ent.type = sys_data->exit_id; - rec.nr = syscall_nr; - rec.ret = syscall_get_return_value(current, regs); + /* We can probably do that at build time */ + size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); + size -= sizeof(u32); - perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec)); + /* + * Impossible, but be paranoid with the future + * How to put this check outside runtime? 
+ */ + if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + "exit event has grown above profile buffer size")) + return; + + /* Protect the per cpu buffer, begin the rcu read side */ + local_irq_save(flags); + cpu = smp_processor_id(); + + if (in_nmi()) + raw_data = rcu_dereference(trace_profile_buf_nmi); + else + raw_data = rcu_dereference(trace_profile_buf); + + if (!raw_data) + goto end; + + raw_data = per_cpu_ptr(raw_data, cpu); + + /* zero the dead bytes from align to not leak stack to user */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + + rec = (struct syscall_trace_exit *)raw_data; + + tracing_generic_entry_update(&rec->ent, 0, 0); + rec->ent.type = sys_data->exit_id; + rec->nr = syscall_nr; + rec->ret = syscall_get_return_value(current, regs); + + perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size); + +end: + local_irq_restore(flags); } int reg_prof_syscall_exit(char *name) -- cgit v1.2.3-70-g09d2 From 733289c2656c556d5cf36eafa1c8ec77222c359f Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Wed, 16 Sep 2009 15:24:21 +0200 Subject: drm/radeon/kms: don't fail if we fail to init GPU acceleration Userspace can query if acceleration is working or not true get info ioctl and could fallback to software if for some reason kernel failed to initialize KMS. This should allow to give a working KMS setup in all case (even with non functionning accel). Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r420.c | 2 ++ drivers/gpu/drm/radeon/r600.c | 33 +++++++++++++------------ drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_device.c | 44 ++++++++++++++-------------------- drivers/gpu/drm/radeon/radeon_kms.c | 3 +++ drivers/gpu/drm/radeon/rv770.c | 33 +++++++++++++------------ include/drm/radeon_drm.h | 1 + 7 files changed, 61 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 33a25a4377b..2142a478197 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -329,6 +329,7 @@ int r420_init(struct radeon_device *rdev) return r; } r300_set_reg_safe(rdev); + rdev->accel_working = true; r = r420_resume(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ @@ -343,6 +344,7 @@ int r420_init(struct radeon_device *rdev) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); radeon_irq_kms_fini(rdev); + rdev->accel_working = false; } return 0; } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 65699e9f202..af430d719e7 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1573,6 +1573,7 @@ int r600_init(struct radeon_device *rdev) if (r) return r; + rdev->accel_working = true; r = r600_resume(rdev); if (r) { if (rdev->flags & RADEON_IS_AGP) { @@ -1581,22 +1582,24 @@ int r600_init(struct radeon_device *rdev) rdev->flags &= ~RADEON_IS_AGP; return r600_init(rdev); } - return r; - } - r = radeon_ib_pool_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - return r; + rdev->accel_working = false; } - r = r600_blit_init(rdev); - if (r) { - DRM_ERROR("radeon: failled blitter (%d).\n", r); - return r; - } - r = radeon_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); - return r; + if (rdev->accel_working) { + r = radeon_ib_pool_init(rdev); + if (r) { + DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); + rdev->accel_working = false; + } + r = r600_blit_init(rdev); + if (r) { + DRM_ERROR("radeon: 
failled blitter (%d).\n", r); + rdev->accel_working = false; + } + r = radeon_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + rdev->accel_working = false; + } } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5bfc05612ac..d7c4efd0892 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -793,6 +793,7 @@ struct radeon_device { bool suspend; bool need_dma32; bool new_init_path; + bool accel_working; struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; const struct firmware *me_fw; /* all family ME firmware */ const struct firmware *pfp_fw; /* r6/700 PFP firmware */ diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index db5ae73d628..0b5014c2ae7 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -504,6 +504,7 @@ int radeon_device_init(struct radeon_device *rdev, rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; rdev->gpu_lockup = false; + rdev->accel_working = false; /* mutex initialization are all done here so we * can recall function without having locking issues */ mutex_init(&rdev->cs_mutex); @@ -649,35 +650,26 @@ int radeon_device_init(struct radeon_device *rdev, /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ r = radeon_gart_enable(rdev); - if (!r) { + if (r) + return 0; r = radeon_gem_init(rdev); - } + if (r) + return 0; /* 1M ring buffer */ - if (!r) { - r = radeon_cp_init(rdev, 1024 * 1024); - } - if (!r) { - r = radeon_wb_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing WB (%d).\n", r); - return r; - } - } - if (!r) { - r = radeon_ib_pool_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - return r; - } - } - if (!r) { - r = radeon_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); - return r; - } - } + r = radeon_cp_init(rdev, 1024 * 1024); + if (r) + return 0; + r = radeon_wb_init(rdev); + if (r) + DRM_ERROR("radeon: failled initializing WB (%d).\n", r); + r = radeon_ib_pool_init(rdev); + if (r) + return 0; + r = radeon_ib_test(rdev); + if (r) + return 0; + rdev->accel_working = true; } DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); if (radeon_testing) { diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index ac8505fe2ca..709bd892b3a 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -112,6 +112,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case RADEON_INFO_NUM_Z_PIPES: value = rdev->num_z_pipes; break; + case RADEON_INFO_ACCEL_WORKING: + value = rdev->accel_working; + break; default: DRM_DEBUG("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4f2098bc797..be2f86539eb 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -953,6 +953,7 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; + rdev->accel_working = true; r = rv770_resume(rdev); if (r) { if (rdev->flags & RADEON_IS_AGP) { @@ -961,22 +962,24 @@ int rv770_init(struct radeon_device *rdev) rdev->flags &= ~RADEON_IS_AGP; return rv770_init(rdev); } - return r; - } - r = r600_blit_init(rdev); - if (r) { - DRM_ERROR("radeon: failled blitter (%d).\n", r); - 
return r; + rdev->accel_working = false; } - r = radeon_ib_pool_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - return r; - } - r = radeon_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); - return r; + if (rdev->accel_working) { + r = r600_blit_init(rdev); + if (r) { + DRM_ERROR("radeon: failled blitter (%d).\n", r); + rdev->accel_working = false; + } + r = radeon_ib_pool_init(rdev); + if (r) { + DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); + rdev->accel_working = false; + } + r = radeon_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + rdev->accel_working = false; + } } return 0; } diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 2ba61e18fc8..b67bbd75bd2 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -899,6 +899,7 @@ struct drm_radeon_cs { #define RADEON_INFO_DEVICE_ID 0x00 #define RADEON_INFO_NUM_GB_PIPES 0x01 #define RADEON_INFO_NUM_Z_PIPES 0x02 +#define RADEON_INFO_ACCEL_WORKING 0x03 struct drm_radeon_info { uint32_t request; -- cgit v1.2.3-70-g09d2 From c88f9f0c91de55efaece6d9bd9ec920b90244776 Mon Sep 17 00:00:00 2001 From: Michel Dänzer Date: Tue, 15 Sep 2009 17:09:30 +0200 Subject: drm/radeon/kms: Use surfaces for scanout / cursor byte swapping on big endian. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michel Dänzer Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r100.c | 5 ++ drivers/gpu/drm/radeon/radeon_fb.c | 121 +++++---------------------------- drivers/gpu/drm/radeon/radeon_object.c | 2 + include/drm/radeon_drm.h | 11 +-- 4 files changed, 31 insertions(+), 108 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index fa0fdc1e345..737970b43ae 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2235,6 +2235,11 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, flags |= R300_SURF_TILE_MICRO; } + if (tiling_flags & RADEON_TILING_SWAP_16BIT) + flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; + if (tiling_flags & RADEON_TILING_SWAP_32BIT) + flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; + DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); WREG32(RADEON_SURFACE0_INFO + surf_index, flags); WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 19e244a512b..944e4fa78db 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -45,71 +45,9 @@ struct radeon_fb_device { struct radeon_device *rdev; }; -static int radeon_fb_check_var(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - int ret; - ret = drm_fb_helper_check_var(var, info); - if (ret) - return ret; - - /* big endian override for radeon endian workaround */ -#ifdef __BIG_ENDIAN - { - int depth; - switch (var->bits_per_pixel) { - case 16: - depth = (var->green.length == 6) ? 16 : 15; - break; - case 32: - depth = (var->transp.length > 0) ? 
32 : 24; - break; - default: - depth = var->bits_per_pixel; - break; - } - switch (depth) { - case 8: - var->red.offset = 0; - var->green.offset = 0; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 24: - var->red.offset = 8; - var->green.offset = 16; - var->blue.offset = 24; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 32: - var->red.offset = 8; - var->green.offset = 16; - var->blue.offset = 24; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 8; - var->transp.offset = 0; - break; - default: - return -EINVAL; - } - } -#endif - return 0; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, - .fb_check_var = radeon_fb_check_var, + .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = cfb_fillrect, @@ -206,6 +144,7 @@ int radeonfb_create(struct drm_device *dev, void *fbptr = NULL; unsigned long tmp; bool fb_tiled = false; /* useful for testing */ + u32 tiling_flags = 0; mode_cmd.width = surface_width; mode_cmd.height = surface_height; @@ -230,7 +169,22 @@ int radeonfb_create(struct drm_device *dev, robj = gobj->driver_private; if (fb_tiled) - radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); + tiling_flags = RADEON_TILING_MACRO; + +#ifdef __BIG_ENDIAN + switch (mode_cmd.bpp) { + case 32: + tiling_flags |= RADEON_TILING_SWAP_32BIT; + break; + case 16: + tiling_flags |= RADEON_TILING_SWAP_16BIT; + default: + break; + } +#endif + + if (tiling_flags) + radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch); mutex_lock(&rdev->ddev->struct_mutex); fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); if (fb == NULL) { @@ -313,45 +267,6 @@ int radeonfb_create(struct drm_device *dev, DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitch); -#ifdef __BIG_ENDIAN - /* fill var sets defaults for this stuff - override - on big endian */ - switch (fb->depth) { - case 8: - info->var.red.offset = 0; - info->var.green.offset = 0; - info->var.blue.offset = 0; - info->var.red.length = 8; /* 8bit DAC */ - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 24: - info->var.red.offset = 8; - info->var.green.offset = 16; - info->var.blue.offset = 24; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 32: - info->var.red.offset = 8; - info->var.green.offset = 16; - info->var.blue.offset = 24; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 8; - break; - default: - break; - } -#endif - fb->fbdev = info; rfbdev->rfb = rfb; rfbdev->rdev = rdev; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1500d5bc7af..73af463b7a5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -188,6 +188,7 @@ int radeon_object_kmap(struct radeon_object *robj, void **ptr) if (ptr) { *ptr = robj->kptr; } + radeon_object_check_tiling(robj, 0, 0); return 0; } @@ -200,6 +201,7 @@ void 
radeon_object_kunmap(struct radeon_object *robj) } robj->kptr = NULL; spin_unlock(&robj->tobj.lock); + radeon_object_check_tiling(robj, 0, 0); ttm_bo_kunmap(&robj->kmap); } diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index b67bbd75bd2..3b9932ab175 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -802,11 +802,12 @@ struct drm_radeon_gem_create { uint32_t flags; }; -#define RADEON_TILING_MACRO 0x1 -#define RADEON_TILING_MICRO 0x2 -#define RADEON_TILING_SWAP 0x4 -#define RADEON_TILING_SURFACE 0x8 /* this object requires a surface - * when mapped - i.e. front buffer */ +#define RADEON_TILING_MACRO 0x1 +#define RADEON_TILING_MICRO 0x2 +#define RADEON_TILING_SWAP_16BIT 0x4 +#define RADEON_TILING_SWAP_32BIT 0x8 +#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface + * when mapped - i.e. front buffer */ struct drm_radeon_gem_set_tiling { uint32_t handle; -- cgit v1.2.3-70-g09d2 From 181d683d752c432635eda0f182ee71548c1f1820 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 16 Sep 2009 01:06:43 -0700 Subject: Input: libps2 - additional locking for i8042 ports The serio ports on i8042 are not completely isolated; while we provide enough locking to ensure proper serialization when accessing control and data registers AUX and KBD ports can still have an effect on each other on PS/2 protocol level. The most prominent effect is that issuing a command for the device connected to one port may cause abort of the command currently executing by the device connected to another port. Since i8042 nor serio subsystem are not aware of the details of the PS/2 protocol (length of the commands and their replies and so on) the locking should be done on libps2 level by adding special handling when we see that we are dealing with serio port on i8042. 
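For a protocol driver the visible change is that raw byte sequences are now bracketed by ps2_begin_command()/ps2_end_command() instead of taking cmd_mutex directly. A hedged usage sketch, modelled on the sentelic.c conversion below, is shown here; my_send_magic(), the payload byte 0x66 and the 200 ms timeout are illustrative only, not part of this patch.

#include <linux/libps2.h>

static int my_send_magic(struct ps2dev *ps2dev)
{
	int rc = -1;

	ps2_begin_command(ps2dev);	/* takes cmd_mutex and, on i8042 ports, the chip mutex */

	if (ps2_sendbyte(ps2dev, 0xf3, 200) < 0)
		goto out;
	if (ps2_sendbyte(ps2dev, 0x66, 200) < 0)
		goto out;

	rc = 0;
out:
	ps2_end_command(ps2dev);	/* drops the chip mutex, then cmd_mutex */
	return rc;
}

Drivers that keep open-coding mutex_lock(&ps2dev->cmd_mutex) still serialize against themselves, but only the helper pair also keeps the other i8042 port from aborting the command mid-flight.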
Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/sentelic.c | 18 ++++++++++-------- drivers/input/serio/i8042.c | 41 +++++++++++++++++++++++++++++++++++++++++ drivers/input/serio/libps2.c | 28 ++++++++++++++++++++++++---- drivers/leds/leds-clevo-mail.c | 8 ++++++++ drivers/platform/x86/acer-wmi.c | 2 ++ include/linux/i8042.h | 30 ++++++++++++++++++++++++++++++ include/linux/libps2.h | 2 ++ 7 files changed, 117 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index 84e2fc04d11..f84cbd97c88 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -92,7 +92,8 @@ static int fsp_reg_read(struct psmouse *psmouse, int reg_addr, int *reg_val) */ ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); - mutex_lock(&ps2dev->cmd_mutex); + + ps2_begin_command(ps2dev); if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0) goto out; @@ -126,7 +127,7 @@ static int fsp_reg_read(struct psmouse *psmouse, int reg_addr, int *reg_val) rc = 0; out: - mutex_unlock(&ps2dev->cmd_mutex); + ps2_end_command(ps2dev); ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); psmouse_set_state(psmouse, PSMOUSE_ACTIVATED); dev_dbg(&ps2dev->serio->dev, "READ REG: 0x%02x is 0x%02x (rc = %d)\n", @@ -140,7 +141,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val) unsigned char v; int rc = -1; - mutex_lock(&ps2dev->cmd_mutex); + ps2_begin_command(ps2dev); if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0) goto out; @@ -179,7 +180,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val) rc = 0; out: - mutex_unlock(&ps2dev->cmd_mutex); + ps2_end_command(ps2dev); dev_dbg(&ps2dev->serio->dev, "WRITE REG: 0x%02x to 0x%02x (rc = %d)\n", reg_addr, reg_val, rc); return rc; @@ -214,7 +215,8 @@ static int fsp_page_reg_read(struct psmouse *psmouse, int *reg_val) ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); - mutex_lock(&ps2dev->cmd_mutex); + + ps2_begin_command(ps2dev); if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0) goto out; @@ -236,7 +238,7 @@ static int fsp_page_reg_read(struct psmouse *psmouse, int *reg_val) rc = 0; out: - mutex_unlock(&ps2dev->cmd_mutex); + ps2_end_command(ps2dev); ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); psmouse_set_state(psmouse, PSMOUSE_ACTIVATED); dev_dbg(&ps2dev->serio->dev, "READ PAGE REG: 0x%02x (rc = %d)\n", @@ -250,7 +252,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val) unsigned char v; int rc = -1; - mutex_lock(&ps2dev->cmd_mutex); + ps2_begin_command(ps2dev); if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0) goto out; @@ -275,7 +277,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val) rc = 0; out: - mutex_unlock(&ps2dev->cmd_mutex); + ps2_end_command(ps2dev); dev_dbg(&ps2dev->serio->dev, "WRITE PAGE REG: to 0x%02x (rc = %d)\n", reg_val, rc); return rc; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index eb3ff94af58..bc56e52b945 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -87,8 +87,22 @@ static bool i8042_bypass_aux_irq_test; #include "i8042.h" +/* + * i8042_lock protects serialization between i8042_command and + * the interrupt handler. 
+ */ static DEFINE_SPINLOCK(i8042_lock); +/* + * Writers to AUX and KBD ports as well as users issuing i8042_command + * directly should acquire i8042_mutex (by means of calling + * i8042_lock_chip() and i8042_unlock_ship() helpers) to ensure that + * they do not disturb each other (unfortunately in many i8042 + * implementations write to one of the ports will immediately abort + * command that is being processed by another port). + */ +static DEFINE_MUTEX(i8042_mutex); + struct i8042_port { struct serio *serio; int irq; @@ -113,6 +127,18 @@ static struct platform_device *i8042_platform_device; static irqreturn_t i8042_interrupt(int irq, void *dev_id); +void i8042_lock_chip(void) +{ + mutex_lock(&i8042_mutex); +} +EXPORT_SYMBOL(i8042_lock_chip); + +void i8042_unlock_chip(void) +{ + mutex_unlock(&i8042_mutex); +} +EXPORT_SYMBOL(i8042_unlock_chip); + /* * The i8042_wait_read() and i8042_wait_write functions wait for the i8042 to * be ready for reading values from it / writing values to it. @@ -1161,6 +1187,21 @@ static void __devexit i8042_unregister_ports(void) } } +/* + * Checks whether port belongs to i8042 controller. + */ +bool i8042_check_port_owner(const struct serio *port) +{ + int i; + + for (i = 0; i < I8042_NUM_PORTS; i++) + if (i8042_ports[i].serio == port) + return true; + + return false; +} +EXPORT_SYMBOL(i8042_check_port_owner); + static void i8042_free_irqs(void) { if (i8042_aux_irq_registered) diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 3a95b508bf2..769ba65a585 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,24 @@ int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout) } EXPORT_SYMBOL(ps2_sendbyte); +void ps2_begin_command(struct ps2dev *ps2dev) +{ + mutex_lock(&ps2dev->cmd_mutex); + + if (i8042_check_port_owner(ps2dev->serio)) + i8042_lock_chip(); +} +EXPORT_SYMBOL(ps2_begin_command); + +void ps2_end_command(struct ps2dev *ps2dev) +{ + if (i8042_check_port_owner(ps2dev->serio)) + i8042_unlock_chip(); + + mutex_unlock(&ps2dev->cmd_mutex); +} +EXPORT_SYMBOL(ps2_end_command); + /* * ps2_drain() waits for device to transmit requested number of bytes * and discards them. 
@@ -66,7 +85,7 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout) maxbytes = sizeof(ps2dev->cmdbuf); } - mutex_lock(&ps2dev->cmd_mutex); + ps2_begin_command(ps2dev); serio_pause_rx(ps2dev->serio); ps2dev->flags = PS2_FLAG_CMD; @@ -76,7 +95,8 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout) wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD), msecs_to_jiffies(timeout)); - mutex_unlock(&ps2dev->cmd_mutex); + + ps2_end_command(ps2dev); } EXPORT_SYMBOL(ps2_drain); @@ -237,9 +257,9 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) { int rc; - mutex_lock(&ps2dev->cmd_mutex); + ps2_begin_command(ps2dev); rc = __ps2_command(ps2dev, param, command); - mutex_unlock(&ps2dev->cmd_mutex); + ps2_end_command(ps2dev); return rc; } diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index 1813c84ea5f..f2242db5401 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c @@ -93,6 +93,8 @@ static struct dmi_system_id __initdata mail_led_whitelist[] = { static void clevo_mail_led_set(struct led_classdev *led_cdev, enum led_brightness value) { + i8042_lock_chip(); + if (value == LED_OFF) i8042_command(NULL, CLEVO_MAIL_LED_OFF); else if (value <= LED_HALF) @@ -100,6 +102,8 @@ static void clevo_mail_led_set(struct led_classdev *led_cdev, else i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ); + i8042_unlock_chip(); + } static int clevo_mail_led_blink(struct led_classdev *led_cdev, @@ -108,6 +112,8 @@ static int clevo_mail_led_blink(struct led_classdev *led_cdev, { int status = -EINVAL; + i8042_lock_chip(); + if (*delay_on == 0 /* ms */ && *delay_off == 0 /* ms */) { /* Special case: the leds subsystem requested us to * chose one user friendly blinking of the LED, and @@ -135,6 +141,8 @@ static int clevo_mail_led_blink(struct led_classdev *led_cdev, *delay_on, *delay_off); } + i8042_unlock_chip(); + return status; } diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index fb45f5ee8df..454970d2d70 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -746,7 +746,9 @@ static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface) return AE_BAD_PARAMETER; if (quirks->mailled == 1) { param = value ? 0x92 : 0x93; + i8042_lock_chip(); i8042_command(¶m, 0x1059); + i8042_unlock_chip(); return 0; } break; diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 7907a72403e..60c3360ef6a 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -7,6 +7,7 @@ * the Free Software Foundation. */ +#include /* * Standard commands. 
@@ -30,6 +31,35 @@ #define I8042_CMD_MUX_PFX 0x0090 #define I8042_CMD_MUX_SEND 0x1090 +struct serio; + +#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE) + +void i8042_lock_chip(void); +void i8042_unlock_chip(void); int i8042_command(unsigned char *param, int command); +bool i8042_check_port_owner(const struct serio *); + +#else + +void i8042_lock_chip(void) +{ +} + +void i8042_unlock_chip(void) +{ +} + +int i8042_command(unsigned char *param, int command) +{ + return -ENOSYS; +} + +bool i8042_check_port_owner(const struct serio *serio) +{ + return false; +} + +#endif #endif diff --git a/include/linux/libps2.h b/include/linux/libps2.h index fcf5fbe6a50..79603a6c356 100644 --- a/include/linux/libps2.h +++ b/include/linux/libps2.h @@ -44,6 +44,8 @@ struct ps2dev { void ps2_init(struct ps2dev *ps2dev, struct serio *serio); int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout); void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout); +void ps2_begin_command(struct ps2dev *ps2dev); +void ps2_end_command(struct ps2dev *ps2dev); int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data); -- cgit v1.2.3-70-g09d2 From ffd0db97196c1057f09c2ab42dd5b30e94e511d9 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 16 Sep 2009 01:06:43 -0700 Subject: Input: add generic suspend and resume for input devices Automatically turn off leds and sound effects as part of suspend process and restore led state, sounds and repeat rate at resume. Signed-off-by: Dmitry Torokhov --- drivers/input/input.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/input.h | 2 +- 2 files changed, 64 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/input/input.c b/drivers/input/input.c index 7c237e6ac71..b8ed4294fcc 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -514,7 +515,7 @@ static void input_disconnect_device(struct input_dev *dev) * that there are no threads in the middle of input_open_device() */ mutex_lock(&dev->mutex); - dev->going_away = 1; + dev->going_away = true; mutex_unlock(&dev->mutex); spin_lock_irq(&dev->event_lock); @@ -1259,10 +1260,71 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) return 0; } +#define INPUT_DO_TOGGLE(dev, type, bits, on) \ + do { \ + int i; \ + if (!test_bit(EV_##type, dev->evbit)) \ + break; \ + for (i = 0; i < type##_MAX; i++) { \ + if (!test_bit(i, dev->bits##bit) || \ + !test_bit(i, dev->bits)) \ + continue; \ + dev->event(dev, EV_##type, i, on); \ + } \ + } while (0) + +static void input_dev_reset(struct input_dev *dev, bool activate) +{ + if (!dev->event) + return; + + INPUT_DO_TOGGLE(dev, LED, led, activate); + INPUT_DO_TOGGLE(dev, SND, snd, activate); + + if (activate && test_bit(EV_REP, dev->evbit)) { + dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]); + dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]); + } +} + +#ifdef CONFIG_PM +static int input_dev_suspend(struct device *dev) +{ + struct input_dev *input_dev = to_input_dev(dev); + + mutex_lock(&input_dev->mutex); + input_dev_reset(input_dev, false); + mutex_unlock(&input_dev->mutex); + + return 0; +} + +static int input_dev_resume(struct device *dev) +{ + struct input_dev *input_dev = to_input_dev(dev); + + mutex_lock(&input_dev->mutex); + 
input_dev_reset(input_dev, true); + mutex_unlock(&input_dev->mutex); + + return 0; +} + +static const struct dev_pm_ops input_dev_pm_ops = { + .suspend = input_dev_suspend, + .resume = input_dev_resume, + .poweroff = input_dev_suspend, + .restore = input_dev_resume, +}; +#endif /* CONFIG_PM */ + static struct device_type input_dev_type = { .groups = input_dev_attr_groups, .release = input_dev_release, .uevent = input_dev_uevent, +#ifdef CONFIG_PM + .pm = &input_dev_pm_ops, +#endif }; static char *input_nodename(struct device *dev) diff --git a/include/linux/input.h b/include/linux/input.h index 8b3bc3e0d14..0ccfc30cd40 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1123,7 +1123,7 @@ struct input_dev { struct mutex mutex; unsigned int users; - int going_away; + bool going_away; struct device dev; -- cgit v1.2.3-70-g09d2 From 38e783b38148531c0840ac130b97eb8158f84b48 Mon Sep 17 00:00:00 2001 From: Joonyoung Shim Date: Thu, 17 Sep 2009 22:35:45 -0700 Subject: Input: add touchscreen driver for MELFAS MCS-5000 controller The MELPAS MCS-5000 is the touchscreen controller. The overview of this controller can see at the following website: http://www.melfas.com/product/product01.asp?k_r=eng_ This driver is tested on s3c6410 NCP board and supports only the i2c interface. Signed-off-by: Joonyoung Shim Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/Kconfig | 11 ++ drivers/input/touchscreen/Makefile | 1 + drivers/input/touchscreen/mcs5000_ts.c | 318 +++++++++++++++++++++++++++++++++ include/linux/i2c/mcs5000_ts.h | 24 +++ 4 files changed, 354 insertions(+) create mode 100644 drivers/input/touchscreen/mcs5000_ts.c create mode 100644 include/linux/i2c/mcs5000_ts.h (limited to 'include') diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 5a252d449e5..6dfb0a2b0a7 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -169,6 +169,17 @@ config TOUCHSCREEN_WACOM_W8001 To compile this driver as a module, choose M here: the module will be called wacom_w8001. +config TOUCHSCREEN_MCS5000 + tristate "MELFAS MCS-5000 touchscreen" + depends on I2C + help + Say Y here if you have the MELFAS MCS-5000 touchscreen controller + chip in your system. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called mcs5000_ts. 
config TOUCHSCREEN_MTOUCH tristate "MicroTouch serial touchscreens" diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 3e1c5e0b952..a882ca16506 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o +obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c new file mode 100644 index 00000000000..4c28b89757f --- /dev/null +++ b/drivers/input/touchscreen/mcs5000_ts.c @@ -0,0 +1,318 @@ +/* + * mcs5000_ts.c - Touchscreen driver for MELFAS MCS-5000 controller + * + * Copyright (C) 2009 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * + * Based on wm97xx-core.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Registers */ +#define MCS5000_TS_STATUS 0x00 +#define STATUS_OFFSET 0 +#define STATUS_NO (0 << STATUS_OFFSET) +#define STATUS_INIT (1 << STATUS_OFFSET) +#define STATUS_SENSING (2 << STATUS_OFFSET) +#define STATUS_COORD (3 << STATUS_OFFSET) +#define STATUS_GESTURE (4 << STATUS_OFFSET) +#define ERROR_OFFSET 4 +#define ERROR_NO (0 << ERROR_OFFSET) +#define ERROR_POWER_ON_RESET (1 << ERROR_OFFSET) +#define ERROR_INT_RESET (2 << ERROR_OFFSET) +#define ERROR_EXT_RESET (3 << ERROR_OFFSET) +#define ERROR_INVALID_REG_ADDRESS (8 << ERROR_OFFSET) +#define ERROR_INVALID_REG_VALUE (9 << ERROR_OFFSET) + +#define MCS5000_TS_OP_MODE 0x01 +#define RESET_OFFSET 0 +#define RESET_NO (0 << RESET_OFFSET) +#define RESET_EXT_SOFT (1 << RESET_OFFSET) +#define OP_MODE_OFFSET 1 +#define OP_MODE_SLEEP (0 << OP_MODE_OFFSET) +#define OP_MODE_ACTIVE (1 << OP_MODE_OFFSET) +#define GESTURE_OFFSET 4 +#define GESTURE_DISABLE (0 << GESTURE_OFFSET) +#define GESTURE_ENABLE (1 << GESTURE_OFFSET) +#define PROXIMITY_OFFSET 5 +#define PROXIMITY_DISABLE (0 << PROXIMITY_OFFSET) +#define PROXIMITY_ENABLE (1 << PROXIMITY_OFFSET) +#define SCAN_MODE_OFFSET 6 +#define SCAN_MODE_INTERRUPT (0 << SCAN_MODE_OFFSET) +#define SCAN_MODE_POLLING (1 << SCAN_MODE_OFFSET) +#define REPORT_RATE_OFFSET 7 +#define REPORT_RATE_40 (0 << REPORT_RATE_OFFSET) +#define REPORT_RATE_80 (1 << REPORT_RATE_OFFSET) + +#define MCS5000_TS_SENS_CTL 0x02 +#define MCS5000_TS_FILTER_CTL 0x03 +#define PRI_FILTER_OFFSET 0 +#define SEC_FILTER_OFFSET 4 + +#define MCS5000_TS_X_SIZE_UPPER 0x08 +#define MCS5000_TS_X_SIZE_LOWER 0x09 +#define MCS5000_TS_Y_SIZE_UPPER 0x0A +#define MCS5000_TS_Y_SIZE_LOWER 0x0B + +#define MCS5000_TS_INPUT_INFO 0x10 +#define INPUT_TYPE_OFFSET 0 +#define INPUT_TYPE_NONTOUCH (0 << INPUT_TYPE_OFFSET) +#define INPUT_TYPE_SINGLE (1 << INPUT_TYPE_OFFSET) +#define INPUT_TYPE_DUAL (2 << INPUT_TYPE_OFFSET) +#define INPUT_TYPE_PALM (3 << INPUT_TYPE_OFFSET) +#define INPUT_TYPE_PROXIMITY (7 << INPUT_TYPE_OFFSET) +#define GESTURE_CODE_OFFSET 3 +#define GESTURE_CODE_NO (0 << GESTURE_CODE_OFFSET) + +#define MCS5000_TS_X_POS_UPPER 0x11 +#define MCS5000_TS_X_POS_LOWER 0x12 +#define 
MCS5000_TS_Y_POS_UPPER 0x13 +#define MCS5000_TS_Y_POS_LOWER 0x14 +#define MCS5000_TS_Z_POS 0x15 +#define MCS5000_TS_WIDTH 0x16 +#define MCS5000_TS_GESTURE_VAL 0x17 +#define MCS5000_TS_MODULE_REV 0x20 +#define MCS5000_TS_FIRMWARE_VER 0x21 + +/* Touchscreen absolute values */ +#define MCS5000_MAX_XC 0x3ff +#define MCS5000_MAX_YC 0x3ff + +enum mcs5000_ts_read_offset { + READ_INPUT_INFO, + READ_X_POS_UPPER, + READ_X_POS_LOWER, + READ_Y_POS_UPPER, + READ_Y_POS_LOWER, + READ_BLOCK_SIZE, +}; + +/* Each client has this additional data */ +struct mcs5000_ts_data { + struct i2c_client *client; + struct input_dev *input_dev; + const struct mcs5000_ts_platform_data *platform_data; +}; + +static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id) +{ + struct mcs5000_ts_data *data = dev_id; + struct i2c_client *client = data->client; + u8 buffer[READ_BLOCK_SIZE]; + int err; + int x; + int y; + + err = i2c_smbus_read_i2c_block_data(client, MCS5000_TS_INPUT_INFO, + READ_BLOCK_SIZE, buffer); + if (err < 0) { + dev_err(&client->dev, "%s, err[%d]\n", __func__, err); + goto out; + } + + switch (buffer[READ_INPUT_INFO]) { + case INPUT_TYPE_NONTOUCH: + input_report_key(data->input_dev, BTN_TOUCH, 0); + input_sync(data->input_dev); + break; + + case INPUT_TYPE_SINGLE: + x = (buffer[READ_X_POS_UPPER] << 8) | buffer[READ_X_POS_LOWER]; + y = (buffer[READ_Y_POS_UPPER] << 8) | buffer[READ_Y_POS_LOWER]; + + input_report_key(data->input_dev, BTN_TOUCH, 1); + input_report_abs(data->input_dev, ABS_X, x); + input_report_abs(data->input_dev, ABS_Y, y); + input_sync(data->input_dev); + break; + + case INPUT_TYPE_DUAL: + /* TODO */ + break; + + case INPUT_TYPE_PALM: + /* TODO */ + break; + + case INPUT_TYPE_PROXIMITY: + /* TODO */ + break; + + default: + dev_err(&client->dev, "Unknown ts input type %d\n", + buffer[READ_INPUT_INFO]); + break; + } + + out: + return IRQ_HANDLED; +} + +static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data) +{ + const struct mcs5000_ts_platform_data *platform_data = + data->platform_data; + struct i2c_client *client = data->client; + + /* Touch reset & sleep mode */ + i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, + RESET_EXT_SOFT | OP_MODE_SLEEP); + + /* Touch size */ + i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_UPPER, + platform_data->x_size >> 8); + i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_LOWER, + platform_data->x_size & 0xff); + i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_UPPER, + platform_data->y_size >> 8); + i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_LOWER, + platform_data->y_size & 0xff); + + /* Touch active mode & 80 report rate */ + i2c_smbus_write_byte_data(data->client, MCS5000_TS_OP_MODE, + OP_MODE_ACTIVE | REPORT_RATE_80); +} + +static int __devinit mcs5000_ts_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct mcs5000_ts_data *data; + struct input_dev *input_dev; + int ret; + + if (!client->dev.platform_data) + return -EINVAL; + + data = kzalloc(sizeof(struct mcs5000_ts_data), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!data || !input_dev) { + dev_err(&client->dev, "Failed to allocate memory\n"); + ret = -ENOMEM; + goto err_free_mem; + } + + data->client = client; + data->input_dev = input_dev; + data->platform_data = client->dev.platform_data; + + input_dev->name = "MELPAS MCS-5000 Touchscreen"; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = &client->dev; + + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(EV_KEY, input_dev->evbit); + __set_bit(BTN_TOUCH, 
input_dev->keybit); + input_set_abs_params(input_dev, ABS_X, 0, MCS5000_MAX_XC, 0, 0); + input_set_abs_params(input_dev, ABS_Y, 0, MCS5000_MAX_YC, 0, 0); + + input_set_drvdata(input_dev, data); + + if (data->platform_data->cfg_pin) + data->platform_data->cfg_pin(); + + ret = request_threaded_irq(client->irq, NULL, mcs5000_ts_interrupt, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, "mcs5000_ts", data); + + if (ret < 0) { + dev_err(&client->dev, "Failed to register interrupt\n"); + goto err_free_mem; + } + + ret = input_register_device(data->input_dev); + if (ret < 0) + goto err_free_irq; + + mcs5000_ts_phys_init(data); + i2c_set_clientdata(client, data); + + return 0; + +err_free_irq: + free_irq(client->irq, data); +err_free_mem: + input_free_device(input_dev); + kfree(data); + return ret; +} + +static int __devexit mcs5000_ts_remove(struct i2c_client *client) +{ + struct mcs5000_ts_data *data = i2c_get_clientdata(client); + + free_irq(client->irq, data); + input_unregister_device(data->input_dev); + kfree(data); + i2c_set_clientdata(client, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int mcs5000_ts_suspend(struct i2c_client *client, pm_message_t mesg) +{ + /* Touch sleep mode */ + i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP); + + return 0; +} + +static int mcs5000_ts_resume(struct i2c_client *client) +{ + struct mcs5000_ts_data *data = i2c_get_clientdata(client); + + mcs5000_ts_phys_init(data); + + return 0; +} +#else +#define mcs5000_ts_suspend NULL +#define mcs5000_ts_resume NULL +#endif + +static const struct i2c_device_id mcs5000_ts_id[] = { + { "mcs5000_ts", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id); + +static struct i2c_driver mcs5000_ts_driver = { + .probe = mcs5000_ts_probe, + .remove = __devexit_p(mcs5000_ts_remove), + .suspend = mcs5000_ts_suspend, + .resume = mcs5000_ts_resume, + .driver = { + .name = "mcs5000_ts", + }, + .id_table = mcs5000_ts_id, +}; + +static int __init mcs5000_ts_init(void) +{ + return i2c_add_driver(&mcs5000_ts_driver); +} + +static void __exit mcs5000_ts_exit(void) +{ + i2c_del_driver(&mcs5000_ts_driver); +} + +module_init(mcs5000_ts_init); +module_exit(mcs5000_ts_exit); + +/* Module information */ +MODULE_AUTHOR("Joonyoung Shim "); +MODULE_DESCRIPTION("Touchscreen driver for MELFAS MCS-5000 controller"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/i2c/mcs5000_ts.h b/include/linux/i2c/mcs5000_ts.h new file mode 100644 index 00000000000..5a117b5ca15 --- /dev/null +++ b/include/linux/i2c/mcs5000_ts.h @@ -0,0 +1,24 @@ +/* + * mcs5000_ts.h + * + * Copyright (C) 2009 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MCS5000_TS_H +#define __LINUX_MCS5000_TS_H + +/* platform data for the MELFAS MCS-5000 touchscreen driver */ +struct mcs5000_ts_platform_data { + void (*cfg_pin)(void); + int x_size; + int y_size; +}; + +#endif /* __LINUX_MCS5000_TS_H */ -- cgit v1.2.3-70-g09d2 From 88751dd6ce1fb0627c36c4ab08a40730e5a50d3e Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Thu, 17 Sep 2009 22:39:38 -0700 Subject: Input: add driver for ADP5588 QWERTY I2C Keypad Signed-off-by: Michael Hennerich Signed-off-by: Bryan Wu Signed-off-by: Mike Frysinger Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/Kconfig | 10 + drivers/input/keyboard/Makefile | 1 + drivers/input/keyboard/adp5588-keys.c | 361 ++++++++++++++++++++++++++++++++++ include/linux/i2c/adp5588.h | 92 +++++++++ 4 files changed, 464 insertions(+) create mode 100644 drivers/input/keyboard/adp5588-keys.c create mode 100644 include/linux/i2c/adp5588.h (limited to 'include') diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index b14bd3a0714..d615c09a83c 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -24,6 +24,16 @@ config KEYBOARD_AAED2000 To compile this driver as a module, choose M here: the module will be called aaed2000_kbd. +config KEYBOARD_ADP5588 + tristate "ADP5588 I2C QWERTY Keypad and IO Expander" + depends on I2C + help + Say Y here if you want to use a ADP5588 attached to your + system I2C bus. + + To compile this driver as a module, choose M here: the + module will be called adp5588-keys. + config KEYBOARD_AMIGA tristate "Amiga keyboard" depends on AMIGA diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index ab35ac36111..a5c08cdf808 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -5,6 +5,7 @@ # Each configuration option enables a list of files. obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o +obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c new file mode 100644 index 00000000000..d48c808d592 --- /dev/null +++ b/drivers/input/keyboard/adp5588-keys.c @@ -0,0 +1,361 @@ +/* + * File: drivers/input/keyboard/adp5588_keys.c + * Description: keypad driver for ADP5588 I2C QWERTY Keypad and IO Expander + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * Copyright (C) 2008-2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + /* Configuration Register1 */ +#define AUTO_INC (1 << 7) +#define GPIEM_CFG (1 << 6) +#define OVR_FLOW_M (1 << 5) +#define INT_CFG (1 << 4) +#define OVR_FLOW_IEN (1 << 3) +#define K_LCK_IM (1 << 2) +#define GPI_IEN (1 << 1) +#define KE_IEN (1 << 0) + +/* Interrupt Status Register */ +#define CMP2_INT (1 << 5) +#define CMP1_INT (1 << 4) +#define OVR_FLOW_INT (1 << 3) +#define K_LCK_INT (1 << 2) +#define GPI_INT (1 << 1) +#define KE_INT (1 << 0) + +/* Key Lock and Event Counter Register */ +#define K_LCK_EN (1 << 6) +#define LCK21 0x30 +#define KEC 0xF + +/* Key Event Register xy */ +#define KEY_EV_PRESSED (1 << 7) +#define KEY_EV_MASK (0x7F) + +#define KP_SEL(x) (0xFFFF >> (16 - x)) /* 2^x-1 */ + +#define KEYP_MAX_EVENT 10 + +/* + * Early pre 4.0 Silicon required to delay readout by at least 25ms, + * since the Event Counter Register updated 25ms after the interrupt + * asserted. + */ +#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4) + +struct adp5588_kpad { + struct i2c_client *client; + struct input_dev *input; + struct delayed_work work; + unsigned long delay; + unsigned short keycode[ADP5588_KEYMAPSIZE]; +}; + +static int adp5588_read(struct i2c_client *client, u8 reg) +{ + int ret = i2c_smbus_read_byte_data(client, reg); + + if (ret < 0) + dev_err(&client->dev, "Read Error\n"); + + return ret; +} + +static int adp5588_write(struct i2c_client *client, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(client, reg, val); +} + +static void adp5588_work(struct work_struct *work) +{ + struct adp5588_kpad *kpad = container_of(work, + struct adp5588_kpad, work.work); + struct i2c_client *client = kpad->client; + int i, key, status, ev_cnt; + + status = adp5588_read(client, INT_STAT); + + if (status & OVR_FLOW_INT) /* Unlikely and should never happen */ + dev_err(&client->dev, "Event Overflow Error\n"); + + if (status & KE_INT) { + ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC; + if (ev_cnt) { + for (i = 0; i < ev_cnt; i++) { + key = adp5588_read(client, Key_EVENTA + i); + input_report_key(kpad->input, + kpad->keycode[(key & KEY_EV_MASK) - 1], + key & KEY_EV_PRESSED); + } + input_sync(kpad->input); + } + } + adp5588_write(client, INT_STAT, status); /* Status is W1C */ +} + +static irqreturn_t adp5588_irq(int irq, void *handle) +{ + struct adp5588_kpad *kpad = handle; + + /* + * use keventd context to read the event fifo registers + * Schedule readout at least 25ms after notification for + * REVID < 4 + */ + + schedule_delayed_work(&kpad->work, kpad->delay); + + return IRQ_HANDLED; +} + +static int __devinit adp5588_setup(struct i2c_client *client) +{ + struct adp5588_kpad_platform_data *pdata = client->dev.platform_data; + int i, ret; + + ret = adp5588_write(client, KP_GPIO1, KP_SEL(pdata->rows)); + ret |= adp5588_write(client, KP_GPIO2, KP_SEL(pdata->cols) & 0xFF); + ret |= adp5588_write(client, KP_GPIO3, KP_SEL(pdata->cols) >> 8); + + if (pdata->en_keylock) { + ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1); + ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2); + ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN); + } + + for (i = 0; i < KEYP_MAX_EVENT; i++) + ret |= adp5588_read(client, Key_EVENTA); + + ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | + OVR_FLOW_INT | K_LCK_INT | + GPI_INT | KE_INT); /* Status is W1C */ + + ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN); + + if (ret < 0) { + 
dev_err(&client->dev, "Write Error\n"); + return ret; + } + + return 0; +} + +static int __devinit adp5588_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adp5588_kpad *kpad; + struct adp5588_kpad_platform_data *pdata = client->dev.platform_data; + struct input_dev *input; + unsigned int revid; + int ret, i; + int error; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); + return -EIO; + } + + if (!pdata) { + dev_err(&client->dev, "no platform data?\n"); + return -EINVAL; + } + + if (!pdata->rows || !pdata->cols || !pdata->keymap) { + dev_err(&client->dev, "no rows, cols or keymap from pdata\n"); + return -EINVAL; + } + + if (pdata->keymapsize != ADP5588_KEYMAPSIZE) { + dev_err(&client->dev, "invalid keymapsize\n"); + return -EINVAL; + } + + if (!client->irq) { + dev_err(&client->dev, "no IRQ?\n"); + return -EINVAL; + } + + kpad = kzalloc(sizeof(*kpad), GFP_KERNEL); + input = input_allocate_device(); + if (!kpad || !input) { + error = -ENOMEM; + goto err_free_mem; + } + + kpad->client = client; + kpad->input = input; + INIT_DELAYED_WORK(&kpad->work, adp5588_work); + + ret = adp5588_read(client, DEV_ID); + if (ret < 0) { + error = ret; + goto err_free_mem; + } + + revid = (u8) ret & ADP5588_DEVICE_ID_MASK; + if (WA_DELAYED_READOUT_REVID(revid)) + kpad->delay = msecs_to_jiffies(30); + + input->name = client->name; + input->phys = "adp5588-keys/input0"; + input->dev.parent = &client->dev; + + input_set_drvdata(input, kpad); + + input->id.bustype = BUS_I2C; + input->id.vendor = 0x0001; + input->id.product = 0x0001; + input->id.version = revid; + + input->keycodesize = sizeof(kpad->keycode[0]); + input->keycodemax = pdata->keymapsize; + input->keycode = kpad->keycode; + + memcpy(kpad->keycode, pdata->keymap, + pdata->keymapsize * input->keycodesize); + + /* setup input device */ + __set_bit(EV_KEY, input->evbit); + + if (pdata->repeat) + __set_bit(EV_REP, input->evbit); + + for (i = 0; i < input->keycodemax; i++) + __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); + __clear_bit(KEY_RESERVED, input->keybit); + + error = input_register_device(input); + if (error) { + dev_err(&client->dev, "unable to register input device\n"); + goto err_free_mem; + } + + error = request_irq(client->irq, adp5588_irq, + IRQF_TRIGGER_FALLING | IRQF_DISABLED, + client->dev.driver->name, kpad); + if (error) { + dev_err(&client->dev, "irq %d busy?\n", client->irq); + goto err_unreg_dev; + } + + error = adp5588_setup(client); + if (error) + goto err_free_irq; + + device_init_wakeup(&client->dev, 1); + i2c_set_clientdata(client, kpad); + + dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq); + return 0; + + err_free_irq: + free_irq(client->irq, kpad); + err_unreg_dev: + input_unregister_device(input); + input = NULL; + err_free_mem: + input_free_device(input); + kfree(kpad); + + return error; +} + +static int __devexit adp5588_remove(struct i2c_client *client) +{ + struct adp5588_kpad *kpad = i2c_get_clientdata(client); + + adp5588_write(client, CFG, 0); + free_irq(client->irq, kpad); + cancel_delayed_work_sync(&kpad->work); + input_unregister_device(kpad->input); + i2c_set_clientdata(client, NULL); + kfree(kpad); + + return 0; +} + +#ifdef CONFIG_PM +static int adp5588_suspend(struct device *dev) +{ + struct adp5588_kpad *kpad = dev_get_drvdata(dev); + struct i2c_client *client = kpad->client; + + disable_irq(client->irq); + cancel_delayed_work_sync(&kpad->work); + + if 
(device_may_wakeup(&client->dev)) + enable_irq_wake(client->irq); + + return 0; +} + +static int adp5588_resume(struct device *dev) +{ + struct adp5588_kpad *kpad = dev_get_drvdata(dev); + struct i2c_client *client = kpad->client; + + if (device_may_wakeup(&client->dev)) + disable_irq_wake(client->irq); + + enable_irq(client->irq); + + return 0; +} + +static struct dev_pm_ops adp5588_dev_pm_ops = { + .suspend = adp5588_suspend, + .resume = adp5588_resume, +}; +#endif + +static const struct i2c_device_id adp5588_id[] = { + { KBUILD_MODNAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adp5588_id); + +static struct i2c_driver adp5588_driver = { + .driver = { + .name = KBUILD_MODNAME, +#ifdef CONFIG_PM + .pm = &adp5588_dev_pm_ops, +#endif + }, + .probe = adp5588_probe, + .remove = __devexit_p(adp5588_remove), + .id_table = adp5588_id, +}; + +static int __init adp5588_init(void) +{ + return i2c_add_driver(&adp5588_driver); +} +module_init(adp5588_init); + +static void __exit adp5588_exit(void) +{ + i2c_del_driver(&adp5588_driver); +} +module_exit(adp5588_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michael Hennerich "); +MODULE_DESCRIPTION("ADP5588 Keypad driver"); +MODULE_ALIAS("platform:adp5588-keys"); diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h new file mode 100644 index 00000000000..fc5db826b48 --- /dev/null +++ b/include/linux/i2c/adp5588.h @@ -0,0 +1,92 @@ +/* + * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef _ADP5588_H +#define _ADP5588_H + +#define DEV_ID 0x00 /* Device ID */ +#define CFG 0x01 /* Configuration Register1 */ +#define INT_STAT 0x02 /* Interrupt Status Register */ +#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */ +#define Key_EVENTA 0x04 /* Key Event Register A */ +#define Key_EVENTB 0x05 /* Key Event Register B */ +#define Key_EVENTC 0x06 /* Key Event Register C */ +#define Key_EVENTD 0x07 /* Key Event Register D */ +#define Key_EVENTE 0x08 /* Key Event Register E */ +#define Key_EVENTF 0x09 /* Key Event Register F */ +#define Key_EVENTG 0x0A /* Key Event Register G */ +#define Key_EVENTH 0x0B /* Key Event Register H */ +#define Key_EVENTI 0x0C /* Key Event Register I */ +#define Key_EVENTJ 0x0D /* Key Event Register J */ +#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */ +#define UNLOCK1 0x0F /* Unlock Key1 */ +#define UNLOCK2 0x10 /* Unlock Key2 */ +#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */ +#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */ +#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */ +#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */ +#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */ +#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */ +#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */ +#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */ +#define GPI_EM1 0x20 /* GPI Event Mode 1 */ +#define GPI_EM2 0x21 /* GPI Event Mode 2 */ +#define GPI_EM3 0x22 /* GPI Event Mode 3 */ +#define GPIO_DIR1 0x23 /* GPIO Data Direction */ +#define GPIO_DIR2 
0x24 /* GPIO Data Direction */ +#define GPIO_DIR3 0x25 /* GPIO Data Direction */ +#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */ +#define Debounce_DIS1 0x29 /* Debounce Disable */ +#define Debounce_DIS2 0x2A /* Debounce Disable */ +#define Debounce_DIS3 0x2B /* Debounce Disable */ +#define GPIO_PULL1 0x2C /* GPIO Pull Disable */ +#define GPIO_PULL2 0x2D /* GPIO Pull Disable */ +#define GPIO_PULL3 0x2E /* GPIO Pull Disable */ +#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */ +#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */ +#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */ +#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */ +#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */ +#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */ +#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */ +#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */ +#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */ +#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */ +#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */ +#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */ +#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */ +#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */ +#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */ + +#define ADP5588_DEVICE_ID_MASK 0xF + +/* Put one of these structures in i2c_board_info platform_data */ + +#define ADP5588_KEYMAPSIZE 80 + +struct adp5588_kpad_platform_data { + int rows; /* Number of rows */ + int cols; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ + unsigned en_keylock:1; /* Enable Key Lock feature */ + unsigned short unlock_key1; /* Unlock Key 1 */ + unsigned short unlock_key2; /* Unlock Key 2 */ +}; + +#endif -- cgit v1.2.3-70-g09d2 From 502a0106b2cc31940f690dc6693fddfd3b97cab5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 27 Jul 2009 14:46:12 +0100 Subject: [WATCHDOG] Add support for WM831x watchdog The WM831x series of devices provide a watchdog with configurable behaviour on timer expiry. Currently this driver support refreshes via a register or GPIO line and autonomous refreshes from a hardware source (eg, a clock). Signed-off-by: Mark Brown Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/Kconfig | 7 + drivers/watchdog/Makefile | 1 + drivers/watchdog/wm831x_wdt.c | 441 ++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/watchdog.h | 52 +++++ 4 files changed, 501 insertions(+) create mode 100644 drivers/watchdog/wm831x_wdt.c create mode 100644 include/linux/mfd/wm831x/watchdog.h (limited to 'include') diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 3229889c359..ff3eb8ff6bd 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -55,6 +55,13 @@ config SOFT_WATCHDOG To compile this driver as a module, choose M here: the module will be called softdog. 
+config WM831X_WATCHDOG + tristate "WM831x watchdog" + depends on MFD_WM831X + help + Support for the watchdog in the WM831x AudioPlus PMICs. When + the watchdog triggers the system will be reset. + config WM8350_WATCHDOG tristate "WM8350 watchdog" depends on MFD_WM8350 diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index e1784a6208a..348b3b862c9 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -141,5 +141,6 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o # XTENSA Architecture # Architecture Independant +obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c new file mode 100644 index 00000000000..775bcd807f3 --- /dev/null +++ b/drivers/watchdog/wm831x_wdt.c @@ -0,0 +1,441 @@ +/* + * Watchdog driver for the wm831x PMICs + * + * Copyright (C) 2009 Wolfson Microelectronics + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static unsigned long wm831x_wdt_users; +static struct miscdevice wm831x_wdt_miscdev; +static int wm831x_wdt_expect_close; +static DEFINE_MUTEX(wdt_mutex); +static struct wm831x *wm831x; +static unsigned int update_gpio; +static unsigned int update_state; + +/* We can't use the sub-second values here but they're included + * for completeness. 
*/ +static struct { + int time; /* Seconds */ + u16 val; /* WDOG_TO value */ +} wm831x_wdt_cfgs[] = { + { 1, 2 }, + { 2, 3 }, + { 4, 4 }, + { 8, 5 }, + { 16, 6 }, + { 32, 7 }, + { 33, 7 }, /* Actually 32.768s so include both, others round down */ +}; + +static int wm831x_wdt_set_timeout(struct wm831x *wm831x, u16 value) +{ + int ret; + + mutex_lock(&wdt_mutex); + + ret = wm831x_reg_unlock(wm831x); + if (ret == 0) { + ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG, + WM831X_WDOG_TO_MASK, value); + wm831x_reg_lock(wm831x); + } else { + dev_err(wm831x->dev, "Failed to unlock security key: %d\n", + ret); + } + + mutex_unlock(&wdt_mutex); + + return ret; +} + +static int wm831x_wdt_start(struct wm831x *wm831x) +{ + int ret; + + mutex_lock(&wdt_mutex); + + ret = wm831x_reg_unlock(wm831x); + if (ret == 0) { + ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG, + WM831X_WDOG_ENA, WM831X_WDOG_ENA); + wm831x_reg_lock(wm831x); + } else { + dev_err(wm831x->dev, "Failed to unlock security key: %d\n", + ret); + } + + mutex_unlock(&wdt_mutex); + + return ret; +} + +static int wm831x_wdt_stop(struct wm831x *wm831x) +{ + int ret; + + mutex_lock(&wdt_mutex); + + ret = wm831x_reg_unlock(wm831x); + if (ret == 0) { + ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG, + WM831X_WDOG_ENA, 0); + wm831x_reg_lock(wm831x); + } else { + dev_err(wm831x->dev, "Failed to unlock security key: %d\n", + ret); + } + + mutex_unlock(&wdt_mutex); + + return ret; +} + +static int wm831x_wdt_kick(struct wm831x *wm831x) +{ + int ret; + u16 reg; + + mutex_lock(&wdt_mutex); + + if (update_gpio) { + gpio_set_value_cansleep(update_gpio, update_state); + update_state = !update_state; + ret = 0; + goto out; + } + + + reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG); + + if (!(reg & WM831X_WDOG_RST_SRC)) { + dev_err(wm831x->dev, "Hardware watchdog update unsupported\n"); + ret = -EINVAL; + goto out; + } + + reg |= WM831X_WDOG_RESET; + + ret = wm831x_reg_unlock(wm831x); + if (ret == 0) { + ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg); + wm831x_reg_lock(wm831x); + } else { + dev_err(wm831x->dev, "Failed to unlock security key: %d\n", + ret); + } + +out: + mutex_unlock(&wdt_mutex); + + return ret; +} + +static int wm831x_wdt_open(struct inode *inode, struct file *file) +{ + int ret; + + if (!wm831x) + return -ENODEV; + + if (test_and_set_bit(0, &wm831x_wdt_users)) + return -EBUSY; + + ret = wm831x_wdt_start(wm831x); + if (ret != 0) + return ret; + + return nonseekable_open(inode, file); +} + +static int wm831x_wdt_release(struct inode *inode, struct file *file) +{ + if (wm831x_wdt_expect_close) + wm831x_wdt_stop(wm831x); + else { + dev_warn(wm831x->dev, "Watchdog device closed uncleanly\n"); + wm831x_wdt_kick(wm831x); + } + + clear_bit(0, &wm831x_wdt_users); + + return 0; +} + +static ssize_t wm831x_wdt_write(struct file *file, + const char __user *data, size_t count, + loff_t *ppos) +{ + size_t i; + + if (count) { + wm831x_wdt_kick(wm831x); + + if (!nowayout) { + /* In case it was set long ago */ + wm831x_wdt_expect_close = 0; + + /* scan to see whether or not we got the magic + character */ + for (i = 0; i != count; i++) { + char c; + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + wm831x_wdt_expect_close = 42; + } + } + } + return count; +} + +static struct watchdog_info ident = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "WM831x Watchdog", +}; + +static long wm831x_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -ENOTTY, time, i; + void __user 
*argp = (void __user *)arg; + int __user *p = argp; + u16 reg; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; + break; + + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + ret = put_user(0, p); + break; + + case WDIOC_SETOPTIONS: + { + int options; + + if (get_user(options, p)) + return -EFAULT; + + ret = -EINVAL; + + /* Setting both simultaneously means at least one must fail */ + if (options == WDIOS_DISABLECARD) + ret = wm831x_wdt_start(wm831x); + + if (options == WDIOS_ENABLECARD) + ret = wm831x_wdt_stop(wm831x); + break; + } + + case WDIOC_KEEPALIVE: + ret = wm831x_wdt_kick(wm831x); + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(time, p); + if (ret) + break; + + if (time == 0) { + if (nowayout) + ret = -EINVAL; + else + wm831x_wdt_stop(wm831x); + break; + } + + for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++) + if (wm831x_wdt_cfgs[i].time == time) + break; + if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) + ret = -EINVAL; + else + ret = wm831x_wdt_set_timeout(wm831x, + wm831x_wdt_cfgs[i].val); + break; + + case WDIOC_GETTIMEOUT: + reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG); + reg &= WM831X_WDOG_TO_MASK; + for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++) + if (wm831x_wdt_cfgs[i].val == reg) + break; + if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) { + dev_warn(wm831x->dev, + "Unknown watchdog configuration: %x\n", reg); + ret = -EINVAL; + } else + ret = put_user(wm831x_wdt_cfgs[i].time, p); + + } + + return ret; +} + +static const struct file_operations wm831x_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = wm831x_wdt_write, + .unlocked_ioctl = wm831x_wdt_ioctl, + .open = wm831x_wdt_open, + .release = wm831x_wdt_release, +}; + +static struct miscdevice wm831x_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &wm831x_wdt_fops, +}; + +static int __devinit wm831x_wdt_probe(struct platform_device *pdev) +{ + struct wm831x_pdata *chip_pdata; + struct wm831x_watchdog_pdata *pdata; + int reg, ret; + + wm831x = dev_get_drvdata(pdev->dev.parent); + + ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to read watchdog status: %d\n", + ret); + goto err; + } + reg = ret; + + if (reg & WM831X_WDOG_DEBUG) + dev_warn(wm831x->dev, "Watchdog is paused\n"); + + /* Apply any configuration */ + if (pdev->dev.parent->platform_data) { + chip_pdata = pdev->dev.parent->platform_data; + pdata = chip_pdata->watchdog; + } else { + pdata = NULL; + } + + if (pdata) { + reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK | + WM831X_WDOG_RST_SRC); + + reg |= pdata->primary << WM831X_WDOG_PRIMACT_SHIFT; + reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT; + reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT; + + if (pdata->update_gpio) { + ret = gpio_request(pdata->update_gpio, + "Watchdog update"); + if (ret < 0) { + dev_err(wm831x->dev, + "Failed to request update GPIO: %d\n", + ret); + goto err; + } + + ret = gpio_direction_output(pdata->update_gpio, 0); + if (ret != 0) { + dev_err(wm831x->dev, + "gpio_direction_output returned: %d\n", + ret); + goto err_gpio; + } + + update_gpio = pdata->update_gpio; + + /* Make sure the watchdog takes hardware updates */ + reg |= WM831X_WDOG_RST_SRC; + } + + ret = wm831x_reg_unlock(wm831x); + if (ret == 0) { + ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg); + wm831x_reg_lock(wm831x); + } else { + dev_err(wm831x->dev, + "Failed to unlock security key: %d\n", ret); + goto err_gpio; + } + } + + 
wm831x_wdt_miscdev.parent = &pdev->dev; + + ret = misc_register(&wm831x_wdt_miscdev); + if (ret != 0) { + dev_err(wm831x->dev, "Failed to register miscdev: %d\n", ret); + goto err_gpio; + } + + return 0; + +err_gpio: + if (update_gpio) { + gpio_free(update_gpio); + update_gpio = 0; + } +err: + return ret; +} + +static int __devexit wm831x_wdt_remove(struct platform_device *pdev) +{ + if (update_gpio) { + gpio_free(update_gpio); + update_gpio = 0; + } + + misc_deregister(&wm831x_wdt_miscdev); + + return 0; +} + +static struct platform_driver wm831x_wdt_driver = { + .probe = wm831x_wdt_probe, + .remove = __devexit_p(wm831x_wdt_remove), + .driver = { + .name = "wm831x-watchdog", + }, +}; + +static int __init wm831x_wdt_init(void) +{ + return platform_driver_register(&wm831x_wdt_driver); +} +module_init(wm831x_wdt_init); + +static void __exit wm831x_wdt_exit(void) +{ + platform_driver_unregister(&wm831x_wdt_driver); +} +module_exit(wm831x_wdt_exit); + +MODULE_AUTHOR("Mark Brown"); +MODULE_DESCRIPTION("WM831x Watchdog"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-watchdog"); diff --git a/include/linux/mfd/wm831x/watchdog.h b/include/linux/mfd/wm831x/watchdog.h new file mode 100644 index 00000000000..97a99b52956 --- /dev/null +++ b/include/linux/mfd/wm831x/watchdog.h @@ -0,0 +1,52 @@ +/* + * include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_WATCHDOG_H__ +#define __MFD_WM831X_WATCHDOG_H__ + + +/* + * R16388 (0x4004) - Watchdog + */ +#define WM831X_WDOG_ENA 0x8000 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_MASK 0x8000 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_SHIFT 15 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_WIDTH 1 /* WDOG_ENA */ +#define WM831X_WDOG_DEBUG 0x4000 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_MASK 0x4000 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_SHIFT 14 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_WIDTH 1 /* WDOG_DEBUG */ +#define WM831X_WDOG_RST_SRC 0x2000 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_MASK 0x2000 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_SHIFT 13 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_WIDTH 1 /* WDOG_RST_SRC */ +#define WM831X_WDOG_SLPENA 0x1000 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_MASK 0x1000 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_SHIFT 12 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_WIDTH 1 /* WDOG_SLPENA */ +#define WM831X_WDOG_RESET 0x0800 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_MASK 0x0800 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_SHIFT 11 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_WIDTH 1 /* WDOG_RESET */ +#define WM831X_WDOG_SECACT_MASK 0x0300 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_SECACT_SHIFT 8 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_SECACT_WIDTH 2 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_PRIMACT_MASK 0x0030 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_PRIMACT_SHIFT 4 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_PRIMACT_WIDTH 2 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_TO_MASK 0x0007 /* WDOG_TO - [2:0] */ +#define WM831X_WDOG_TO_SHIFT 0 /* WDOG_TO - [2:0] */ +#define WM831X_WDOG_TO_WIDTH 3 /* WDOG_TO - [2:0] */ + +#endif -- cgit v1.2.3-70-g09d2 From 6952b61de9984073289859073e8195ad0bee8fd5 Mon Sep 17 
00:00:00 2001 From: Alexey Dobriyan Date: Fri, 18 Sep 2009 23:55:55 +0400 Subject: headers: taskstats_kern.h trim Remove net/genetlink.h inclusion, now sched.c won't be recompiled because of some networking changes. Signed-off-by: Alexey Dobriyan Signed-off-by: Linus Torvalds --- include/linux/delayacct.h | 1 - include/linux/taskstats_kern.h | 1 - kernel/delayacct.c | 1 + mm/memory.c | 1 + 4 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index f352f06fa06..5076fe0c8a9 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -18,7 +18,6 @@ #define _LINUX_DELAYACCT_H #include -#include /* * Per-task flags relevant to delay accounting diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h index 7e9680f4afd..3398f455326 100644 --- a/include/linux/taskstats_kern.h +++ b/include/linux/taskstats_kern.h @@ -9,7 +9,6 @@ #include #include -#include #ifdef CONFIG_TASKSTATS extern struct kmem_cache *taskstats_cache; diff --git a/kernel/delayacct.c b/kernel/delayacct.c index abb6e17505e..ead9b610aa7 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include diff --git a/mm/memory.c b/mm/memory.c index aede2ce3aba..e8f63d9961e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -56,6 +56,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3-70-g09d2 From 5622f295b53fb60dbf9bed3e2c89d182490a8b7f Mon Sep 17 00:00:00 2001 From: Markus Metzger Date: Tue, 15 Sep 2009 13:00:23 +0200 Subject: x86, perf_counter, bts: Optimize BTS overflow handling Draining the BTS buffer on a buffer overflow interrupt takes too long resulting in a kernel lockup when tracing the kernel. Restructure perf_counter sampling into sample creation and sample output. Prepare a single reference sample for BTS sampling and update the from and to address fields when draining the BTS buffer. Drain the entire BTS buffer between a single perf_output_begin() / perf_output_end() pair. 
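For illustration, a minimal sketch of how a drain loop is meant to use the new split API (assuming only the declarations this patch adds to perf_counter.h); the example_drain helper and the example_record layout are placeholders modeled on the BTS drain path further down in this patch, not kernel interfaces themselves:

#include <linux/perf_counter.h>

/* Placeholder record layout for illustration only. */
struct example_record {
	u64 from;
	u64 to;
};

static void example_drain(struct perf_counter *counter,
			  struct example_record *at,
			  struct example_record *top)
{
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	data.period	= counter->hw.last_period;
	data.addr	= 0;
	regs.ip		= 0;

	/* Step 1: fill in the invariant sample fields once. */
	perf_prepare_sample(&header, &data, counter, &regs);

	/* Step 2: reserve output space for all records in one section. */
	if (perf_output_begin(&handle, counter,
			      header.size * (top - at), 1, 1))
		return;

	/* Step 3: emit one sample per record, touching only the
	 * per-record fields between iterations. */
	for (; at < top; at++) {
		data.ip   = at->from;
		data.addr = at->to;
		perf_output_sample(&handle, &header, &data, counter);
	}

	/* Step 4: release the output handle. */
	perf_output_end(&handle);
}
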
Signed-off-by: Markus Metzger Signed-off-by: Peter Zijlstra LKML-Reference: <20090915130023.A16204@sedona.ch.intel.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 60 ++++--- include/linux/perf_counter.h | 68 +++++++- kernel/perf_counter.c | 312 ++++++++++++++++++++----------------- 3 files changed, 266 insertions(+), 174 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index f9cd0849bd4..6a0e71b3812 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -36,10 +36,10 @@ static u64 perf_counter_mask __read_mostly; #define BTS_RECORD_SIZE 24 /* The size of a per-cpu BTS buffer in bytes: */ -#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024) +#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048) /* The BTS overflow threshold in bytes from the end of the buffer: */ -#define BTS_OVFL_TH (BTS_RECORD_SIZE * 64) +#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128) /* @@ -1488,8 +1488,7 @@ void perf_counter_print_debug(void) local_irq_restore(flags); } -static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, - struct perf_sample_data *data) +static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) { struct debug_store *ds = cpuc->ds; struct bts_record { @@ -1498,8 +1497,11 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, u64 flags; }; struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; - unsigned long orig_ip = data->regs->ip; struct bts_record *at, *top; + struct perf_output_handle handle; + struct perf_event_header header; + struct perf_sample_data data; + struct pt_regs regs; if (!counter) return; @@ -1510,19 +1512,38 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; top = (struct bts_record *)(unsigned long)ds->bts_index; + if (top <= at) + return; + ds->bts_index = ds->bts_buffer_base; + + data.period = counter->hw.last_period; + data.addr = 0; + regs.ip = 0; + + /* + * Prepare a generic sample, i.e. fill in the invariant fields. + * We will overwrite the from and to address before we output + * the sample. + */ + perf_prepare_sample(&header, &data, counter, ®s); + + if (perf_output_begin(&handle, counter, + header.size * (top - at), 1, 1)) + return; + for (; at < top; at++) { - data->regs->ip = at->from; - data->addr = at->to; + data.ip = at->from; + data.addr = at->to; - perf_counter_output(counter, 1, data); + perf_output_sample(&handle, &header, &data, counter); } - data->regs->ip = orig_ip; - data->addr = 0; + perf_output_end(&handle); /* There's new data available. */ + counter->hw.interrupts++; counter->pending_kill = POLL_IN; } @@ -1552,13 +1573,9 @@ static void x86_pmu_disable(struct perf_counter *counter) x86_perf_counter_update(counter, hwc, idx); /* Drain the remaining BTS records. 
*/ - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { - struct perf_sample_data data; - struct pt_regs regs; + if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) + intel_pmu_drain_bts_buffer(cpuc); - data.regs = ®s; - intel_pmu_drain_bts_buffer(cpuc, &data); - } cpuc->counters[idx] = NULL; clear_bit(idx, cpuc->used_mask); @@ -1619,7 +1636,6 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - data.regs = regs; data.addr = 0; cpuc = &__get_cpu_var(cpu_hw_counters); @@ -1644,7 +1660,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_counter_set_period(counter, hwc, idx)) continue; - if (perf_counter_overflow(counter, 1, &data)) + if (perf_counter_overflow(counter, 1, &data, regs)) p6_pmu_disable_counter(hwc, idx); } @@ -1665,13 +1681,12 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) int bit, loops; u64 ack, status; - data.regs = regs; data.addr = 0; cpuc = &__get_cpu_var(cpu_hw_counters); perf_disable(); - intel_pmu_drain_bts_buffer(cpuc, &data); + intel_pmu_drain_bts_buffer(cpuc); status = intel_pmu_get_status(); if (!status) { perf_enable(); @@ -1702,7 +1717,7 @@ again: data.period = counter->hw.last_period; - if (perf_counter_overflow(counter, 1, &data)) + if (perf_counter_overflow(counter, 1, &data, regs)) intel_pmu_disable_counter(&counter->hw, bit); } @@ -1729,7 +1744,6 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - data.regs = regs; data.addr = 0; cpuc = &__get_cpu_var(cpu_hw_counters); @@ -1754,7 +1768,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_counter_set_period(counter, hwc, idx)) continue; - if (perf_counter_overflow(counter, 1, &data)) + if (perf_counter_overflow(counter, 1, &data, regs)) amd_pmu_disable_counter(hwc, idx); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 6c1ef72ea50..c7375f97aa1 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -691,6 +691,17 @@ struct perf_cpu_context { int recursion[4]; }; +struct perf_output_handle { + struct perf_counter *counter; + struct perf_mmap_data *data; + unsigned long head; + unsigned long offset; + int nmi; + int sample; + int locked; + unsigned long flags; +}; + #ifdef CONFIG_PERF_COUNTERS /* @@ -722,16 +733,38 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader, extern void perf_counter_update_userpage(struct perf_counter *counter); struct perf_sample_data { - struct pt_regs *regs; + u64 type; + + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; u64 addr; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; u64 period; + struct perf_callchain_entry *callchain; struct perf_raw_record *raw; }; +extern void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_counter *counter); +extern void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_counter *counter, + struct pt_regs *regs); + extern int perf_counter_overflow(struct perf_counter *counter, int nmi, - struct perf_sample_data *data); -extern void perf_counter_output(struct perf_counter *counter, int nmi, - struct perf_sample_data *data); + struct perf_sample_data *data, + struct pt_regs *regs); /* * Return 1 for a software counter, 0 for a hardware counter @@ -781,6 +814,12 @@ extern void perf_tpcounter_event(int event_id, u64 addr, u64 count, #define perf_instruction_pointer(regs) 
instruction_pointer(regs) #endif +extern int perf_output_begin(struct perf_output_handle *handle, + struct perf_counter *counter, unsigned int size, + int nmi, int sample); +extern void perf_output_end(struct perf_output_handle *handle); +extern void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len); #else static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { } @@ -807,7 +846,28 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma) { } static inline void perf_counter_comm(struct task_struct *tsk) { } static inline void perf_counter_fork(struct task_struct *tsk) { } static inline void perf_counter_init(void) { } + +static inline int +perf_output_begin(struct perf_output_handle *handle, struct perf_counter *c, + unsigned int size, int nmi, int sample) { } +static inline void perf_output_end(struct perf_output_handle *handle) { } +static inline void +perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len) { } +static inline void +perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_counter *counter) { } +static inline void +perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_counter *counter, + struct pt_regs *regs) { } #endif +#define perf_output_put(handle, x) \ + perf_output_copy((handle), &(x), sizeof(x)) + #endif /* __KERNEL__ */ #endif /* _LINUX_PERF_COUNTER_H */ diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 29b73b6e814..215845243a6 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -2512,18 +2512,6 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) /* * Output */ - -struct perf_output_handle { - struct perf_counter *counter; - struct perf_mmap_data *data; - unsigned long head; - unsigned long offset; - int nmi; - int sample; - int locked; - unsigned long flags; -}; - static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, unsigned long offset, unsigned long head) { @@ -2633,8 +2621,8 @@ out: local_irq_restore(handle->flags); } -static void perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len) +void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len) { unsigned int pages_mask; unsigned int offset; @@ -2669,12 +2657,9 @@ static void perf_output_copy(struct perf_output_handle *handle, WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); } -#define perf_output_put(handle, x) \ - perf_output_copy((handle), &(x), sizeof(x)) - -static int perf_output_begin(struct perf_output_handle *handle, - struct perf_counter *counter, unsigned int size, - int nmi, int sample) +int perf_output_begin(struct perf_output_handle *handle, + struct perf_counter *counter, unsigned int size, + int nmi, int sample) { struct perf_counter *output_counter; struct perf_mmap_data *data; @@ -2756,7 +2741,7 @@ out: return -ENOSPC; } -static void perf_output_end(struct perf_output_handle *handle) +void perf_output_end(struct perf_output_handle *handle) { struct perf_counter *counter = handle->counter; struct perf_mmap_data *data = handle->data; @@ -2870,82 +2855,151 @@ static void perf_output_read(struct perf_output_handle *handle, perf_output_read_one(handle, counter); } -void perf_counter_output(struct perf_counter *counter, int nmi, - struct perf_sample_data *data) +void perf_output_sample(struct perf_output_handle 
*handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_counter *counter) +{ + u64 sample_type = data->type; + + perf_output_put(handle, *header); + + if (sample_type & PERF_SAMPLE_IP) + perf_output_put(handle, data->ip); + + if (sample_type & PERF_SAMPLE_TID) + perf_output_put(handle, data->tid_entry); + + if (sample_type & PERF_SAMPLE_TIME) + perf_output_put(handle, data->time); + + if (sample_type & PERF_SAMPLE_ADDR) + perf_output_put(handle, data->addr); + + if (sample_type & PERF_SAMPLE_ID) + perf_output_put(handle, data->id); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + perf_output_put(handle, data->stream_id); + + if (sample_type & PERF_SAMPLE_CPU) + perf_output_put(handle, data->cpu_entry); + + if (sample_type & PERF_SAMPLE_PERIOD) + perf_output_put(handle, data->period); + + if (sample_type & PERF_SAMPLE_READ) + perf_output_read(handle, counter); + + if (sample_type & PERF_SAMPLE_CALLCHAIN) { + if (data->callchain) { + int size = 1; + + if (data->callchain) + size += data->callchain->nr; + + size *= sizeof(u64); + + perf_output_copy(handle, data->callchain, size); + } else { + u64 nr = 0; + perf_output_put(handle, nr); + } + } + + if (sample_type & PERF_SAMPLE_RAW) { + if (data->raw) { + perf_output_put(handle, data->raw->size); + perf_output_copy(handle, data->raw->data, + data->raw->size); + } else { + struct { + u32 size; + u32 data; + } raw = { + .size = sizeof(u32), + .data = 0, + }; + perf_output_put(handle, raw); + } + } +} + +void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_counter *counter, + struct pt_regs *regs) { - int ret; u64 sample_type = counter->attr.sample_type; - struct perf_output_handle handle; - struct perf_event_header header; - u64 ip; - struct { - u32 pid, tid; - } tid_entry; - struct perf_callchain_entry *callchain = NULL; - int callchain_size = 0; - u64 time; - struct { - u32 cpu, reserved; - } cpu_entry; - header.type = PERF_EVENT_SAMPLE; - header.size = sizeof(header); + data->type = sample_type; - header.misc = 0; - header.misc |= perf_misc_flags(data->regs); + header->type = PERF_EVENT_SAMPLE; + header->size = sizeof(*header); + + header->misc = 0; + header->misc |= perf_misc_flags(regs); if (sample_type & PERF_SAMPLE_IP) { - ip = perf_instruction_pointer(data->regs); - header.size += sizeof(ip); + data->ip = perf_instruction_pointer(regs); + + header->size += sizeof(data->ip); } if (sample_type & PERF_SAMPLE_TID) { /* namespace issues */ - tid_entry.pid = perf_counter_pid(counter, current); - tid_entry.tid = perf_counter_tid(counter, current); + data->tid_entry.pid = perf_counter_pid(counter, current); + data->tid_entry.tid = perf_counter_tid(counter, current); - header.size += sizeof(tid_entry); + header->size += sizeof(data->tid_entry); } if (sample_type & PERF_SAMPLE_TIME) { /* * Maybe do better on x86 and provide cpu_clock_nmi() */ - time = sched_clock(); + data->time = sched_clock(); - header.size += sizeof(u64); + header->size += sizeof(data->time); } if (sample_type & PERF_SAMPLE_ADDR) - header.size += sizeof(u64); + header->size += sizeof(data->addr); - if (sample_type & PERF_SAMPLE_ID) - header.size += sizeof(u64); + if (sample_type & PERF_SAMPLE_ID) { + data->id = primary_counter_id(counter); - if (sample_type & PERF_SAMPLE_STREAM_ID) - header.size += sizeof(u64); + header->size += sizeof(data->id); + } + + if (sample_type & PERF_SAMPLE_STREAM_ID) { + data->stream_id = counter->id; + + header->size += sizeof(data->stream_id); + } if (sample_type 
& PERF_SAMPLE_CPU) { - header.size += sizeof(cpu_entry); + data->cpu_entry.cpu = raw_smp_processor_id(); + data->cpu_entry.reserved = 0; - cpu_entry.cpu = raw_smp_processor_id(); - cpu_entry.reserved = 0; + header->size += sizeof(data->cpu_entry); } if (sample_type & PERF_SAMPLE_PERIOD) - header.size += sizeof(u64); + header->size += sizeof(data->period); if (sample_type & PERF_SAMPLE_READ) - header.size += perf_counter_read_size(counter); + header->size += perf_counter_read_size(counter); if (sample_type & PERF_SAMPLE_CALLCHAIN) { - callchain = perf_callchain(data->regs); + int size = 1; - if (callchain) { - callchain_size = (1 + callchain->nr) * sizeof(u64); - header.size += callchain_size; - } else - header.size += sizeof(u64); + data->callchain = perf_callchain(regs); + + if (data->callchain) + size += data->callchain->nr; + + header->size += size * sizeof(u64); } if (sample_type & PERF_SAMPLE_RAW) { @@ -2957,69 +3011,23 @@ void perf_counter_output(struct perf_counter *counter, int nmi, size += sizeof(u32); WARN_ON_ONCE(size & (sizeof(u64)-1)); - header.size += size; - } - - ret = perf_output_begin(&handle, counter, header.size, nmi, 1); - if (ret) - return; - - perf_output_put(&handle, header); - - if (sample_type & PERF_SAMPLE_IP) - perf_output_put(&handle, ip); - - if (sample_type & PERF_SAMPLE_TID) - perf_output_put(&handle, tid_entry); - - if (sample_type & PERF_SAMPLE_TIME) - perf_output_put(&handle, time); - - if (sample_type & PERF_SAMPLE_ADDR) - perf_output_put(&handle, data->addr); - - if (sample_type & PERF_SAMPLE_ID) { - u64 id = primary_counter_id(counter); - - perf_output_put(&handle, id); + header->size += size; } +} - if (sample_type & PERF_SAMPLE_STREAM_ID) - perf_output_put(&handle, counter->id); - - if (sample_type & PERF_SAMPLE_CPU) - perf_output_put(&handle, cpu_entry); - - if (sample_type & PERF_SAMPLE_PERIOD) - perf_output_put(&handle, data->period); +static void perf_counter_output(struct perf_counter *counter, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct perf_output_handle handle; + struct perf_event_header header; - if (sample_type & PERF_SAMPLE_READ) - perf_output_read(&handle, counter); + perf_prepare_sample(&header, data, counter, regs); - if (sample_type & PERF_SAMPLE_CALLCHAIN) { - if (callchain) - perf_output_copy(&handle, callchain, callchain_size); - else { - u64 nr = 0; - perf_output_put(&handle, nr); - } - } + if (perf_output_begin(&handle, counter, header.size, nmi, 1)) + return; - if (sample_type & PERF_SAMPLE_RAW) { - if (data->raw) { - perf_output_put(&handle, data->raw->size); - perf_output_copy(&handle, data->raw->data, data->raw->size); - } else { - struct { - u32 size; - u32 data; - } raw = { - .size = sizeof(u32), - .data = 0, - }; - perf_output_put(&handle, raw); - } - } + perf_output_sample(&handle, &header, data, counter); perf_output_end(&handle); } @@ -3501,7 +3509,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable) */ static int __perf_counter_overflow(struct perf_counter *counter, int nmi, - int throttle, struct perf_sample_data *data) + int throttle, struct perf_sample_data *data, + struct pt_regs *regs) { int events = atomic_read(&counter->event_limit); struct hw_perf_counter *hwc = &counter->hw; @@ -3557,14 +3566,15 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi, perf_counter_disable(counter); } - perf_counter_output(counter, nmi, data); + perf_counter_output(counter, nmi, data, regs); return ret; } int perf_counter_overflow(struct perf_counter 
*counter, int nmi, - struct perf_sample_data *data) + struct perf_sample_data *data, + struct pt_regs *regs) { - return __perf_counter_overflow(counter, nmi, 1, data); + return __perf_counter_overflow(counter, nmi, 1, data, regs); } /* @@ -3602,7 +3612,8 @@ again: } static void perf_swcounter_overflow(struct perf_counter *counter, - int nmi, struct perf_sample_data *data) + int nmi, struct perf_sample_data *data, + struct pt_regs *regs) { struct hw_perf_counter *hwc = &counter->hw; int throttle = 0; @@ -3615,7 +3626,8 @@ static void perf_swcounter_overflow(struct perf_counter *counter, return; for (; overflow; overflow--) { - if (__perf_counter_overflow(counter, nmi, throttle, data)) { + if (__perf_counter_overflow(counter, nmi, throttle, + data, regs)) { /* * We inhibit the overflow from happening when * hwc->interrupts == MAX_INTERRUPTS. @@ -3634,7 +3646,8 @@ static void perf_swcounter_unthrottle(struct perf_counter *counter) } static void perf_swcounter_add(struct perf_counter *counter, u64 nr, - int nmi, struct perf_sample_data *data) + int nmi, struct perf_sample_data *data, + struct pt_regs *regs) { struct hw_perf_counter *hwc = &counter->hw; @@ -3643,11 +3656,11 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr, if (!hwc->sample_period) return; - if (!data->regs) + if (!regs) return; if (!atomic64_add_negative(nr, &hwc->period_left)) - perf_swcounter_overflow(counter, nmi, data); + perf_swcounter_overflow(counter, nmi, data, regs); } static int perf_swcounter_is_counting(struct perf_counter *counter) @@ -3706,7 +3719,8 @@ static int perf_swcounter_match(struct perf_counter *counter, static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, enum perf_type_id type, u32 event, u64 nr, int nmi, - struct perf_sample_data *data) + struct perf_sample_data *data, + struct pt_regs *regs) { struct perf_counter *counter; @@ -3715,8 +3729,8 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_swcounter_match(counter, type, event, data->regs)) - perf_swcounter_add(counter, nr, nmi, data); + if (perf_swcounter_match(counter, type, event, regs)) + perf_swcounter_add(counter, nr, nmi, data, regs); } rcu_read_unlock(); } @@ -3737,7 +3751,8 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) static void do_perf_swcounter_event(enum perf_type_id type, u32 event, u64 nr, int nmi, - struct perf_sample_data *data) + struct perf_sample_data *data, + struct pt_regs *regs) { struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); int *recursion = perf_swcounter_recursion_context(cpuctx); @@ -3750,7 +3765,7 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event, barrier(); perf_swcounter_ctx_event(&cpuctx->ctx, type, event, - nr, nmi, data); + nr, nmi, data, regs); rcu_read_lock(); /* * doesn't really matter which of the child contexts the @@ -3758,7 +3773,7 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event, */ ctx = rcu_dereference(current->perf_counter_ctxp); if (ctx) - perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data); + perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs); rcu_read_unlock(); barrier(); @@ -3772,11 +3787,11 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct perf_sample_data data = { - .regs = regs, .addr = addr, }; - do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data); + 
do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, + &data, regs); } static void perf_swcounter_read(struct perf_counter *counter) @@ -3813,6 +3828,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) { enum hrtimer_restart ret = HRTIMER_RESTART; struct perf_sample_data data; + struct pt_regs *regs; struct perf_counter *counter; u64 period; @@ -3820,17 +3836,17 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) counter->pmu->read(counter); data.addr = 0; - data.regs = get_irq_regs(); + regs = get_irq_regs(); /* * In case we exclude kernel IPs or are somehow not in interrupt * context, provide the next best thing, the user IP. */ - if ((counter->attr.exclude_kernel || !data.regs) && + if ((counter->attr.exclude_kernel || !regs) && !counter->attr.exclude_user) - data.regs = task_pt_regs(current); + regs = task_pt_regs(current); - if (data.regs) { - if (perf_counter_overflow(counter, 0, &data)) + if (regs) { + if (perf_counter_overflow(counter, 0, &data, regs)) ret = HRTIMER_NORESTART; } @@ -3966,15 +3982,17 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, }; struct perf_sample_data data = { - .regs = get_irq_regs(), .addr = addr, .raw = &raw, }; - if (!data.regs) - data.regs = task_pt_regs(current); + struct pt_regs *regs = get_irq_regs(); + + if (!regs) + regs = task_pt_regs(current); - do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data); + do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, + &data, regs); } EXPORT_SYMBOL_GPL(perf_tpcounter_event); -- cgit v1.2.3-70-g09d2 From fc5377668c3d808e1d53c4aee152c836f55c3490 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 17 Sep 2009 19:35:28 +0200 Subject: tracing: Remove markers Now that the last users of markers have migrated to the event tracer we can kill off the (now orphan) support code. Signed-off-by: Christoph Hellwig Acked-by: Mathieu Desnoyers Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <20090917173527.GA1699@lst.de> Signed-off-by: Ingo Molnar --- Documentation/markers.txt | 104 ---- arch/powerpc/platforms/cell/spufs/file.c | 1 - arch/powerpc/platforms/cell/spufs/sched.c | 1 - include/linux/kvm_host.h | 1 - include/linux/marker.h | 221 ------- include/linux/module.h | 11 - init/Kconfig | 7 - kernel/Makefile | 1 - kernel/marker.c | 930 ------------------------------ kernel/module.c | 18 - kernel/trace/trace_printk.c | 1 - samples/Kconfig | 6 - samples/Makefile | 2 +- samples/markers/Makefile | 4 - samples/markers/marker-example.c | 53 -- samples/markers/probe-example.c | 92 --- scripts/Makefile.modpost | 12 - 17 files changed, 1 insertion(+), 1464 deletions(-) delete mode 100644 Documentation/markers.txt delete mode 100644 include/linux/marker.h delete mode 100644 kernel/marker.c delete mode 100644 samples/markers/Makefile delete mode 100644 samples/markers/marker-example.c delete mode 100644 samples/markers/probe-example.c (limited to 'include') diff --git a/Documentation/markers.txt b/Documentation/markers.txt deleted file mode 100644 index d2b3d0e91b2..00000000000 --- a/Documentation/markers.txt +++ /dev/null @@ -1,104 +0,0 @@ - Using the Linux Kernel Markers - - Mathieu Desnoyers - - -This document introduces Linux Kernel Markers and their use. It provides -examples of how to insert markers in the kernel and connect probe functions to -them and provides some examples of probe functions. 
- - -* Purpose of markers - -A marker placed in code provides a hook to call a function (probe) that you can -provide at runtime. A marker can be "on" (a probe is connected to it) or "off" -(no probe is attached). When a marker is "off" it has no effect, except for -adding a tiny time penalty (checking a condition for a branch) and space -penalty (adding a few bytes for the function call at the end of the -instrumented function and adds a data structure in a separate section). When a -marker is "on", the function you provide is called each time the marker is -executed, in the execution context of the caller. When the function provided -ends its execution, it returns to the caller (continuing from the marker site). - -You can put markers at important locations in the code. Markers are -lightweight hooks that can pass an arbitrary number of parameters, -described in a printk-like format string, to the attached probe function. - -They can be used for tracing and performance accounting. - - -* Usage - -In order to use the macro trace_mark, you should include linux/marker.h. - -#include - -And, - -trace_mark(subsystem_event, "myint %d mystring %s", someint, somestring); -Where : -- subsystem_event is an identifier unique to your event - - subsystem is the name of your subsystem. - - event is the name of the event to mark. -- "myint %d mystring %s" is the formatted string for the serializer. "myint" and - "mystring" are repectively the field names associated with the first and - second parameter. -- someint is an integer. -- somestring is a char pointer. - -Connecting a function (probe) to a marker is done by providing a probe (function -to call) for the specific marker through marker_probe_register() and can be -activated by calling marker_arm(). Marker deactivation can be done by calling -marker_disarm() as many times as marker_arm() has been called. Removing a probe -is done through marker_probe_unregister(); it will disarm the probe. - -marker_synchronize_unregister() must be called between probe unregistration and -the first occurrence of -- the end of module exit function, - to make sure there is no caller left using the probe; -- the free of any resource used by the probes, - to make sure the probes wont be accessing invalid data. -This, and the fact that preemption is disabled around the probe call, make sure -that probe removal and module unload are safe. See the "Probe example" section -below for a sample probe module. - -The marker mechanism supports inserting multiple instances of the same marker. -Markers can be put in inline functions, inlined static functions, and -unrolled loops as well as regular functions. - -The naming scheme "subsystem_event" is suggested here as a convention intended -to limit collisions. Marker names are global to the kernel: they are considered -as being the same whether they are in the core kernel image or in modules. -Conflicting format strings for markers with the same name will cause the markers -to be detected to have a different format string not to be armed and will output -a printk warning which identifies the inconsistency: - -"Format mismatch for probe probe_name (format), marker (format)" - -Another way to use markers is to simply define the marker without generating any -function call to actually call into the marker. 
This is useful in combination -with tracepoint probes in a scheme like this : - -void probe_tracepoint_name(unsigned int arg1, struct task_struct *tsk); - -DEFINE_MARKER_TP(marker_eventname, tracepoint_name, probe_tracepoint_name, - "arg1 %u pid %d"); - -notrace void probe_tracepoint_name(unsigned int arg1, struct task_struct *tsk) -{ - struct marker *marker = &GET_MARKER(kernel_irq_entry); - /* write data to trace buffers ... */ -} - -* Probe / marker example - -See the example provided in samples/markers/src - -Compile them with your kernel. - -Run, as root : -modprobe marker-example (insmod order is not important) -modprobe probe-example -cat /proc/marker-example (returns an expected error) -rmmod marker-example probe-example -dmesg diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index ab8aef9bb8e..8f079b865ad 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index bb5b77c66d0..4678078fede 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 4af56036a6b..b7bbb5ddd7a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/include/linux/marker.h b/include/linux/marker.h deleted file mode 100644 index b85e74ca782..00000000000 --- a/include/linux/marker.h +++ /dev/null @@ -1,221 +0,0 @@ -#ifndef _LINUX_MARKER_H -#define _LINUX_MARKER_H - -/* - * Code markup for dynamic and static tracing. - * - * See Documentation/marker.txt. - * - * (C) Copyright 2006 Mathieu Desnoyers - * - * This file is released under the GPLv2. - * See the file COPYING for more details. - */ - -#include -#include - -struct module; -struct marker; - -/** - * marker_probe_func - Type of a marker probe function - * @probe_private: probe private data - * @call_private: call site private data - * @fmt: format string - * @args: variable argument list pointer. Use a pointer to overcome C's - * inability to pass this around as a pointer in a portable manner in - * the callee otherwise. - * - * Type of marker probe functions. They receive the mdata and need to parse the - * format string to recover the variable argument list. - */ -typedef void marker_probe_func(void *probe_private, void *call_private, - const char *fmt, va_list *args); - -struct marker_probe_closure { - marker_probe_func *func; /* Callback */ - void *probe_private; /* Private probe data */ -}; - -struct marker { - const char *name; /* Marker name */ - const char *format; /* Marker format string, describing the - * variable argument list. - */ - char state; /* Marker state. 
*/ - char ptype; /* probe type : 0 : single, 1 : multi */ - /* Probe wrapper */ - void (*call)(const struct marker *mdata, void *call_private, ...); - struct marker_probe_closure single; - struct marker_probe_closure *multi; - const char *tp_name; /* Optional tracepoint name */ - void *tp_cb; /* Optional tracepoint callback */ -} __attribute__((aligned(8))); - -#ifdef CONFIG_MARKERS - -#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \ - static const char __mstrtab_##name[] \ - __attribute__((section("__markers_strings"))) \ - = #name "\0" format; \ - static struct marker __mark_##name \ - __attribute__((section("__markers"), aligned(8))) = \ - { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \ - 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\ - NULL, tp_name_str, tp_cb } - -#define DEFINE_MARKER(name, format) \ - _DEFINE_MARKER(name, NULL, NULL, format) - -#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \ - _DEFINE_MARKER(name, #tp_name, tp_cb, format) - -/* - * Note : the empty asm volatile with read constraint is used here instead of a - * "used" attribute to fix a gcc 4.1.x bug. - * Make sure the alignment of the structure in the __markers section will - * not add unwanted padding between the beginning of the section and the - * structure. Force alignment to the same alignment as the section start. - * - * The "generic" argument controls which marker enabling mechanism must be used. - * If generic is true, a variable read is used. - * If generic is false, immediate values are used. - */ -#define __trace_mark(generic, name, call_private, format, args...) \ - do { \ - DEFINE_MARKER(name, format); \ - __mark_check_format(format, ## args); \ - if (unlikely(__mark_##name.state)) { \ - (*__mark_##name.call) \ - (&__mark_##name, call_private, ## args);\ - } \ - } while (0) - -#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \ - do { \ - void __check_tp_type(void) \ - { \ - register_trace_##tp_name(tp_cb); \ - } \ - DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \ - __mark_check_format(format, ## args); \ - (*__mark_##name.call)(&__mark_##name, call_private, \ - ## args); \ - } while (0) - -extern void marker_update_probe_range(struct marker *begin, - struct marker *end); - -#define GET_MARKER(name) (__mark_##name) - -#else /* !CONFIG_MARKERS */ -#define DEFINE_MARKER(name, tp_name, tp_cb, format) -#define __trace_mark(generic, name, call_private, format, args...) \ - __mark_check_format(format, ## args) -#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \ - do { \ - void __check_tp_type(void) \ - { \ - register_trace_##tp_name(tp_cb); \ - } \ - __mark_check_format(format, ## args); \ - } while (0) -static inline void marker_update_probe_range(struct marker *begin, - struct marker *end) -{ } -#define GET_MARKER(name) -#endif /* CONFIG_MARKERS */ - -/** - * trace_mark - Marker using code patching - * @name: marker name, not quoted. - * @format: format string - * @args...: variable argument list - * - * Places a marker using optimized code patching technique (imv_read()) - * to be enabled when immediate values are present. - */ -#define trace_mark(name, format, args...) \ - __trace_mark(0, name, NULL, format, ## args) - -/** - * _trace_mark - Marker using variable read - * @name: marker name, not quoted. - * @format: format string - * @args...: variable argument list - * - * Places a marker using a standard memory read (_imv_read()) to be - * enabled. 
Should be used for markers in code paths where instruction - * modification based enabling is not welcome. (__init and __exit functions, - * lockdep, some traps, printk). - */ -#define _trace_mark(name, format, args...) \ - __trace_mark(1, name, NULL, format, ## args) - -/** - * trace_mark_tp - Marker in a tracepoint callback - * @name: marker name, not quoted. - * @tp_name: tracepoint name, not quoted. - * @tp_cb: tracepoint callback. Should have an associated global symbol so it - * is not optimized away by the compiler (should not be static). - * @format: format string - * @args...: variable argument list - * - * Places a marker in a tracepoint callback. - */ -#define trace_mark_tp(name, tp_name, tp_cb, format, args...) \ - __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args) - -/** - * MARK_NOARGS - Format string for a marker with no argument. - */ -#define MARK_NOARGS " " - -/* To be used for string format validity checking with gcc */ -static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...) -{ -} - -#define __mark_check_format(format, args...) \ - do { \ - if (0) \ - ___mark_check_format(format, ## args); \ - } while (0) - -extern marker_probe_func __mark_empty_function; - -extern void marker_probe_cb(const struct marker *mdata, - void *call_private, ...); - -/* - * Connect a probe to a marker. - * private data pointer must be a valid allocated memory address, or NULL. - */ -extern int marker_probe_register(const char *name, const char *format, - marker_probe_func *probe, void *probe_private); - -/* - * Returns the private data given to marker_probe_register. - */ -extern int marker_probe_unregister(const char *name, - marker_probe_func *probe, void *probe_private); -/* - * Unregister a marker by providing the registered private data. - */ -extern int marker_probe_unregister_private_data(marker_probe_func *probe, - void *probe_private); - -extern void *marker_get_private_data(const char *name, marker_probe_func *probe, - int num); - -/* - * marker_synchronize_unregister must be called between the last marker probe - * unregistration and the first one of - * - the end of module exit function - * - the free of any resource used by the probes - * to ensure the code and data are valid for any possibly running probes. - */ -#define marker_synchronize_unregister() synchronize_sched() - -#endif diff --git a/include/linux/module.h b/include/linux/module.h index f8f92d015ef..1c755b2f937 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -327,10 +326,6 @@ struct module /* The command line arguments (may be mangled). 
People like keeping pointers to this stuff */ char *args; -#ifdef CONFIG_MARKERS - struct marker *markers; - unsigned int num_markers; -#endif #ifdef CONFIG_TRACEPOINTS struct tracepoint *tracepoints; unsigned int num_tracepoints; @@ -535,8 +530,6 @@ int unregister_module_notifier(struct notifier_block * nb); extern void print_modules(void); -extern void module_update_markers(void); - extern void module_update_tracepoints(void); extern int module_get_iter_tracepoints(struct tracepoint_iter *iter); @@ -651,10 +644,6 @@ static inline void print_modules(void) { } -static inline void module_update_markers(void) -{ -} - static inline void module_update_tracepoints(void) { } diff --git a/init/Kconfig b/init/Kconfig index 8e8b76d8a27..4cc0fa13d5e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1054,13 +1054,6 @@ config PROFILING config TRACEPOINTS bool -config MARKERS - bool "Activate markers" - select TRACEPOINTS - help - Place an empty function call at each marker site. Can be - dynamically changed for a probe function. - source "arch/Kconfig" config SLOW_WORK diff --git a/kernel/Makefile b/kernel/Makefile index 3d9c7e27e3f..7c9b0a58550 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -87,7 +87,6 @@ obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o -obj-$(CONFIG_MARKERS) += marker.o obj-$(CONFIG_TRACEPOINTS) += tracepoint.o obj-$(CONFIG_LATENCYTOP) += latencytop.o obj-$(CONFIG_FUNCTION_TRACER) += trace/ diff --git a/kernel/marker.c b/kernel/marker.c deleted file mode 100644 index ea54f264786..00000000000 --- a/kernel/marker.c +++ /dev/null @@ -1,930 +0,0 @@ -/* - * Copyright (C) 2007 Mathieu Desnoyers - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -extern struct marker __start___markers[]; -extern struct marker __stop___markers[]; - -/* Set to 1 to enable marker debug output */ -static const int marker_debug; - -/* - * markers_mutex nests inside module_mutex. Markers mutex protects the builtin - * and module markers and the hash table. - */ -static DEFINE_MUTEX(markers_mutex); - -/* - * Marker hash table, containing the active markers. - * Protected by module_mutex. - */ -#define MARKER_HASH_BITS 6 -#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS) -static struct hlist_head marker_table[MARKER_TABLE_SIZE]; - -/* - * Note about RCU : - * It is used to make sure every handler has finished using its private data - * between two consecutive operation (add or remove) on a given marker. It is - * also used to delay the free of multiple probes array until a quiescent state - * is reached. - * marker entries modifications are protected by the markers_mutex. 
- */ -struct marker_entry { - struct hlist_node hlist; - char *format; - /* Probe wrapper */ - void (*call)(const struct marker *mdata, void *call_private, ...); - struct marker_probe_closure single; - struct marker_probe_closure *multi; - int refcount; /* Number of times armed. 0 if disarmed. */ - struct rcu_head rcu; - void *oldptr; - int rcu_pending; - unsigned char ptype:1; - unsigned char format_allocated:1; - char name[0]; /* Contains name'\0'format'\0' */ -}; - -/** - * __mark_empty_function - Empty probe callback - * @probe_private: probe private data - * @call_private: call site private data - * @fmt: format string - * @...: variable argument list - * - * Empty callback provided as a probe to the markers. By providing this to a - * disabled marker, we make sure the execution flow is always valid even - * though the function pointer change and the marker enabling are two distinct - * operations that modifies the execution flow of preemptible code. - */ -notrace void __mark_empty_function(void *probe_private, void *call_private, - const char *fmt, va_list *args) -{ -} -EXPORT_SYMBOL_GPL(__mark_empty_function); - -/* - * marker_probe_cb Callback that prepares the variable argument list for probes. - * @mdata: pointer of type struct marker - * @call_private: caller site private data - * @...: Variable argument list. - * - * Since we do not use "typical" pointer based RCU in the 1 argument case, we - * need to put a full smp_rmb() in this branch. This is why we do not use - * rcu_dereference() for the pointer read. - */ -notrace void marker_probe_cb(const struct marker *mdata, - void *call_private, ...) -{ - va_list args; - char ptype; - - /* - * rcu_read_lock_sched does two things : disabling preemption to make - * sure the teardown of the callbacks can be done correctly when they - * are in modules and they insure RCU read coherency. - */ - rcu_read_lock_sched_notrace(); - ptype = mdata->ptype; - if (likely(!ptype)) { - marker_probe_func *func; - /* Must read the ptype before ptr. They are not data dependant, - * so we put an explicit smp_rmb() here. */ - smp_rmb(); - func = mdata->single.func; - /* Must read the ptr before private data. They are not data - * dependant, so we put an explicit smp_rmb() here. */ - smp_rmb(); - va_start(args, call_private); - func(mdata->single.probe_private, call_private, mdata->format, - &args); - va_end(args); - } else { - struct marker_probe_closure *multi; - int i; - /* - * Read mdata->ptype before mdata->multi. - */ - smp_rmb(); - multi = mdata->multi; - /* - * multi points to an array, therefore accessing the array - * depends on reading multi. However, even in this case, - * we must insure that the pointer is read _before_ the array - * data. Same as rcu_dereference, but we need a full smp_rmb() - * in the fast path, so put the explicit barrier here. - */ - smp_read_barrier_depends(); - for (i = 0; multi[i].func; i++) { - va_start(args, call_private); - multi[i].func(multi[i].probe_private, call_private, - mdata->format, &args); - va_end(args); - } - } - rcu_read_unlock_sched_notrace(); -} -EXPORT_SYMBOL_GPL(marker_probe_cb); - -/* - * marker_probe_cb Callback that does not prepare the variable argument list. - * @mdata: pointer of type struct marker - * @call_private: caller site private data - * @...: Variable argument list. - * - * Should be connected to markers "MARK_NOARGS". - */ -static notrace void marker_probe_cb_noarg(const struct marker *mdata, - void *call_private, ...) 
-{ - va_list args; /* not initialized */ - char ptype; - - rcu_read_lock_sched_notrace(); - ptype = mdata->ptype; - if (likely(!ptype)) { - marker_probe_func *func; - /* Must read the ptype before ptr. They are not data dependant, - * so we put an explicit smp_rmb() here. */ - smp_rmb(); - func = mdata->single.func; - /* Must read the ptr before private data. They are not data - * dependant, so we put an explicit smp_rmb() here. */ - smp_rmb(); - func(mdata->single.probe_private, call_private, mdata->format, - &args); - } else { - struct marker_probe_closure *multi; - int i; - /* - * Read mdata->ptype before mdata->multi. - */ - smp_rmb(); - multi = mdata->multi; - /* - * multi points to an array, therefore accessing the array - * depends on reading multi. However, even in this case, - * we must insure that the pointer is read _before_ the array - * data. Same as rcu_dereference, but we need a full smp_rmb() - * in the fast path, so put the explicit barrier here. - */ - smp_read_barrier_depends(); - for (i = 0; multi[i].func; i++) - multi[i].func(multi[i].probe_private, call_private, - mdata->format, &args); - } - rcu_read_unlock_sched_notrace(); -} - -static void free_old_closure(struct rcu_head *head) -{ - struct marker_entry *entry = container_of(head, - struct marker_entry, rcu); - kfree(entry->oldptr); - /* Make sure we free the data before setting the pending flag to 0 */ - smp_wmb(); - entry->rcu_pending = 0; -} - -static void debug_print_probes(struct marker_entry *entry) -{ - int i; - - if (!marker_debug) - return; - - if (!entry->ptype) { - printk(KERN_DEBUG "Single probe : %p %p\n", - entry->single.func, - entry->single.probe_private); - } else { - for (i = 0; entry->multi[i].func; i++) - printk(KERN_DEBUG "Multi probe %d : %p %p\n", i, - entry->multi[i].func, - entry->multi[i].probe_private); - } -} - -static struct marker_probe_closure * -marker_entry_add_probe(struct marker_entry *entry, - marker_probe_func *probe, void *probe_private) -{ - int nr_probes = 0; - struct marker_probe_closure *old, *new; - - WARN_ON(!probe); - - debug_print_probes(entry); - old = entry->multi; - if (!entry->ptype) { - if (entry->single.func == probe && - entry->single.probe_private == probe_private) - return ERR_PTR(-EBUSY); - if (entry->single.func == __mark_empty_function) { - /* 0 -> 1 probes */ - entry->single.func = probe; - entry->single.probe_private = probe_private; - entry->refcount = 1; - entry->ptype = 0; - debug_print_probes(entry); - return NULL; - } else { - /* 1 -> 2 probes */ - nr_probes = 1; - old = NULL; - } - } else { - /* (N -> N+1), (N != 0, 1) probes */ - for (nr_probes = 0; old[nr_probes].func; nr_probes++) - if (old[nr_probes].func == probe - && old[nr_probes].probe_private - == probe_private) - return ERR_PTR(-EBUSY); - } - /* + 2 : one for new probe, one for NULL func */ - new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure), - GFP_KERNEL); - if (new == NULL) - return ERR_PTR(-ENOMEM); - if (!old) - new[0] = entry->single; - else - memcpy(new, old, - nr_probes * sizeof(struct marker_probe_closure)); - new[nr_probes].func = probe; - new[nr_probes].probe_private = probe_private; - entry->refcount = nr_probes + 1; - entry->multi = new; - entry->ptype = 1; - debug_print_probes(entry); - return old; -} - -static struct marker_probe_closure * -marker_entry_remove_probe(struct marker_entry *entry, - marker_probe_func *probe, void *probe_private) -{ - int nr_probes = 0, nr_del = 0, i; - struct marker_probe_closure *old, *new; - - old = entry->multi; - - 
debug_print_probes(entry); - if (!entry->ptype) { - /* 0 -> N is an error */ - WARN_ON(entry->single.func == __mark_empty_function); - /* 1 -> 0 probes */ - WARN_ON(probe && entry->single.func != probe); - WARN_ON(entry->single.probe_private != probe_private); - entry->single.func = __mark_empty_function; - entry->refcount = 0; - entry->ptype = 0; - debug_print_probes(entry); - return NULL; - } else { - /* (N -> M), (N > 1, M >= 0) probes */ - for (nr_probes = 0; old[nr_probes].func; nr_probes++) { - if ((!probe || old[nr_probes].func == probe) - && old[nr_probes].probe_private - == probe_private) - nr_del++; - } - } - - if (nr_probes - nr_del == 0) { - /* N -> 0, (N > 1) */ - entry->single.func = __mark_empty_function; - entry->refcount = 0; - entry->ptype = 0; - } else if (nr_probes - nr_del == 1) { - /* N -> 1, (N > 1) */ - for (i = 0; old[i].func; i++) - if ((probe && old[i].func != probe) || - old[i].probe_private != probe_private) - entry->single = old[i]; - entry->refcount = 1; - entry->ptype = 0; - } else { - int j = 0; - /* N -> M, (N > 1, M > 1) */ - /* + 1 for NULL */ - new = kzalloc((nr_probes - nr_del + 1) - * sizeof(struct marker_probe_closure), GFP_KERNEL); - if (new == NULL) - return ERR_PTR(-ENOMEM); - for (i = 0; old[i].func; i++) - if ((probe && old[i].func != probe) || - old[i].probe_private != probe_private) - new[j++] = old[i]; - entry->refcount = nr_probes - nr_del; - entry->ptype = 1; - entry->multi = new; - } - debug_print_probes(entry); - return old; -} - -/* - * Get marker if the marker is present in the marker hash table. - * Must be called with markers_mutex held. - * Returns NULL if not present. - */ -static struct marker_entry *get_marker(const char *name) -{ - struct hlist_head *head; - struct hlist_node *node; - struct marker_entry *e; - u32 hash = jhash(name, strlen(name), 0); - - head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; - hlist_for_each_entry(e, node, head, hlist) { - if (!strcmp(name, e->name)) - return e; - } - return NULL; -} - -/* - * Add the marker to the marker hash table. Must be called with markers_mutex - * held. - */ -static struct marker_entry *add_marker(const char *name, const char *format) -{ - struct hlist_head *head; - struct hlist_node *node; - struct marker_entry *e; - size_t name_len = strlen(name) + 1; - size_t format_len = 0; - u32 hash = jhash(name, name_len-1, 0); - - if (format) - format_len = strlen(format) + 1; - head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; - hlist_for_each_entry(e, node, head, hlist) { - if (!strcmp(name, e->name)) { - printk(KERN_NOTICE - "Marker %s busy\n", name); - return ERR_PTR(-EBUSY); /* Already there */ - } - } - /* - * Using kmalloc here to allocate a variable length element. Could - * cause some memory fragmentation if overused. 
- */ - e = kmalloc(sizeof(struct marker_entry) + name_len + format_len, - GFP_KERNEL); - if (!e) - return ERR_PTR(-ENOMEM); - memcpy(&e->name[0], name, name_len); - if (format) { - e->format = &e->name[name_len]; - memcpy(e->format, format, format_len); - if (strcmp(e->format, MARK_NOARGS) == 0) - e->call = marker_probe_cb_noarg; - else - e->call = marker_probe_cb; - trace_mark(core_marker_format, "name %s format %s", - e->name, e->format); - } else { - e->format = NULL; - e->call = marker_probe_cb; - } - e->single.func = __mark_empty_function; - e->single.probe_private = NULL; - e->multi = NULL; - e->ptype = 0; - e->format_allocated = 0; - e->refcount = 0; - e->rcu_pending = 0; - hlist_add_head(&e->hlist, head); - return e; -} - -/* - * Remove the marker from the marker hash table. Must be called with mutex_lock - * held. - */ -static int remove_marker(const char *name) -{ - struct hlist_head *head; - struct hlist_node *node; - struct marker_entry *e; - int found = 0; - size_t len = strlen(name) + 1; - u32 hash = jhash(name, len-1, 0); - - head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; - hlist_for_each_entry(e, node, head, hlist) { - if (!strcmp(name, e->name)) { - found = 1; - break; - } - } - if (!found) - return -ENOENT; - if (e->single.func != __mark_empty_function) - return -EBUSY; - hlist_del(&e->hlist); - if (e->format_allocated) - kfree(e->format); - /* Make sure the call_rcu has been executed */ - if (e->rcu_pending) - rcu_barrier_sched(); - kfree(e); - return 0; -} - -/* - * Set the mark_entry format to the format found in the element. - */ -static int marker_set_format(struct marker_entry *entry, const char *format) -{ - entry->format = kstrdup(format, GFP_KERNEL); - if (!entry->format) - return -ENOMEM; - entry->format_allocated = 1; - - trace_mark(core_marker_format, "name %s format %s", - entry->name, entry->format); - return 0; -} - -/* - * Sets the probe callback corresponding to one marker. - */ -static int set_marker(struct marker_entry *entry, struct marker *elem, - int active) -{ - int ret = 0; - WARN_ON(strcmp(entry->name, elem->name) != 0); - - if (entry->format) { - if (strcmp(entry->format, elem->format) != 0) { - printk(KERN_NOTICE - "Format mismatch for probe %s " - "(%s), marker (%s)\n", - entry->name, - entry->format, - elem->format); - return -EPERM; - } - } else { - ret = marker_set_format(entry, elem->format); - if (ret) - return ret; - } - - /* - * probe_cb setup (statically known) is done here. It is - * asynchronous with the rest of execution, therefore we only - * pass from a "safe" callback (with argument) to an "unsafe" - * callback (does not set arguments). - */ - elem->call = entry->call; - /* - * Sanity check : - * We only update the single probe private data when the ptr is - * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1) - */ - WARN_ON(elem->single.func != __mark_empty_function - && elem->single.probe_private != entry->single.probe_private - && !elem->ptype); - elem->single.probe_private = entry->single.probe_private; - /* - * Make sure the private data is valid when we update the - * single probe ptr. - */ - smp_wmb(); - elem->single.func = entry->single.func; - /* - * We also make sure that the new probe callbacks array is consistent - * before setting a pointer to it. - */ - rcu_assign_pointer(elem->multi, entry->multi); - /* - * Update the function or multi probe array pointer before setting the - * ptype. 
- */ - smp_wmb(); - elem->ptype = entry->ptype; - - if (elem->tp_name && (active ^ elem->state)) { - WARN_ON(!elem->tp_cb); - /* - * It is ok to directly call the probe registration because type - * checking has been done in the __trace_mark_tp() macro. - */ - - if (active) { - /* - * try_module_get should always succeed because we hold - * lock_module() to get the tp_cb address. - */ - ret = try_module_get(__module_text_address( - (unsigned long)elem->tp_cb)); - BUG_ON(!ret); - ret = tracepoint_probe_register_noupdate( - elem->tp_name, - elem->tp_cb); - } else { - ret = tracepoint_probe_unregister_noupdate( - elem->tp_name, - elem->tp_cb); - /* - * tracepoint_probe_update_all() must be called - * before the module containing tp_cb is unloaded. - */ - module_put(__module_text_address( - (unsigned long)elem->tp_cb)); - } - } - elem->state = active; - - return ret; -} - -/* - * Disable a marker and its probe callback. - * Note: only waiting an RCU period after setting elem->call to the empty - * function insures that the original callback is not used anymore. This insured - * by rcu_read_lock_sched around the call site. - */ -static void disable_marker(struct marker *elem) -{ - int ret; - - /* leave "call" as is. It is known statically. */ - if (elem->tp_name && elem->state) { - WARN_ON(!elem->tp_cb); - /* - * It is ok to directly call the probe registration because type - * checking has been done in the __trace_mark_tp() macro. - */ - ret = tracepoint_probe_unregister_noupdate(elem->tp_name, - elem->tp_cb); - WARN_ON(ret); - /* - * tracepoint_probe_update_all() must be called - * before the module containing tp_cb is unloaded. - */ - module_put(__module_text_address((unsigned long)elem->tp_cb)); - } - elem->state = 0; - elem->single.func = __mark_empty_function; - /* Update the function before setting the ptype */ - smp_wmb(); - elem->ptype = 0; /* single probe */ - /* - * Leave the private data and id there, because removal is racy and - * should be done only after an RCU period. These are never used until - * the next initialization anyway. - */ -} - -/** - * marker_update_probe_range - Update a probe range - * @begin: beginning of the range - * @end: end of the range - * - * Updates the probe callback corresponding to a range of markers. - */ -void marker_update_probe_range(struct marker *begin, - struct marker *end) -{ - struct marker *iter; - struct marker_entry *mark_entry; - - mutex_lock(&markers_mutex); - for (iter = begin; iter < end; iter++) { - mark_entry = get_marker(iter->name); - if (mark_entry) { - set_marker(mark_entry, iter, !!mark_entry->refcount); - /* - * ignore error, continue - */ - } else { - disable_marker(iter); - } - } - mutex_unlock(&markers_mutex); -} - -/* - * Update probes, removing the faulty probes. - * - * Internal callback only changed before the first probe is connected to it. - * Single probe private data can only be changed on 0 -> 1 and 2 -> 1 - * transitions. All other transitions will leave the old private data valid. - * This makes the non-atomicity of the callback/private data updates valid. - * - * "special case" updates : - * 0 -> 1 callback - * 1 -> 0 callback - * 1 -> 2 callbacks - * 2 -> 1 callbacks - * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates. - * Site effect : marker_set_format may delete the marker entry (creating a - * replacement). - */ -static void marker_update_probes(void) -{ - /* Core kernel markers */ - marker_update_probe_range(__start___markers, __stop___markers); - /* Markers in modules. 
*/ - module_update_markers(); - tracepoint_probe_update_all(); -} - -/** - * marker_probe_register - Connect a probe to a marker - * @name: marker name - * @format: format string - * @probe: probe handler - * @probe_private: probe private data - * - * private data must be a valid allocated memory address, or NULL. - * Returns 0 if ok, error value on error. - * The probe address must at least be aligned on the architecture pointer size. - */ -int marker_probe_register(const char *name, const char *format, - marker_probe_func *probe, void *probe_private) -{ - struct marker_entry *entry; - int ret = 0; - struct marker_probe_closure *old; - - mutex_lock(&markers_mutex); - entry = get_marker(name); - if (!entry) { - entry = add_marker(name, format); - if (IS_ERR(entry)) - ret = PTR_ERR(entry); - } else if (format) { - if (!entry->format) - ret = marker_set_format(entry, format); - else if (strcmp(entry->format, format)) - ret = -EPERM; - } - if (ret) - goto end; - - /* - * If we detect that a call_rcu is pending for this marker, - * make sure it's executed now. - */ - if (entry->rcu_pending) - rcu_barrier_sched(); - old = marker_entry_add_probe(entry, probe, probe_private); - if (IS_ERR(old)) { - ret = PTR_ERR(old); - goto end; - } - mutex_unlock(&markers_mutex); - marker_update_probes(); - mutex_lock(&markers_mutex); - entry = get_marker(name); - if (!entry) - goto end; - if (entry->rcu_pending) - rcu_barrier_sched(); - entry->oldptr = old; - entry->rcu_pending = 1; - /* write rcu_pending before calling the RCU callback */ - smp_wmb(); - call_rcu_sched(&entry->rcu, free_old_closure); -end: - mutex_unlock(&markers_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(marker_probe_register); - -/** - * marker_probe_unregister - Disconnect a probe from a marker - * @name: marker name - * @probe: probe function pointer - * @probe_private: probe private data - * - * Returns the private data given to marker_probe_register, or an ERR_PTR(). - * We do not need to call a synchronize_sched to make sure the probes have - * finished running before doing a module unload, because the module unload - * itself uses stop_machine(), which insures that every preempt disabled section - * have finished. 
- */ -int marker_probe_unregister(const char *name, - marker_probe_func *probe, void *probe_private) -{ - struct marker_entry *entry; - struct marker_probe_closure *old; - int ret = -ENOENT; - - mutex_lock(&markers_mutex); - entry = get_marker(name); - if (!entry) - goto end; - if (entry->rcu_pending) - rcu_barrier_sched(); - old = marker_entry_remove_probe(entry, probe, probe_private); - mutex_unlock(&markers_mutex); - marker_update_probes(); - mutex_lock(&markers_mutex); - entry = get_marker(name); - if (!entry) - goto end; - if (entry->rcu_pending) - rcu_barrier_sched(); - entry->oldptr = old; - entry->rcu_pending = 1; - /* write rcu_pending before calling the RCU callback */ - smp_wmb(); - call_rcu_sched(&entry->rcu, free_old_closure); - remove_marker(name); /* Ignore busy error message */ - ret = 0; -end: - mutex_unlock(&markers_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(marker_probe_unregister); - -static struct marker_entry * -get_marker_from_private_data(marker_probe_func *probe, void *probe_private) -{ - struct marker_entry *entry; - unsigned int i; - struct hlist_head *head; - struct hlist_node *node; - - for (i = 0; i < MARKER_TABLE_SIZE; i++) { - head = &marker_table[i]; - hlist_for_each_entry(entry, node, head, hlist) { - if (!entry->ptype) { - if (entry->single.func == probe - && entry->single.probe_private - == probe_private) - return entry; - } else { - struct marker_probe_closure *closure; - closure = entry->multi; - for (i = 0; closure[i].func; i++) { - if (closure[i].func == probe && - closure[i].probe_private - == probe_private) - return entry; - } - } - } - } - return NULL; -} - -/** - * marker_probe_unregister_private_data - Disconnect a probe from a marker - * @probe: probe function - * @probe_private: probe private data - * - * Unregister a probe by providing the registered private data. - * Only removes the first marker found in hash table. - * Return 0 on success or error value. - * We do not need to call a synchronize_sched to make sure the probes have - * finished running before doing a module unload, because the module unload - * itself uses stop_machine(), which insures that every preempt disabled section - * have finished. - */ -int marker_probe_unregister_private_data(marker_probe_func *probe, - void *probe_private) -{ - struct marker_entry *entry; - int ret = 0; - struct marker_probe_closure *old; - - mutex_lock(&markers_mutex); - entry = get_marker_from_private_data(probe, probe_private); - if (!entry) { - ret = -ENOENT; - goto end; - } - if (entry->rcu_pending) - rcu_barrier_sched(); - old = marker_entry_remove_probe(entry, NULL, probe_private); - mutex_unlock(&markers_mutex); - marker_update_probes(); - mutex_lock(&markers_mutex); - entry = get_marker_from_private_data(probe, probe_private); - if (!entry) - goto end; - if (entry->rcu_pending) - rcu_barrier_sched(); - entry->oldptr = old; - entry->rcu_pending = 1; - /* write rcu_pending before calling the RCU callback */ - smp_wmb(); - call_rcu_sched(&entry->rcu, free_old_closure); - remove_marker(entry->name); /* Ignore busy error message */ -end: - mutex_unlock(&markers_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data); - -/** - * marker_get_private_data - Get a marker's probe private data - * @name: marker name - * @probe: probe to match - * @num: get the nth matching probe's private data - * - * Returns the nth private data pointer (starting from 0) matching, or an - * ERR_PTR. - * Returns the private data pointer, or an ERR_PTR. 
- * The private data pointer should _only_ be dereferenced if the caller is the - * owner of the data, or its content could vanish. This is mostly used to - * confirm that a caller is the owner of a registered probe. - */ -void *marker_get_private_data(const char *name, marker_probe_func *probe, - int num) -{ - struct hlist_head *head; - struct hlist_node *node; - struct marker_entry *e; - size_t name_len = strlen(name) + 1; - u32 hash = jhash(name, name_len-1, 0); - int i; - - head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; - hlist_for_each_entry(e, node, head, hlist) { - if (!strcmp(name, e->name)) { - if (!e->ptype) { - if (num == 0 && e->single.func == probe) - return e->single.probe_private; - } else { - struct marker_probe_closure *closure; - int match = 0; - closure = e->multi; - for (i = 0; closure[i].func; i++) { - if (closure[i].func != probe) - continue; - if (match++ == num) - return closure[i].probe_private; - } - } - break; - } - } - return ERR_PTR(-ENOENT); -} -EXPORT_SYMBOL_GPL(marker_get_private_data); - -#ifdef CONFIG_MODULES - -int marker_module_notify(struct notifier_block *self, - unsigned long val, void *data) -{ - struct module *mod = data; - - switch (val) { - case MODULE_STATE_COMING: - marker_update_probe_range(mod->markers, - mod->markers + mod->num_markers); - break; - case MODULE_STATE_GOING: - marker_update_probe_range(mod->markers, - mod->markers + mod->num_markers); - break; - } - return 0; -} - -struct notifier_block marker_module_nb = { - .notifier_call = marker_module_notify, - .priority = 0, -}; - -static int init_markers(void) -{ - return register_module_notifier(&marker_module_nb); -} -__initcall(init_markers); - -#endif /* CONFIG_MODULES */ diff --git a/kernel/module.c b/kernel/module.c index 05ce49ced8f..b6ee424245d 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2237,10 +2237,6 @@ static noinline struct module *load_module(void __user *umod, sizeof(*mod->ctors), &mod->num_ctors); #endif -#ifdef CONFIG_MARKERS - mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers", - sizeof(*mod->markers), &mod->num_markers); -#endif #ifdef CONFIG_TRACEPOINTS mod->tracepoints = section_objs(hdr, sechdrs, secstrings, "__tracepoints", @@ -2958,20 +2954,6 @@ void module_layout(struct module *mod, EXPORT_SYMBOL(module_layout); #endif -#ifdef CONFIG_MARKERS -void module_update_markers(void) -{ - struct module *mod; - - mutex_lock(&module_mutex); - list_for_each_entry(mod, &modules, list) - if (!mod->taints) - marker_update_probe_range(mod->markers, - mod->markers + mod->num_markers); - mutex_unlock(&module_mutex); -} -#endif - #ifdef CONFIG_TRACEPOINTS void module_update_tracepoints(void) { diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 687699d365a..2547d8813cf 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/samples/Kconfig b/samples/Kconfig index 428b065ba69..b92bde3c6a8 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -7,12 +7,6 @@ menuconfig SAMPLES if SAMPLES -config SAMPLE_MARKERS - tristate "Build markers examples -- loadable modules only" - depends on MARKERS && m - help - This build markers example modules. 
- config SAMPLE_TRACEPOINTS tristate "Build tracepoints examples -- loadable modules only" depends on TRACEPOINTS && m diff --git a/samples/Makefile b/samples/Makefile index 13e4b470b53..43343a03b1f 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -1,3 +1,3 @@ # Makefile for Linux samples code -obj-$(CONFIG_SAMPLES) += markers/ kobject/ kprobes/ tracepoints/ trace_events/ +obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ diff --git a/samples/markers/Makefile b/samples/markers/Makefile deleted file mode 100644 index 6d7231265f0..00000000000 --- a/samples/markers/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# builds the kprobes example kernel modules; -# then to use one (as root): insmod - -obj-$(CONFIG_SAMPLE_MARKERS) += probe-example.o marker-example.o diff --git a/samples/markers/marker-example.c b/samples/markers/marker-example.c deleted file mode 100644 index e9cd9c0bc84..00000000000 --- a/samples/markers/marker-example.c +++ /dev/null @@ -1,53 +0,0 @@ -/* marker-example.c - * - * Executes a marker when /proc/marker-example is opened. - * - * (C) Copyright 2007 Mathieu Desnoyers - * - * This file is released under the GPLv2. - * See the file COPYING for more details. - */ - -#include -#include -#include -#include - -struct proc_dir_entry *pentry_example; - -static int my_open(struct inode *inode, struct file *file) -{ - int i; - - trace_mark(subsystem_event, "integer %d string %s", 123, - "example string"); - for (i = 0; i < 10; i++) - trace_mark(subsystem_eventb, MARK_NOARGS); - return -EPERM; -} - -static struct file_operations mark_ops = { - .open = my_open, -}; - -static int __init example_init(void) -{ - printk(KERN_ALERT "example init\n"); - pentry_example = proc_create("marker-example", 0444, NULL, &mark_ops); - if (!pentry_example) - return -EPERM; - return 0; -} - -static void __exit example_exit(void) -{ - printk(KERN_ALERT "example exit\n"); - remove_proc_entry("marker-example", NULL); -} - -module_init(example_init) -module_exit(example_exit) - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("Marker example"); diff --git a/samples/markers/probe-example.c b/samples/markers/probe-example.c deleted file mode 100644 index 2dfb3b32937..00000000000 --- a/samples/markers/probe-example.c +++ /dev/null @@ -1,92 +0,0 @@ -/* probe-example.c - * - * Connects two functions to marker call sites. - * - * (C) Copyright 2007 Mathieu Desnoyers - * - * This file is released under the GPLv2. - * See the file COPYING for more details. 
- */ - -#include -#include -#include -#include -#include - -struct probe_data { - const char *name; - const char *format; - marker_probe_func *probe_func; -}; - -void probe_subsystem_event(void *probe_data, void *call_data, - const char *format, va_list *args) -{ - /* Declare args */ - unsigned int value; - const char *mystr; - - /* Assign args */ - value = va_arg(*args, typeof(value)); - mystr = va_arg(*args, typeof(mystr)); - - /* Call printk */ - printk(KERN_INFO "Value %u, string %s\n", value, mystr); - - /* or count, check rights, serialize data in a buffer */ -} - -atomic_t eventb_count = ATOMIC_INIT(0); - -void probe_subsystem_eventb(void *probe_data, void *call_data, - const char *format, va_list *args) -{ - /* Increment counter */ - atomic_inc(&eventb_count); -} - -static struct probe_data probe_array[] = -{ - { .name = "subsystem_event", - .format = "integer %d string %s", - .probe_func = probe_subsystem_event }, - { .name = "subsystem_eventb", - .format = MARK_NOARGS, - .probe_func = probe_subsystem_eventb }, -}; - -static int __init probe_init(void) -{ - int result; - int i; - - for (i = 0; i < ARRAY_SIZE(probe_array); i++) { - result = marker_probe_register(probe_array[i].name, - probe_array[i].format, - probe_array[i].probe_func, &probe_array[i]); - if (result) - printk(KERN_INFO "Unable to register probe %s\n", - probe_array[i].name); - } - return 0; -} - -static void __exit probe_fini(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(probe_array); i++) - marker_probe_unregister(probe_array[i].name, - probe_array[i].probe_func, &probe_array[i]); - printk(KERN_INFO "Number of event b : %u\n", - atomic_read(&eventb_count)); - marker_synchronize_unregister(); -} - -module_init(probe_init); -module_exit(probe_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mathieu Desnoyers"); -MODULE_DESCRIPTION("SUBSYSTEM Probe"); diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index f4053dc7b5d..8f14c81abbc 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -13,7 +13,6 @@ # 2) modpost is then used to # 3) create one .mod.c file pr. 
module # 4) create one Module.symvers file with CRC for all exported symbols -# 4a) [CONFIG_MARKERS] create one Module.markers file listing defined markers # 5) compile all .mod.c files # 6) final link of the module to a file @@ -59,10 +58,6 @@ include scripts/Makefile.lib kernelsymfile := $(objtree)/Module.symvers modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers -kernelmarkersfile := $(objtree)/Module.markers -modulemarkersfile := $(firstword $(KBUILD_EXTMOD))/Module.markers - -markersfile = $(if $(KBUILD_EXTMOD),$(modulemarkersfile),$(kernelmarkersfile)) # Step 1), find all modules listed in $(MODVERDIR)/ __modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod))) @@ -85,8 +80,6 @@ modpost = scripts/mod/modpost \ $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ - $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \ - $(if $(CONFIG_MARKERS),-M $(markersfile)) \ $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \ $(if $(cross_build),-c) @@ -101,17 +94,12 @@ quiet_cmd_kernel-mod = MODPOST $@ cmd_kernel-mod = $(modpost) $@ vmlinux.o: FORCE - @rm -fr $(kernelmarkersfile) $(call cmd,kernel-mod) # Declare generated files as targets for modpost $(symverfile): __modpost ; $(modules:.ko=.mod.c): __modpost ; -ifdef CONFIG_MARKERS -$(markersfile): __modpost ; -endif - # Step 5), compile all *.mod.c files -- cgit v1.2.3-70-g09d2 From 6a891a3111fe701517bb31c2204304724c7299c8 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Fri, 18 Sep 2009 22:45:43 +0200 Subject: i2c: Drop unused i2c_driver.id field Nobody is using i2c_driver.id any longer, so we can drop that field. Signed-off-by: Jean Delvare --- include/linux/i2c-id.h | 11 ----------- include/linux/i2c.h | 2 -- 2 files changed, 13 deletions(-) (limited to 'include') diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index c9087de5c6c..e844a0b1869 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -27,17 +27,6 @@ legacy chip driver needs to identify a bus or a bus driver needs to identify a legacy client. If you don't need them, just don't set them. */ -/* - * ---- Driver types ----------------------------------------------------- - */ - -#define I2C_DRIVERID_MSP3400 1 -#define I2C_DRIVERID_TUNER 2 -#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ -#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ -#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ -#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ - /* * ---- Adapter types ---------------------------------------------------- */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index f4784c0fe97..57d41b0abce 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -98,7 +98,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, /** * struct i2c_driver - represent an I2C device driver - * @id: Unique driver ID (optional) * @class: What kind of i2c device we instantiate (for detect) * @attach_adapter: Callback for bus addition (for legacy drivers) * @detach_adapter: Callback for bus removal (for legacy drivers) @@ -135,7 +134,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, * not allowed. 
*/ struct i2c_driver { - int id; unsigned int class; /* Notifies the driver that a new bus has appeared or is about to be -- cgit v1.2.3-70-g09d2 From 76b3e28fa728bb68895cbd8375f5ce233bd891de Mon Sep 17 00:00:00 2001 From: Crane Cai Date: Fri, 18 Sep 2009 22:45:50 +0200 Subject: i2c-piix4: Add AMD SB900 SMBus device ID Add new SMBus device ID for AMD SB900. Signed-off-by: Crane Cai Signed-off-by: Jean Delvare --- Documentation/i2c/busses/i2c-piix4 | 2 ++ drivers/i2c/busses/Kconfig | 3 ++- drivers/i2c/busses/i2c-piix4.c | 9 ++++++--- include/linux/pci_ids.h | 1 + 4 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4 index f889481762b..c5b37c57055 100644 --- a/Documentation/i2c/busses/i2c-piix4 +++ b/Documentation/i2c/busses/i2c-piix4 @@ -8,6 +8,8 @@ Supported adapters: Datasheet: Only available via NDA from ServerWorks * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges Datasheet: Not publicly available + * AMD SB900 + Datasheet: Not publicly available * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge Datasheet: Publicly available at the SMSC website http://www.smsc.com diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 8206442fbab..bb216c5fe30 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -113,7 +113,7 @@ config I2C_ISCH will be called i2c-isch. config I2C_PIIX4 - tristate "Intel PIIX4 and compatible (ATI/Serverworks/Broadcom/SMSC)" + tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)" depends on PCI help If you say yes to this option, support will be included for the Intel @@ -128,6 +128,7 @@ config I2C_PIIX4 ATI SB600 ATI SB700 ATI SB800 + AMD SB900 Serverworks OSB4 Serverworks CSB5 Serverworks CSB6 diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 0249a7d762b..a782c7a08f9 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -22,6 +22,7 @@ Intel PIIX4, 440MX Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 ATI IXP200, IXP300, IXP400, SB600, SB700, SB800 + AMD SB900 SMSC Victory66 Note: we assume there can only be one device, with one SMBus interface. 
@@ -479,6 +480,7 @@ static struct pci_device_id piix4_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, @@ -499,9 +501,10 @@ static int __devinit piix4_probe(struct pci_dev *dev, { int retval; - if ((dev->vendor == PCI_VENDOR_ID_ATI) && - (dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) && - (dev->revision >= 0x40)) + if ((dev->vendor == PCI_VENDOR_ID_ATI && + dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && + dev->revision >= 0x40) || + dev->vendor == PCI_VENDOR_ID_AMD) /* base address location etc changed in SB800 */ retval = piix4_setup_sb800(dev, id); else diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3b6b788fe2b..7803565aa87 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -543,6 +543,7 @@ #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 +#define PCI_DEVICE_ID_AMD_SB900_SMBUS 0x780b #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 -- cgit v1.2.3-70-g09d2 From ee2b805c8eb6459cf541ef141ff70dae17af59ca Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 15 Aug 2009 15:12:05 +0100 Subject: ARM: 5678/1: SSP/SPI PL022 polarity terminology fix The definition of the SPI clock phase for the Motorola mode of the PL022 driver was incorrect: the spec had been interpreted as data being recieved on rising or falling edge of the clocks while the correct interpretation is that data can be recieved on the first or second edge transition, falling or rising depending on the polarity setting. 
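As a minimal illustrative sketch (not part of this patch; only the
struct pl022_config_chip fields already used in the diff below are
assumed, and the variable name is hypothetical), a board file selecting
the common idle-low / sample-on-second-edge setting now reads:

struct pl022_config_chip example_chip_info = {
	.data_size = SSP_DATA_BITS_8,
	.clk_pol   = SSP_CLK_POL_IDLE_LOW,
	/* was SSP_CLK_FALLING_EDGE: with an idle-low clock the second
	 * transition happens to be the falling edge */
	.clk_phase = SSP_CLK_SECOND_EDGE,
};
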
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-u300/spi.c | 2 +- drivers/spi/amba-pl022.c | 8 ++++---- include/linux/amba/pl022.h | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c index 307d007ea7f..f0e887bea30 100644 --- a/arch/arm/mach-u300/spi.c +++ b/arch/arm/mach-u300/spi.c @@ -48,7 +48,7 @@ struct pl022_config_chip dummy_chip_info = { .data_size = SSP_DATA_BITS_8, /* used to be 12 in some default */ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, - .clk_phase = SSP_CLK_FALLING_EDGE, + .clk_phase = SSP_CLK_SECOND_EDGE, .clk_pol = SSP_CLK_POL_IDLE_LOW, .ctrl_len = SSP_BITS_12, .wait_state = SSP_MWIRE_WAIT_ZERO, diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index da76797ce8b..35521af0d0d 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -534,7 +534,7 @@ static void restore_state(struct pl022 *pl022) GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ - GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ @@ -1249,8 +1249,8 @@ static int verify_controller_parameters(struct pl022 *pl022, return -EINVAL; } if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { - if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE) - && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) { + if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE) + && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) { dev_err(chip_info->dev, "Clock Phase is configured incorrectly\n"); return -EINVAL; @@ -1487,7 +1487,7 @@ static int pl022_setup(struct spi_device *spi) chip_info->data_size = SSP_DATA_BITS_12; chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; - chip_info->clk_phase = SSP_CLK_FALLING_EDGE; + chip_info->clk_phase = SSP_CLK_SECOND_EDGE; chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; chip_info->ctrl_len = SSP_BITS_8; chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index dcad0ffd175..e4836c6b3dd 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h @@ -136,12 +136,12 @@ enum ssp_tx_level_trig { /** * enum SPI Clock Phase - clock phase (Motorola SPI interface only) - * @SSP_CLK_RISING_EDGE: Receive data on rising edge - * @SSP_CLK_FALLING_EDGE: Receive data on falling edge + * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity) + * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity) */ enum ssp_spi_clk_phase { - SSP_CLK_RISING_EDGE, - SSP_CLK_FALLING_EDGE + SSP_CLK_FIRST_EDGE, + SSP_CLK_SECOND_EDGE }; /** -- cgit v1.2.3-70-g09d2 From b6e760f3097501e60e76fbcb7a313d42da930c1f Mon Sep 17 00:00:00 2001 From: Patrick Boettcher Date: Mon, 3 Aug 2009 14:39:15 -0300 Subject: V4L/DVB (12892): DVB-API: add support for ISDB-T and ISDB-Tsb (version 5.1) This patch increments the DVB-API to version 5.1 in order to reflect the addition of ISDB-T and ISDB-Tsb on Linux' DVB-API. 
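As a rough user-space sketch (the device node, frequency and property
values below are arbitrary examples and not part of this patch), an
application tunes an ISDB-T channel through the existing FE_SET_PROPERTY
ioctl using the new commands:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);
struct dtv_property p[] = {
	{ .cmd = DTV_DELIVERY_SYSTEM,         .u.data = SYS_ISDBT },
	{ .cmd = DTV_FREQUENCY,               .u.data = 473143000 },
	{ .cmd = DTV_ISDBT_PARTIAL_RECEPTION, .u.data = 0 },
	{ .cmd = DTV_ISDBT_LAYERA_FEC,        .u.data = FEC_AUTO },
	{ .cmd = DTV_TUNE,                    .u.data = 0 },
};
struct dtv_properties props = {
	.num = sizeof(p) / sizeof(p[0]),
	.props = p,
};
ioctl(fd, FE_SET_PROPERTY, &props);
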
Changes in detail: - added a small document to describe how to use the API to tune to an ISDB-T or ISDB-Tsb channel - added necessary fields to dtv_frontend_cache - added a smarter clear-cache function which resets all fields of the dtv_frontend_cache - added a TRANSMISSION_MODE_4K to fe_transmit_mode_t Signed-off-by: Olivier Grenie Signed-off-by: Patrick Boettcher Signed-off-by: Mauro Carvalho Chehab --- drivers/media/dvb/dvb-core/dvb_frontend.c | 202 +++++++++++++++++++++++++++++- drivers/media/dvb/dvb-core/dvb_frontend.h | 14 +++ include/linux/dvb/frontend.h | 44 +++++-- include/linux/dvb/version.h | 2 +- 4 files changed, 247 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index d13ebcb0c6b..826080416c9 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -850,6 +850,49 @@ static int dvb_frontend_check_parameters(struct dvb_frontend *fe, return 0; } +static int dvb_frontend_clear_cache(struct dvb_frontend *fe) +{ + int i; + + memset(&(fe->dtv_property_cache), 0, + sizeof(struct dtv_frontend_properties)); + + fe->dtv_property_cache.state = DTV_CLEAR; + fe->dtv_property_cache.delivery_system = SYS_UNDEFINED; + fe->dtv_property_cache.inversion = INVERSION_AUTO; + fe->dtv_property_cache.fec_inner = FEC_AUTO; + fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO; + fe->dtv_property_cache.bandwidth_hz = BANDWIDTH_AUTO; + fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO; + fe->dtv_property_cache.hierarchy = HIERARCHY_AUTO; + fe->dtv_property_cache.symbol_rate = QAM_AUTO; + fe->dtv_property_cache.code_rate_HP = FEC_AUTO; + fe->dtv_property_cache.code_rate_LP = FEC_AUTO; + + fe->dtv_property_cache.isdbt_partial_reception = -1; + fe->dtv_property_cache.isdbt_sb_mode = -1; + fe->dtv_property_cache.isdbt_sb_subchannel = -1; + fe->dtv_property_cache.isdbt_sb_segment_idx = -1; + fe->dtv_property_cache.isdbt_sb_segment_count = -1; + fe->dtv_property_cache.isdbt_layer_enabled = 0x7; + for (i = 0; i < 3; i++) { + fe->dtv_property_cache.layer[i].fec = FEC_AUTO; + fe->dtv_property_cache.layer[i].modulation = QAM_AUTO; + fe->dtv_property_cache.layer[i].interleaving = -1; + fe->dtv_property_cache.layer[i].segment_count = -1; + } + + return 0; +} + +#define _DTV_CMD(n, s, b) \ +[n] = { \ + .name = #n, \ + .cmd = n, \ + .set = s,\ + .buffer = b \ +} + static struct dtv_cmds_h dtv_cmds[] = { [DTV_TUNE] = { .name = "DTV_TUNE", @@ -949,6 +992,43 @@ static struct dtv_cmds_h dtv_cmds[] = { .cmd = DTV_TRANSMISSION_MODE, .set = 1, }, + + _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 1, 0), + _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 1, 0), + _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 1, 0), + _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 1, 0), + _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0), + + _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0), + _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0), + 
_DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0), + _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0), + _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0), + _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0), + /* Get */ [DTV_DISEQC_SLAVE_REPLY] = { .name = "DTV_DISEQC_SLAVE_REPLY", @@ -956,6 +1036,7 @@ static struct dtv_cmds_h dtv_cmds[] = { .set = 0, .buffer = 1, }, + [DTV_API_VERSION] = { .name = "DTV_API_VERSION", .cmd = DTV_API_VERSION, @@ -1165,14 +1246,21 @@ static void dtv_property_adv_params_sync(struct dvb_frontend *fe) if(c->delivery_system == SYS_ISDBT) { /* Fake out a generic DVB-T request so we pass validation in the ioctl */ p->frequency = c->frequency; - p->inversion = INVERSION_AUTO; + p->inversion = c->inversion; p->u.ofdm.constellation = QAM_AUTO; p->u.ofdm.code_rate_HP = FEC_AUTO; p->u.ofdm.code_rate_LP = FEC_AUTO; - p->u.ofdm.bandwidth = BANDWIDTH_AUTO; p->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO; p->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO; p->u.ofdm.hierarchy_information = HIERARCHY_AUTO; + if (c->bandwidth_hz == 8000000) + p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ; + else if (c->bandwidth_hz == 7000000) + p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ; + else if (c->bandwidth_hz == 6000000) + p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ; + else + p->u.ofdm.bandwidth = BANDWIDTH_AUTO; } } @@ -1274,6 +1362,59 @@ static int dtv_property_process_get(struct dvb_frontend *fe, case DTV_HIERARCHY: tvp->u.data = fe->dtv_property_cache.hierarchy; break; + + /* ISDB-T Support here */ + case DTV_ISDBT_PARTIAL_RECEPTION: + tvp->u.data = fe->dtv_property_cache.isdbt_partial_reception; + break; + case DTV_ISDBT_SOUND_BROADCASTING: + tvp->u.data = fe->dtv_property_cache.isdbt_sb_mode; + break; + case DTV_ISDBT_SB_SUBCHANNEL_ID: + tvp->u.data = fe->dtv_property_cache.isdbt_sb_subchannel; + break; + case DTV_ISDBT_SB_SEGMENT_IDX: + tvp->u.data = fe->dtv_property_cache.isdbt_sb_segment_idx; + break; + case DTV_ISDBT_SB_SEGMENT_COUNT: + tvp->u.data = fe->dtv_property_cache.isdbt_sb_segment_count; + break; + case DTV_ISDBT_LAYERA_FEC: + tvp->u.data = fe->dtv_property_cache.layer[0].fec; + break; + case DTV_ISDBT_LAYERA_MODULATION: + tvp->u.data = fe->dtv_property_cache.layer[0].modulation; + break; + case DTV_ISDBT_LAYERA_SEGMENT_COUNT: + tvp->u.data = fe->dtv_property_cache.layer[0].segment_count; + break; + case DTV_ISDBT_LAYERA_TIME_INTERLEAVING: + tvp->u.data = fe->dtv_property_cache.layer[0].interleaving; + break; + case DTV_ISDBT_LAYERB_FEC: + tvp->u.data = fe->dtv_property_cache.layer[1].fec; + break; + case DTV_ISDBT_LAYERB_MODULATION: + tvp->u.data = fe->dtv_property_cache.layer[1].modulation; + break; + case DTV_ISDBT_LAYERB_SEGMENT_COUNT: + tvp->u.data = fe->dtv_property_cache.layer[1].segment_count; + break; + case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: + tvp->u.data = fe->dtv_property_cache.layer[1].interleaving; + break; + case DTV_ISDBT_LAYERC_FEC: + tvp->u.data = fe->dtv_property_cache.layer[2].fec; + break; + case DTV_ISDBT_LAYERC_MODULATION: + tvp->u.data = 
fe->dtv_property_cache.layer[2].modulation; + break; + case DTV_ISDBT_LAYERC_SEGMENT_COUNT: + tvp->u.data = fe->dtv_property_cache.layer[2].segment_count; + break; + case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: + tvp->u.data = fe->dtv_property_cache.layer[2].interleaving; + break; default: r = -1; } @@ -1302,10 +1443,8 @@ static int dtv_property_process_set(struct dvb_frontend *fe, /* Reset a cache of data specific to the frontend here. This does * not effect hardware. */ + dvb_frontend_clear_cache(fe); dprintk("%s() Flushing property cache\n", __func__); - memset(&fe->dtv_property_cache, 0, sizeof(struct dtv_frontend_properties)); - fe->dtv_property_cache.state = tvp->cmd; - fe->dtv_property_cache.delivery_system = SYS_UNDEFINED; break; case DTV_TUNE: /* interpret the cache of data, build either a traditional frontend @@ -1371,6 +1510,59 @@ static int dtv_property_process_set(struct dvb_frontend *fe, case DTV_HIERARCHY: fe->dtv_property_cache.hierarchy = tvp->u.data; break; + + /* ISDB-T Support here */ + case DTV_ISDBT_PARTIAL_RECEPTION: + fe->dtv_property_cache.isdbt_partial_reception = tvp->u.data; + break; + case DTV_ISDBT_SOUND_BROADCASTING: + fe->dtv_property_cache.isdbt_sb_mode = tvp->u.data; + break; + case DTV_ISDBT_SB_SUBCHANNEL_ID: + fe->dtv_property_cache.isdbt_sb_subchannel = tvp->u.data; + break; + case DTV_ISDBT_SB_SEGMENT_IDX: + fe->dtv_property_cache.isdbt_sb_segment_idx = tvp->u.data; + break; + case DTV_ISDBT_SB_SEGMENT_COUNT: + fe->dtv_property_cache.isdbt_sb_segment_count = tvp->u.data; + break; + case DTV_ISDBT_LAYERA_FEC: + fe->dtv_property_cache.layer[0].fec = tvp->u.data; + break; + case DTV_ISDBT_LAYERA_MODULATION: + fe->dtv_property_cache.layer[0].modulation = tvp->u.data; + break; + case DTV_ISDBT_LAYERA_SEGMENT_COUNT: + fe->dtv_property_cache.layer[0].segment_count = tvp->u.data; + break; + case DTV_ISDBT_LAYERA_TIME_INTERLEAVING: + fe->dtv_property_cache.layer[0].interleaving = tvp->u.data; + break; + case DTV_ISDBT_LAYERB_FEC: + fe->dtv_property_cache.layer[1].fec = tvp->u.data; + break; + case DTV_ISDBT_LAYERB_MODULATION: + fe->dtv_property_cache.layer[1].modulation = tvp->u.data; + break; + case DTV_ISDBT_LAYERB_SEGMENT_COUNT: + fe->dtv_property_cache.layer[1].segment_count = tvp->u.data; + break; + case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: + fe->dtv_property_cache.layer[1].interleaving = tvp->u.data; + break; + case DTV_ISDBT_LAYERC_FEC: + fe->dtv_property_cache.layer[2].fec = tvp->u.data; + break; + case DTV_ISDBT_LAYERC_MODULATION: + fe->dtv_property_cache.layer[2].modulation = tvp->u.data; + break; + case DTV_ISDBT_LAYERC_SEGMENT_COUNT: + fe->dtv_property_cache.layer[2].segment_count = tvp->u.data; + break; + case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: + fe->dtv_property_cache.layer[2].interleaving = tvp->u.data; + break; default: r = -1; } diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h index e176da472d7..9e46f1772c5 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.h +++ b/drivers/media/dvb/dvb-core/dvb_frontend.h @@ -341,6 +341,20 @@ struct dtv_frontend_properties { fe_rolloff_t rolloff; fe_delivery_system_t delivery_system; + + /* ISDB-T specifics */ + u8 isdbt_partial_reception; + u8 isdbt_sb_mode; + u8 isdbt_sb_subchannel; + u32 isdbt_sb_segment_idx; + u32 isdbt_sb_segment_count; + u8 isdbt_layer_enabled; + struct { + u8 segment_count; + fe_code_rate_t fec; + fe_modulation_t modulation; + u8 interleaving; + } layer[3]; }; struct dvb_frontend { diff --git a/include/linux/dvb/frontend.h 
b/include/linux/dvb/frontend.h index 51c8d2d49e4..25b01c14727 100644 --- a/include/linux/dvb/frontend.h +++ b/include/linux/dvb/frontend.h @@ -173,7 +173,8 @@ typedef enum fe_modulation { typedef enum fe_transmit_mode { TRANSMISSION_MODE_2K, TRANSMISSION_MODE_8K, - TRANSMISSION_MODE_AUTO + TRANSMISSION_MODE_AUTO, + TRANSMISSION_MODE_4K } fe_transmit_mode_t; typedef enum fe_bandwidth { @@ -268,15 +269,40 @@ struct dvb_frontend_event { #define DTV_FE_CAPABILITY 16 #define DTV_DELIVERY_SYSTEM 17 -#define DTV_API_VERSION 35 -#define DTV_API_VERSION 35 -#define DTV_CODE_RATE_HP 36 -#define DTV_CODE_RATE_LP 37 -#define DTV_GUARD_INTERVAL 38 -#define DTV_TRANSMISSION_MODE 39 -#define DTV_HIERARCHY 40 +/* ISDB-T and ISDB-Tsb */ +#define DTV_ISDBT_PARTIAL_RECEPTION 18 +#define DTV_ISDBT_SOUND_BROADCASTING 19 -#define DTV_MAX_COMMAND DTV_HIERARCHY +#define DTV_ISDBT_SB_SUBCHANNEL_ID 20 +#define DTV_ISDBT_SB_SEGMENT_IDX 21 +#define DTV_ISDBT_SB_SEGMENT_COUNT 22 + +#define DTV_ISDBT_LAYERA_FEC 23 +#define DTV_ISDBT_LAYERA_MODULATION 24 +#define DTV_ISDBT_LAYERA_SEGMENT_COUNT 25 +#define DTV_ISDBT_LAYERA_TIME_INTERLEAVING 26 + +#define DTV_ISDBT_LAYERB_FEC 27 +#define DTV_ISDBT_LAYERB_MODULATION 28 +#define DTV_ISDBT_LAYERB_SEGMENT_COUNT 29 +#define DTV_ISDBT_LAYERB_TIME_INTERLEAVING 30 + +#define DTV_ISDBT_LAYERC_FEC 31 +#define DTV_ISDBT_LAYERC_MODULATION 32 +#define DTV_ISDBT_LAYERC_SEGMENT_COUNT 33 +#define DTV_ISDBT_LAYERC_TIME_INTERLEAVING 34 + +#define DTV_API_VERSION 35 + +#define DTV_CODE_RATE_HP 36 +#define DTV_CODE_RATE_LP 37 +#define DTV_GUARD_INTERVAL 38 +#define DTV_TRANSMISSION_MODE 39 +#define DTV_HIERARCHY 40 + +#define DTV_ISDBT_LAYER_ENABLED 41 + +#define DTV_MAX_COMMAND DTV_ISDBT_LAYER_ENABLED typedef enum fe_pilot { PILOT_ON, diff --git a/include/linux/dvb/version.h b/include/linux/dvb/version.h index 25b823b8173..540b0583d9f 100644 --- a/include/linux/dvb/version.h +++ b/include/linux/dvb/version.h @@ -24,6 +24,6 @@ #define _DVBVERSION_H_ #define DVB_API_VERSION 5 -#define DVB_API_VERSION_MINOR 0 +#define DVB_API_VERSION_MINOR 1 #endif /*_DVBVERSION_H_*/ -- cgit v1.2.3-70-g09d2 From 9afef394308cf63ddb67b003a1c6036615456eb9 Mon Sep 17 00:00:00 2001 From: Steven Toth Date: Sat, 9 May 2009 13:24:12 -0300 Subject: V4L/DVB (12922): Add the SAA7164 I2C bus identifier Add the SAA7164 I2C bus identifier Signed-off-by: Steven Toth Signed-off-by: Mauro Carvalho Chehab --- include/linux/i2c-id.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index c9087de5c6c..271b67a3167 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -53,6 +53,7 @@ #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ #define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ #define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ +#define I2C_HW_B_SAA7164 0x010024 /* NXP 7164 based tv cards */ /* --- SGI adapters */ #define I2C_HW_SGI_VINO 0x160000 -- cgit v1.2.3-70-g09d2 From e558170a91677d3065be3922bb4467d8969d875c Mon Sep 17 00:00:00 2001 From: Antti Palosaari Date: Tue, 15 Sep 2009 14:37:20 -0300 Subject: V4L/DVB (12950): tuner-simple: add Philips CU1216L add Philips CU1216L NIM Signed-off-by: Antti Palosaari Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/CARDLIST.tuner | 1 + drivers/media/common/tuners/tuner-types.c | 23 +++++++++++++++++++++++ include/media/tuner.h | 1 + 3 files changed, 25 insertions(+) (limited to 'include') diff --git a/Documentation/video4linux/CARDLIST.tuner 
b/Documentation/video4linux/CARDLIST.tuner index ba9fa679e2d..3561b09fb41 100644 --- a/Documentation/video4linux/CARDLIST.tuner +++ b/Documentation/video4linux/CARDLIST.tuner @@ -79,3 +79,4 @@ tuner=78 - Philips FMD1216MEX MK3 Hybrid Tuner tuner=79 - Philips PAL/SECAM multi (FM1216 MK5) tuner=80 - Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough tuner=81 - Partsnic (Daewoo) PTI-5NF05 +tuner=82 - Philips CU1216L diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c index 5c6ef1e23c9..a2204df796e 100644 --- a/drivers/media/common/tuners/tuner-types.c +++ b/drivers/media/common/tuners/tuner-types.c @@ -1320,6 +1320,23 @@ static struct tuner_params tuner_partsnic_pti_5nf05_params[] = { }, }; +/* --------- TUNER_PHILIPS_CU1216L - DVB-C NIM ------------------------- */ + +static struct tuner_range tuner_cu1216l_ranges[] = { + { 16 * 160.25 /*MHz*/, 0xce, 0x01 }, + { 16 * 444.25 /*MHz*/, 0xce, 0x02 }, + { 16 * 999.99 , 0xce, 0x04 }, +}; + +static struct tuner_params tuner_philips_cu1216l_params[] = { + { + .type = TUNER_PARAM_TYPE_DIGITAL, + .ranges = tuner_cu1216l_ranges, + .count = ARRAY_SIZE(tuner_cu1216l_ranges), + .iffreq = 16 * 36.125, /*MHz*/ + }, +}; + /* --------------------------------------------------------------------- */ struct tunertype tuners[] = { @@ -1778,6 +1795,12 @@ struct tunertype tuners[] = { .params = tuner_partsnic_pti_5nf05_params, .count = ARRAY_SIZE(tuner_partsnic_pti_5nf05_params), }, + [TUNER_PHILIPS_CU1216L] = { + .name = "Philips CU1216L", + .params = tuner_philips_cu1216l_params, + .count = ARRAY_SIZE(tuner_philips_cu1216l_params), + .stepsize = 62500, + }, }; EXPORT_SYMBOL(tuners); diff --git a/include/media/tuner.h b/include/media/tuner.h index c146f2f530b..b1f57e175e9 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -127,6 +127,7 @@ #define TUNER_PHILIPS_FM1216MK5 79 #define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */ #define TUNER_PARTSNIC_PTI_5NF05 81 +#define TUNER_PHILIPS_CU1216L 82 /* tv card specific */ #define TDA9887_PRESENT (1<<0) -- cgit v1.2.3-70-g09d2 From 93463895ae0a87b689d71d65c44d5ccdcd950dc4 Mon Sep 17 00:00:00 2001 From: Michael Krufky Date: Tue, 15 Sep 2009 23:04:18 -0300 Subject: V4L/DVB (12964): tuner-core: add support for NXP TDA18271 without TDA829X demod Add support for NXP TDA18271 as a standalone tuner, allowing the use of analog demodulators other than the Philips/NXP TDA829x. 
Signed-off-by: Michael Krufky Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/CARDLIST.tuner | 1 + drivers/media/common/tuners/tuner-types.c | 4 ++++ drivers/media/video/tuner-core.c | 12 ++++++++++++ include/media/tuner.h | 1 + 4 files changed, 18 insertions(+) (limited to 'include') diff --git a/Documentation/video4linux/CARDLIST.tuner b/Documentation/video4linux/CARDLIST.tuner index 3561b09fb41..e0d298fe883 100644 --- a/Documentation/video4linux/CARDLIST.tuner +++ b/Documentation/video4linux/CARDLIST.tuner @@ -80,3 +80,4 @@ tuner=79 - Philips PAL/SECAM multi (FM1216 MK5) tuner=80 - Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough tuner=81 - Partsnic (Daewoo) PTI-5NF05 tuner=82 - Philips CU1216L +tuner=83 - NXP TDA18271 diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c index a2204df796e..2b876f3988c 100644 --- a/drivers/media/common/tuners/tuner-types.c +++ b/drivers/media/common/tuners/tuner-types.c @@ -1801,6 +1801,10 @@ struct tunertype tuners[] = { .count = ARRAY_SIZE(tuner_philips_cu1216l_params), .stepsize = 62500, }, + [TUNER_NXP_TDA18271] = { + .name = "NXP TDA18271", + /* see tda18271-fe.c for details */ + }, }; EXPORT_SYMBOL(tuners); diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c index 2816f183923..aba92e2313d 100644 --- a/drivers/media/video/tuner-core.c +++ b/drivers/media/video/tuner-core.c @@ -29,6 +29,7 @@ #include "tuner-simple.h" #include "tda9887.h" #include "xc5000.h" +#include "tda18271.h" #define UNSET (-1U) @@ -420,6 +421,17 @@ static void set_type(struct i2c_client *c, unsigned int type, goto attach_failed; break; } + case TUNER_NXP_TDA18271: + { + struct tda18271_config cfg = { + .config = t->config, + }; + + if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr, + t->i2c->adapter, &cfg)) + goto attach_failed; + break; + } default: if (!dvb_attach(simple_tuner_attach, &t->fe, t->i2c->adapter, t->i2c->addr, t->type)) diff --git a/include/media/tuner.h b/include/media/tuner.h index b1f57e175e9..4d5b53ff17d 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -128,6 +128,7 @@ #define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */ #define TUNER_PARTSNIC_PTI_5NF05 81 #define TUNER_PHILIPS_CU1216L 82 +#define TUNER_NXP_TDA18271 83 /* tv card specific */ #define TDA9887_PRESENT (1<<0) -- cgit v1.2.3-70-g09d2 From c9230457a98f33fe3658fd0bd9b9ceffdd97b63b Mon Sep 17 00:00:00 2001 From: Steven Toth Date: Thu, 17 Sep 2009 15:05:38 -0300 Subject: V4L/DVB (12974): SAA7164: Remove the SAA7164 bus id, no longer required. SAA7164: Remove the SAA7164 bus id, no longer required. 
Signed-off-by: Steven Toth Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/saa7164/saa7164-i2c.c | 1 - include/linux/i2c-id.h | 1 - 2 files changed, 2 deletions(-) (limited to 'include') diff --git a/drivers/media/video/saa7164/saa7164-i2c.c b/drivers/media/video/saa7164/saa7164-i2c.c index 4c431b4ac8d..8198e6833f7 100644 --- a/drivers/media/video/saa7164/saa7164-i2c.c +++ b/drivers/media/video/saa7164/saa7164-i2c.c @@ -116,7 +116,6 @@ static struct i2c_algorithm saa7164_i2c_algo_template = { static struct i2c_adapter saa7164_i2c_adap_template = { .name = "saa7164", .owner = THIS_MODULE, - .id = I2C_HW_B_SAA7164, .algo = &saa7164_i2c_algo_template, .client_register = attach_inform, .client_unregister = detach_inform, diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 271b67a3167..c9087de5c6c 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -53,7 +53,6 @@ #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ #define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ #define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ -#define I2C_HW_B_SAA7164 0x010024 /* NXP 7164 based tv cards */ /* --- SGI adapters */ #define I2C_HW_SGI_VINO 0x160000 -- cgit v1.2.3-70-g09d2 From 62ef80a1f3fb69711dc7e59394ddc8e88097a7cc Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Fri, 19 Jun 2009 07:13:44 -0300 Subject: V4L/DVB (12246): tvp514x: Migration to sub-device framework This patch converts TVP514x driver to sub-device framework from V4L2-int framework. [hverkuil@xs4all.nl: remove inline from the dump_reg function] Signed-off-by: Brijesh Jadav Signed-off-by: Hardik Shah Signed-off-by: Vaibhav Hiremath Signed-off-by: Murali Karicheri Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/tvp514x.c | 875 +++++++++++++++---------------------- drivers/media/video/tvp514x_regs.h | 10 - include/media/tvp514x.h | 4 - 3 files changed, 349 insertions(+), 540 deletions(-) (limited to 'include') diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c index 3750f7fadb1..c8386e2f7a9 100644 --- a/drivers/media/video/tvp514x.c +++ b/drivers/media/video/tvp514x.c @@ -31,7 +31,10 @@ #include #include #include -#include + +#include +#include +#include #include #include "tvp514x_regs.h" @@ -49,13 +52,11 @@ static int debug; module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); -#define dump_reg(client, reg, val) \ - do { \ - val = tvp514x_read_reg(client, reg); \ - v4l_info(client, "Reg(0x%.2X): 0x%.2X\n", reg, val); \ - } while (0) +MODULE_AUTHOR("Texas Instruments"); +MODULE_DESCRIPTION("TVP514X linux decoder driver"); +MODULE_LICENSE("GPL"); -/** +/* * enum tvp514x_std - enum for supported standards */ enum tvp514x_std { @@ -64,15 +65,7 @@ enum tvp514x_std { STD_INVALID }; -/** - * enum tvp514x_state - enum for different decoder states - */ -enum tvp514x_state { - STATE_NOT_DETECTED, - STATE_DETECTED -}; - -/** +/* * struct tvp514x_std_info - Structure to store standard informations * @width: Line width in pixels * @height:Number of active lines @@ -87,35 +80,29 @@ struct tvp514x_std_info { }; static struct tvp514x_reg tvp514x_reg_list_default[0x40]; -/** +/* * struct tvp514x_decoder - TVP5146/47 decoder object - * @v4l2_int_device: Slave handle - * @tvp514x_slave: Slave pointer which is used by @v4l2_int_device + * @sd: Subdevice Slave handle * @tvp514x_regs: copy of hw's regs with preset values. 
* @pdata: Board specific - * @client: I2C client data - * @id: Entry from I2C table * @ver: Chip version - * @state: TVP5146/47 decoder state - detected or not-detected + * @streaming: TVP5146/47 decoder streaming - enabled or disabled. * @pix: Current pixel format * @num_fmts: Number of formats * @fmt_list: Format list * @current_std: Current standard * @num_stds: Number of standards * @std_list: Standards list - * @route: input and output routing at chip level + * @input: Input routing at chip level + * @output: Output routing at chip level */ struct tvp514x_decoder { - struct v4l2_int_device v4l2_int_device; - struct v4l2_int_slave tvp514x_slave; + struct v4l2_subdev sd; struct tvp514x_reg tvp514x_regs[ARRAY_SIZE(tvp514x_reg_list_default)]; const struct tvp514x_platform_data *pdata; - struct i2c_client *client; - - struct i2c_device_id *id; int ver; - enum tvp514x_state state; + int streaming; struct v4l2_pix_format pix; int num_fmts; @@ -124,8 +111,11 @@ struct tvp514x_decoder { enum tvp514x_std current_std; int num_stds; struct tvp514x_std_info *std_list; - - struct v4l2_routing route; + /* + * Input and Output Routing parameters + */ + u32 input; + u32 output; }; /* TVP514x default register values */ @@ -191,7 +181,8 @@ static struct tvp514x_reg tvp514x_reg_list_default[] = { {TOK_TERM, 0, 0}, }; -/* List of image formats supported by TVP5146/47 decoder +/* + * List of image formats supported by TVP5146/47 decoder * Currently we are using 8 bit mode only, but can be * extended to 10/20 bit mode. */ @@ -240,35 +231,29 @@ static struct tvp514x_std_info tvp514x_std_list[] = { }, /* Standard: need to add for additional standard */ }; -/* - * Control structure for Auto Gain - * This is temporary data, will get replaced once - * v4l2_ctrl_query_fill supports it. - */ -static const struct v4l2_queryctrl tvp514x_autogain_ctrl = { - .id = V4L2_CID_AUTOGAIN, - .name = "Gain, Automatic", - .type = V4L2_CTRL_TYPE_BOOLEAN, - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 1, -}; + + +static inline struct tvp514x_decoder *to_decoder(struct v4l2_subdev *sd) +{ + return container_of(sd, struct tvp514x_decoder, sd); +} + /* * Read a value from a register in an TVP5146/47 decoder device. * Returns value read if successful, or non-zero (-1) otherwise. */ -static int tvp514x_read_reg(struct i2c_client *client, u8 reg) +static int tvp514x_read_reg(struct v4l2_subdev *sd, u8 reg) { - int err; - int retry = 0; + int err, retry = 0; + struct i2c_client *client = v4l2_get_subdevdata(sd); + read_again: err = i2c_smbus_read_byte_data(client, reg); if (err == -1) { if (retry <= I2C_RETRY_COUNT) { - v4l_warn(client, "Read: retry ... %d\n", retry); + v4l2_warn(sd, "Read: retry ... %d\n", retry); retry++; msleep_interruptible(10); goto read_again; @@ -278,20 +263,29 @@ read_again: return err; } +static void dump_reg(struct v4l2_subdev *sd, u8 reg) +{ + u32 val; + + val = tvp514x_read_reg(sd, reg); + v4l2_info(sd, "Reg(0x%.2X): 0x%.2X\n", reg, val); +} + /* * Write a value to a register in an TVP5146/47 decoder device. * Returns zero if successful, or non-zero otherwise. */ -static int tvp514x_write_reg(struct i2c_client *client, u8 reg, u8 val) +static int tvp514x_write_reg(struct v4l2_subdev *sd, u8 reg, u8 val) { - int err; - int retry = 0; + int err, retry = 0; + struct i2c_client *client = v4l2_get_subdevdata(sd); + write_again: err = i2c_smbus_write_byte_data(client, reg, val); if (err) { if (retry <= I2C_RETRY_COUNT) { - v4l_warn(client, "Write: retry ... 
%d\n", retry); + v4l2_warn(sd, "Write: retry ... %d\n", retry); retry++; msleep_interruptible(10); goto write_again; @@ -311,7 +305,7 @@ write_again: * reglist - list of registers to be written * Returns zero if successful, or non-zero otherwise. */ -static int tvp514x_write_regs(struct i2c_client *client, +static int tvp514x_write_regs(struct v4l2_subdev *sd, const struct tvp514x_reg reglist[]) { int err; @@ -326,9 +320,9 @@ static int tvp514x_write_regs(struct i2c_client *client, if (next->token == TOK_SKIP) continue; - err = tvp514x_write_reg(client, next->reg, (u8) next->val); + err = tvp514x_write_reg(sd, next->reg, (u8) next->val); if (err) { - v4l_err(client, "Write failed. Err[%d]\n", err); + v4l2_err(sd, "Write failed. Err[%d]\n", err); return err; } } @@ -339,17 +333,15 @@ static int tvp514x_write_regs(struct i2c_client *client, * tvp514x_get_current_std: * Returns the current standard detected by TVP5146/47 */ -static enum tvp514x_std tvp514x_get_current_std(struct tvp514x_decoder - *decoder) +static enum tvp514x_std tvp514x_get_current_std(struct v4l2_subdev *sd) { u8 std, std_status; - std = tvp514x_read_reg(decoder->client, REG_VIDEO_STD); - if ((std & VIDEO_STD_MASK) == VIDEO_STD_AUTO_SWITCH_BIT) { + std = tvp514x_read_reg(sd, REG_VIDEO_STD); + if ((std & VIDEO_STD_MASK) == VIDEO_STD_AUTO_SWITCH_BIT) /* use the standard status register */ - std_status = tvp514x_read_reg(decoder->client, - REG_VIDEO_STD_STATUS); - } else + std_status = tvp514x_read_reg(sd, REG_VIDEO_STD_STATUS); + else std_status = std; /* use the standard register itself */ switch (std_status & VIDEO_STD_MASK) { @@ -369,70 +361,69 @@ static enum tvp514x_std tvp514x_get_current_std(struct tvp514x_decoder /* * TVP5146/47 register dump function */ -static void tvp514x_reg_dump(struct tvp514x_decoder *decoder) +static void tvp514x_reg_dump(struct v4l2_subdev *sd) { - u8 value; - - dump_reg(decoder->client, REG_INPUT_SEL, value); - dump_reg(decoder->client, REG_AFE_GAIN_CTRL, value); - dump_reg(decoder->client, REG_VIDEO_STD, value); - dump_reg(decoder->client, REG_OPERATION_MODE, value); - dump_reg(decoder->client, REG_COLOR_KILLER, value); - dump_reg(decoder->client, REG_LUMA_CONTROL1, value); - dump_reg(decoder->client, REG_LUMA_CONTROL2, value); - dump_reg(decoder->client, REG_LUMA_CONTROL3, value); - dump_reg(decoder->client, REG_BRIGHTNESS, value); - dump_reg(decoder->client, REG_CONTRAST, value); - dump_reg(decoder->client, REG_SATURATION, value); - dump_reg(decoder->client, REG_HUE, value); - dump_reg(decoder->client, REG_CHROMA_CONTROL1, value); - dump_reg(decoder->client, REG_CHROMA_CONTROL2, value); - dump_reg(decoder->client, REG_COMP_PR_SATURATION, value); - dump_reg(decoder->client, REG_COMP_Y_CONTRAST, value); - dump_reg(decoder->client, REG_COMP_PB_SATURATION, value); - dump_reg(decoder->client, REG_COMP_Y_BRIGHTNESS, value); - dump_reg(decoder->client, REG_AVID_START_PIXEL_LSB, value); - dump_reg(decoder->client, REG_AVID_START_PIXEL_MSB, value); - dump_reg(decoder->client, REG_AVID_STOP_PIXEL_LSB, value); - dump_reg(decoder->client, REG_AVID_STOP_PIXEL_MSB, value); - dump_reg(decoder->client, REG_HSYNC_START_PIXEL_LSB, value); - dump_reg(decoder->client, REG_HSYNC_START_PIXEL_MSB, value); - dump_reg(decoder->client, REG_HSYNC_STOP_PIXEL_LSB, value); - dump_reg(decoder->client, REG_HSYNC_STOP_PIXEL_MSB, value); - dump_reg(decoder->client, REG_VSYNC_START_LINE_LSB, value); - dump_reg(decoder->client, REG_VSYNC_START_LINE_MSB, value); - dump_reg(decoder->client, REG_VSYNC_STOP_LINE_LSB, value); - 
dump_reg(decoder->client, REG_VSYNC_STOP_LINE_MSB, value); - dump_reg(decoder->client, REG_VBLK_START_LINE_LSB, value); - dump_reg(decoder->client, REG_VBLK_START_LINE_MSB, value); - dump_reg(decoder->client, REG_VBLK_STOP_LINE_LSB, value); - dump_reg(decoder->client, REG_VBLK_STOP_LINE_MSB, value); - dump_reg(decoder->client, REG_SYNC_CONTROL, value); - dump_reg(decoder->client, REG_OUTPUT_FORMATTER1, value); - dump_reg(decoder->client, REG_OUTPUT_FORMATTER2, value); - dump_reg(decoder->client, REG_OUTPUT_FORMATTER3, value); - dump_reg(decoder->client, REG_OUTPUT_FORMATTER4, value); - dump_reg(decoder->client, REG_OUTPUT_FORMATTER5, value); - dump_reg(decoder->client, REG_OUTPUT_FORMATTER6, value); - dump_reg(decoder->client, REG_CLEAR_LOST_LOCK, value); + dump_reg(sd, REG_INPUT_SEL); + dump_reg(sd, REG_AFE_GAIN_CTRL); + dump_reg(sd, REG_VIDEO_STD); + dump_reg(sd, REG_OPERATION_MODE); + dump_reg(sd, REG_COLOR_KILLER); + dump_reg(sd, REG_LUMA_CONTROL1); + dump_reg(sd, REG_LUMA_CONTROL2); + dump_reg(sd, REG_LUMA_CONTROL3); + dump_reg(sd, REG_BRIGHTNESS); + dump_reg(sd, REG_CONTRAST); + dump_reg(sd, REG_SATURATION); + dump_reg(sd, REG_HUE); + dump_reg(sd, REG_CHROMA_CONTROL1); + dump_reg(sd, REG_CHROMA_CONTROL2); + dump_reg(sd, REG_COMP_PR_SATURATION); + dump_reg(sd, REG_COMP_Y_CONTRAST); + dump_reg(sd, REG_COMP_PB_SATURATION); + dump_reg(sd, REG_COMP_Y_BRIGHTNESS); + dump_reg(sd, REG_AVID_START_PIXEL_LSB); + dump_reg(sd, REG_AVID_START_PIXEL_MSB); + dump_reg(sd, REG_AVID_STOP_PIXEL_LSB); + dump_reg(sd, REG_AVID_STOP_PIXEL_MSB); + dump_reg(sd, REG_HSYNC_START_PIXEL_LSB); + dump_reg(sd, REG_HSYNC_START_PIXEL_MSB); + dump_reg(sd, REG_HSYNC_STOP_PIXEL_LSB); + dump_reg(sd, REG_HSYNC_STOP_PIXEL_MSB); + dump_reg(sd, REG_VSYNC_START_LINE_LSB); + dump_reg(sd, REG_VSYNC_START_LINE_MSB); + dump_reg(sd, REG_VSYNC_STOP_LINE_LSB); + dump_reg(sd, REG_VSYNC_STOP_LINE_MSB); + dump_reg(sd, REG_VBLK_START_LINE_LSB); + dump_reg(sd, REG_VBLK_START_LINE_MSB); + dump_reg(sd, REG_VBLK_STOP_LINE_LSB); + dump_reg(sd, REG_VBLK_STOP_LINE_MSB); + dump_reg(sd, REG_SYNC_CONTROL); + dump_reg(sd, REG_OUTPUT_FORMATTER1); + dump_reg(sd, REG_OUTPUT_FORMATTER2); + dump_reg(sd, REG_OUTPUT_FORMATTER3); + dump_reg(sd, REG_OUTPUT_FORMATTER4); + dump_reg(sd, REG_OUTPUT_FORMATTER5); + dump_reg(sd, REG_OUTPUT_FORMATTER6); + dump_reg(sd, REG_CLEAR_LOST_LOCK); } /* * Configure the TVP5146/47 with the current register settings * Returns zero if successful, or non-zero otherwise. */ -static int tvp514x_configure(struct tvp514x_decoder *decoder) +static int tvp514x_configure(struct v4l2_subdev *sd, + struct tvp514x_decoder *decoder) { int err; /* common register initialization */ err = - tvp514x_write_regs(decoder->client, decoder->tvp514x_regs); + tvp514x_write_regs(sd, decoder->tvp514x_regs); if (err) return err; if (debug) - tvp514x_reg_dump(decoder); + tvp514x_reg_dump(sd); return 0; } @@ -445,15 +436,17 @@ static int tvp514x_configure(struct tvp514x_decoder *decoder) * Returns ENODEV error number if no device is detected, or zero * if a device is detected. 
*/ -static int tvp514x_detect(struct tvp514x_decoder *decoder) +static int tvp514x_detect(struct v4l2_subdev *sd, + struct tvp514x_decoder *decoder) { u8 chip_id_msb, chip_id_lsb, rom_ver; + struct i2c_client *client = v4l2_get_subdevdata(sd); - chip_id_msb = tvp514x_read_reg(decoder->client, REG_CHIP_ID_MSB); - chip_id_lsb = tvp514x_read_reg(decoder->client, REG_CHIP_ID_LSB); - rom_ver = tvp514x_read_reg(decoder->client, REG_ROM_VERSION); + chip_id_msb = tvp514x_read_reg(sd, REG_CHIP_ID_MSB); + chip_id_lsb = tvp514x_read_reg(sd, REG_CHIP_ID_LSB); + rom_ver = tvp514x_read_reg(sd, REG_ROM_VERSION); - v4l_dbg(1, debug, decoder->client, + v4l2_dbg(1, debug, sd, "chip id detected msb:0x%x lsb:0x%x rom version:0x%x\n", chip_id_msb, chip_id_lsb, rom_ver); if ((chip_id_msb != TVP514X_CHIP_ID_MSB) @@ -462,19 +455,16 @@ static int tvp514x_detect(struct tvp514x_decoder *decoder) /* We didn't read the values we expected, so this must not be * an TVP5146/47. */ - v4l_err(decoder->client, - "chip id mismatch msb:0x%x lsb:0x%x\n", - chip_id_msb, chip_id_lsb); + v4l2_err(sd, "chip id mismatch msb:0x%x lsb:0x%x\n", + chip_id_msb, chip_id_lsb); return -ENODEV; } decoder->ver = rom_ver; - decoder->state = STATE_DETECTED; - v4l_info(decoder->client, - "%s found at 0x%x (%s)\n", decoder->client->name, - decoder->client->addr << 1, - decoder->client->adapter->name); + v4l2_info(sd, "%s (Version - 0x%.2x) found at 0x%x (%s)\n", + client->name, decoder->ver, + client->addr << 1, client->adapter->name); return 0; } @@ -483,17 +473,17 @@ static int tvp514x_detect(struct tvp514x_decoder *decoder) * TVP5146/47 decoder driver. */ -/** - * ioctl_querystd - V4L2 decoder interface handler for VIDIOC_QUERYSTD ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_querystd - V4L2 decoder interface handler for VIDIOC_QUERYSTD ioctl + * @sd: pointer to standard V4L2 sub-device structure * @std_id: standard V4L2 std_id ioctl enum * * Returns the current standard detected by TVP5146/47. 
If no active input is * detected, returns -EINVAL */ -static int ioctl_querystd(struct v4l2_int_device *s, v4l2_std_id *std_id) +static int tvp514x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std_id) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); enum tvp514x_std current_std; enum tvp514x_input input_sel; u8 sync_lock_status, lock_mask; @@ -502,11 +492,11 @@ static int ioctl_querystd(struct v4l2_int_device *s, v4l2_std_id *std_id) return -EINVAL; /* get the current standard */ - current_std = tvp514x_get_current_std(decoder); + current_std = tvp514x_get_current_std(sd); if (current_std == STD_INVALID) return -EINVAL; - input_sel = decoder->route.input; + input_sel = decoder->input; switch (input_sel) { case INPUT_CVBS_VI1A: @@ -544,42 +534,39 @@ static int ioctl_querystd(struct v4l2_int_device *s, v4l2_std_id *std_id) return -EINVAL; } /* check whether signal is locked */ - sync_lock_status = tvp514x_read_reg(decoder->client, REG_STATUS1); + sync_lock_status = tvp514x_read_reg(sd, REG_STATUS1); if (lock_mask != (sync_lock_status & lock_mask)) return -EINVAL; /* No input detected */ decoder->current_std = current_std; *std_id = decoder->std_list[current_std].standard.id; - v4l_dbg(1, debug, decoder->client, "Current STD: %s", + v4l2_dbg(1, debug, sd, "Current STD: %s", decoder->std_list[current_std].standard.name); return 0; } -/** - * ioctl_s_std - V4L2 decoder interface handler for VIDIOC_S_STD ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_s_std - V4L2 decoder interface handler for VIDIOC_S_STD ioctl + * @sd: pointer to standard V4L2 sub-device structure * @std_id: standard V4L2 v4l2_std_id ioctl enum * * If std_id is supported, sets the requested standard. Otherwise, returns * -EINVAL */ -static int ioctl_s_std(struct v4l2_int_device *s, v4l2_std_id *std_id) +static int tvp514x_s_std(struct v4l2_subdev *sd, v4l2_std_id std_id) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); int err, i; - if (std_id == NULL) - return -EINVAL; - for (i = 0; i < decoder->num_stds; i++) - if (*std_id & decoder->std_list[i].standard.id) + if (std_id & decoder->std_list[i].standard.id) break; if ((i == decoder->num_stds) || (i == STD_INVALID)) return -EINVAL; - err = tvp514x_write_reg(decoder->client, REG_VIDEO_STD, + err = tvp514x_write_reg(sd, REG_VIDEO_STD, decoder->std_list[i].video_std); if (err) return err; @@ -588,24 +575,24 @@ static int ioctl_s_std(struct v4l2_int_device *s, v4l2_std_id *std_id) decoder->tvp514x_regs[REG_VIDEO_STD].val = decoder->std_list[i].video_std; - v4l_dbg(1, debug, decoder->client, "Standard set to: %s", + v4l2_dbg(1, debug, sd, "Standard set to: %s", decoder->std_list[i].standard.name); return 0; } -/** - * ioctl_s_routing - V4L2 decoder interface handler for VIDIOC_S_INPUT ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_s_routing - V4L2 decoder interface handler for VIDIOC_S_INPUT ioctl + * @sd: pointer to standard V4L2 sub-device structure * @index: number of the input * * If index is valid, selects the requested input. Otherwise, returns -EINVAL if * the input is not supported or there is no active signal present in the * selected input. 
*/ -static int ioctl_s_routing(struct v4l2_int_device *s, - struct v4l2_routing *route) +static int tvp514x_s_routing(struct v4l2_subdev *sd, + u32 input, u32 output, u32 config) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); int err; enum tvp514x_input input_sel; enum tvp514x_output output_sel; @@ -613,20 +600,20 @@ static int ioctl_s_routing(struct v4l2_int_device *s, u8 sync_lock_status, lock_mask; int try_count = LOCK_RETRY_COUNT; - if ((!route) || (route->input >= INPUT_INVALID) || - (route->output >= OUTPUT_INVALID)) + if ((input >= INPUT_INVALID) || + (output >= OUTPUT_INVALID)) return -EINVAL; /* Index out of bound */ - input_sel = route->input; - output_sel = route->output; + input_sel = input; + output_sel = output; - err = tvp514x_write_reg(decoder->client, REG_INPUT_SEL, input_sel); + err = tvp514x_write_reg(sd, REG_INPUT_SEL, input_sel); if (err) return err; - output_sel |= tvp514x_read_reg(decoder->client, + output_sel |= tvp514x_read_reg(sd, REG_OUTPUT_FORMATTER1) & 0x7; - err = tvp514x_write_reg(decoder->client, REG_OUTPUT_FORMATTER1, + err = tvp514x_write_reg(sd, REG_OUTPUT_FORMATTER1, output_sel); if (err) return err; @@ -637,7 +624,7 @@ static int ioctl_s_routing(struct v4l2_int_device *s, /* Clear status */ msleep(LOCK_RETRY_DELAY); err = - tvp514x_write_reg(decoder->client, REG_CLEAR_LOST_LOCK, 0x01); + tvp514x_write_reg(sd, REG_CLEAR_LOST_LOCK, 0x01); if (err) return err; @@ -682,11 +669,11 @@ static int ioctl_s_routing(struct v4l2_int_device *s, msleep(LOCK_RETRY_DELAY); /* get the current standard for future reference */ - current_std = tvp514x_get_current_std(decoder); + current_std = tvp514x_get_current_std(sd); if (current_std == STD_INVALID) continue; - sync_lock_status = tvp514x_read_reg(decoder->client, + sync_lock_status = tvp514x_read_reg(sd, REG_STATUS1); if (lock_mask == (sync_lock_status & lock_mask)) break; /* Input detected */ @@ -696,28 +683,26 @@ static int ioctl_s_routing(struct v4l2_int_device *s, return -EINVAL; decoder->current_std = current_std; - decoder->route.input = route->input; - decoder->route.output = route->output; + decoder->input = input; + decoder->output = output; - v4l_dbg(1, debug, decoder->client, - "Input set to: %d, std : %d", + v4l2_dbg(1, debug, sd, "Input set to: %d, std : %d", input_sel, current_std); return 0; } -/** - * ioctl_queryctrl - V4L2 decoder interface handler for VIDIOC_QUERYCTRL ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_queryctrl - V4L2 decoder interface handler for VIDIOC_QUERYCTRL ioctl + * @sd: pointer to standard V4L2 sub-device structure * @qctrl: standard V4L2 v4l2_queryctrl structure * * If the requested control is supported, returns the control information. * Otherwise, returns -EINVAL if the control is not supported. 
*/ static int -ioctl_queryctrl(struct v4l2_int_device *s, struct v4l2_queryctrl *qctrl) +tvp514x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qctrl) { - struct tvp514x_decoder *decoder = s->priv; int err = -EINVAL; if (qctrl == NULL) @@ -744,30 +729,27 @@ ioctl_queryctrl(struct v4l2_int_device *s, struct v4l2_queryctrl *qctrl) err = v4l2_ctrl_query_fill(qctrl, -180, 180, 180, 0); break; case V4L2_CID_AUTOGAIN: - /* Autogain is either 0 or 1*/ - memcpy(qctrl, &tvp514x_autogain_ctrl, - sizeof(struct v4l2_queryctrl)); - err = 0; + /* + * Auto Gain supported is - + * 0 - 1 (Default - 1) + */ + err = v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 1); break; default: - v4l_err(decoder->client, - "invalid control id %d\n", qctrl->id); + v4l2_err(sd, "invalid control id %d\n", qctrl->id); return err; } - v4l_dbg(1, debug, decoder->client, - "Query Control: %s : Min - %d, Max - %d, Def - %d", - qctrl->name, - qctrl->minimum, - qctrl->maximum, + v4l2_dbg(1, debug, sd, "Query Control:%s: Min - %d, Max - %d, Def - %d", + qctrl->name, qctrl->minimum, qctrl->maximum, qctrl->default_value); return err; } -/** - * ioctl_g_ctrl - V4L2 decoder interface handler for VIDIOC_G_CTRL ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_g_ctrl - V4L2 decoder interface handler for VIDIOC_G_CTRL ioctl + * @sd: pointer to standard V4L2 sub-device structure * @ctrl: pointer to v4l2_control structure * * If the requested control is supported, returns the control's current @@ -775,9 +757,9 @@ ioctl_queryctrl(struct v4l2_int_device *s, struct v4l2_queryctrl *qctrl) * supported. */ static int -ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *ctrl) +tvp514x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); if (ctrl == NULL) return -EINVAL; @@ -811,74 +793,70 @@ ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *ctrl) break; default: - v4l_err(decoder->client, - "invalid control id %d\n", ctrl->id); + v4l2_err(sd, "invalid control id %d\n", ctrl->id); return -EINVAL; } - v4l_dbg(1, debug, decoder->client, - "Get Control: ID - %d - %d", + v4l2_dbg(1, debug, sd, "Get Control: ID - %d - %d", ctrl->id, ctrl->value); return 0; } -/** - * ioctl_s_ctrl - V4L2 decoder interface handler for VIDIOC_S_CTRL ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_s_ctrl - V4L2 decoder interface handler for VIDIOC_S_CTRL ioctl + * @sd: pointer to standard V4L2 sub-device structure * @ctrl: pointer to v4l2_control structure * * If the requested control is supported, sets the control's current * value in HW. Otherwise, returns -EINVAL if the control is not supported. 
*/ static int -ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *ctrl) +tvp514x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); int err = -EINVAL, value; if (ctrl == NULL) return err; - value = (__s32) ctrl->value; + value = ctrl->value; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: if (ctrl->value < 0 || ctrl->value > 255) { - v4l_err(decoder->client, - "invalid brightness setting %d\n", + v4l2_err(sd, "invalid brightness setting %d\n", ctrl->value); return -ERANGE; } - err = tvp514x_write_reg(decoder->client, REG_BRIGHTNESS, + err = tvp514x_write_reg(sd, REG_BRIGHTNESS, value); if (err) return err; + decoder->tvp514x_regs[REG_BRIGHTNESS].val = value; break; case V4L2_CID_CONTRAST: if (ctrl->value < 0 || ctrl->value > 255) { - v4l_err(decoder->client, - "invalid contrast setting %d\n", + v4l2_err(sd, "invalid contrast setting %d\n", ctrl->value); return -ERANGE; } - err = tvp514x_write_reg(decoder->client, REG_CONTRAST, - value); + err = tvp514x_write_reg(sd, REG_CONTRAST, value); if (err) return err; + decoder->tvp514x_regs[REG_CONTRAST].val = value; break; case V4L2_CID_SATURATION: if (ctrl->value < 0 || ctrl->value > 255) { - v4l_err(decoder->client, - "invalid saturation setting %d\n", + v4l2_err(sd, "invalid saturation setting %d\n", ctrl->value); return -ERANGE; } - err = tvp514x_write_reg(decoder->client, REG_SATURATION, - value); + err = tvp514x_write_reg(sd, REG_SATURATION, value); if (err) return err; + decoder->tvp514x_regs[REG_SATURATION].val = value; break; case V4L2_CID_HUE: @@ -889,15 +867,13 @@ ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *ctrl) else if (value == 0) value = 0; else { - v4l_err(decoder->client, - "invalid hue setting %d\n", - ctrl->value); + v4l2_err(sd, "invalid hue setting %d\n", ctrl->value); return -ERANGE; } - err = tvp514x_write_reg(decoder->client, REG_HUE, - value); + err = tvp514x_write_reg(sd, REG_HUE, value); if (err) return err; + decoder->tvp514x_regs[REG_HUE].val = value; break; case V4L2_CID_AUTOGAIN: @@ -906,41 +882,38 @@ ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *ctrl) else if (value == 0) value = 0x0C; else { - v4l_err(decoder->client, - "invalid auto gain setting %d\n", + v4l2_err(sd, "invalid auto gain setting %d\n", ctrl->value); return -ERANGE; } - err = tvp514x_write_reg(decoder->client, REG_AFE_GAIN_CTRL, - value); + err = tvp514x_write_reg(sd, REG_AFE_GAIN_CTRL, value); if (err) return err; + decoder->tvp514x_regs[REG_AFE_GAIN_CTRL].val = value; break; default: - v4l_err(decoder->client, - "invalid control id %d\n", ctrl->id); + v4l2_err(sd, "invalid control id %d\n", ctrl->id); return err; } - v4l_dbg(1, debug, decoder->client, - "Set Control: ID - %d - %d", + v4l2_dbg(1, debug, sd, "Set Control: ID - %d - %d", ctrl->id, ctrl->value); return err; } -/** - * ioctl_enum_fmt_cap - Implement the CAPTURE buffer VIDIOC_ENUM_FMT ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_enum_fmt_cap - Implement the CAPTURE buffer VIDIOC_ENUM_FMT ioctl + * @sd: pointer to standard V4L2 sub-device structure * @fmt: standard V4L2 VIDIOC_ENUM_FMT ioctl structure * * Implement the VIDIOC_ENUM_FMT ioctl to enumerate supported formats */ static int -ioctl_enum_fmt_cap(struct v4l2_int_device *s, struct v4l2_fmtdesc *fmt) +tvp514x_enum_fmt_cap(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = 
to_decoder(sd); int index; if (fmt == NULL) @@ -956,16 +929,15 @@ ioctl_enum_fmt_cap(struct v4l2_int_device *s, struct v4l2_fmtdesc *fmt) memcpy(fmt, &decoder->fmt_list[index], sizeof(struct v4l2_fmtdesc)); - v4l_dbg(1, debug, decoder->client, - "Current FMT: index - %d (%s)", + v4l2_dbg(1, debug, sd, "Current FMT: index - %d (%s)", decoder->fmt_list[index].index, decoder->fmt_list[index].description); return 0; } -/** - * ioctl_try_fmt_cap - Implement the CAPTURE buffer VIDIOC_TRY_FMT ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_try_fmt_cap - Implement the CAPTURE buffer VIDIOC_TRY_FMT ioctl + * @sd: pointer to standard V4L2 sub-device structure * @f: pointer to standard V4L2 VIDIOC_TRY_FMT ioctl structure * * Implement the VIDIOC_TRY_FMT ioctl for the CAPTURE buffer type. This @@ -973,9 +945,9 @@ ioctl_enum_fmt_cap(struct v4l2_int_device *s, struct v4l2_fmtdesc *fmt) * without actually making it take effect. */ static int -ioctl_try_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) +tvp514x_try_fmt_cap(struct v4l2_subdev *sd, struct v4l2_format *f) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); int ifmt; struct v4l2_pix_format *pix; enum tvp514x_std current_std; @@ -989,7 +961,7 @@ ioctl_try_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) pix = &f->fmt.pix; /* Calculate height and width based on current standard */ - current_std = tvp514x_get_current_std(decoder); + current_std = tvp514x_get_current_std(sd); if (current_std == STD_INVALID) return -EINVAL; @@ -1012,17 +984,16 @@ ioctl_try_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) pix->colorspace = V4L2_COLORSPACE_SMPTE170M; pix->priv = 0; - v4l_dbg(1, debug, decoder->client, - "Try FMT: pixelformat - %s, bytesperline - %d" + v4l2_dbg(1, debug, sd, "Try FMT: pixelformat - %s, bytesperline - %d" "Width - %d, Height - %d", decoder->fmt_list[ifmt].description, pix->bytesperline, pix->width, pix->height); return 0; } -/** - * ioctl_s_fmt_cap - V4L2 decoder interface handler for VIDIOC_S_FMT ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_s_fmt_cap - V4L2 decoder interface handler for VIDIOC_S_FMT ioctl + * @sd: pointer to standard V4L2 sub-device structure * @f: pointer to standard V4L2 VIDIOC_S_FMT ioctl structure * * If the requested format is supported, configures the HW to use that @@ -1030,9 +1001,9 @@ ioctl_try_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) * correctly configured. 
*/ static int -ioctl_s_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) +tvp514x_s_fmt_cap(struct v4l2_subdev *sd, struct v4l2_format *f) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); struct v4l2_pix_format *pix; int rval; @@ -1043,7 +1014,7 @@ ioctl_s_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) return -EINVAL; /* only capture is supported */ pix = &f->fmt.pix; - rval = ioctl_try_fmt_cap(s, f); + rval = tvp514x_try_fmt_cap(sd, f); if (rval) return rval; @@ -1052,18 +1023,18 @@ ioctl_s_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) return rval; } -/** - * ioctl_g_fmt_cap - V4L2 decoder interface handler for ioctl_g_fmt_cap - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_g_fmt_cap - V4L2 decoder interface handler for tvp514x_g_fmt_cap + * @sd: pointer to standard V4L2 sub-device structure * @f: pointer to standard V4L2 v4l2_format structure * * Returns the decoder's current pixel format in the v4l2_format * parameter. */ static int -ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) +tvp514x_g_fmt_cap(struct v4l2_subdev *sd, struct v4l2_format *f) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); if (f == NULL) return -EINVAL; @@ -1073,25 +1044,24 @@ ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) f->fmt.pix = decoder->pix; - v4l_dbg(1, debug, decoder->client, - "Current FMT: bytesperline - %d" + v4l2_dbg(1, debug, sd, "Current FMT: bytesperline - %d" "Width - %d, Height - %d", decoder->pix.bytesperline, decoder->pix.width, decoder->pix.height); return 0; } -/** - * ioctl_g_parm - V4L2 decoder interface handler for VIDIOC_G_PARM ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_g_parm - V4L2 decoder interface handler for VIDIOC_G_PARM ioctl + * @sd: pointer to standard V4L2 sub-device structure * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure * * Returns the decoder's video CAPTURE parameters. */ static int -ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) +tvp514x_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *a) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); struct v4l2_captureparm *cparm; enum tvp514x_std current_std; @@ -1105,7 +1075,7 @@ ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* get the current standard */ - current_std = tvp514x_get_current_std(decoder); + current_std = tvp514x_get_current_std(sd); if (current_std == STD_INVALID) return -EINVAL; @@ -1119,18 +1089,18 @@ ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) return 0; } -/** - * ioctl_s_parm - V4L2 decoder interface handler for VIDIOC_S_PARM ioctl - * @s: pointer to standard V4L2 device structure +/* + * tvp514x_s_parm - V4L2 decoder interface handler for VIDIOC_S_PARM ioctl + * @sd: pointer to standard V4L2 sub-device structure * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure * * Configures the decoder to use the input parameters, if possible. If * not possible, returns the appropriate error code. 
*/ static int -ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) +tvp514x_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *a) { - struct tvp514x_decoder *decoder = s->priv; + struct tvp514x_decoder *decoder = to_decoder(sd); struct v4l2_fract *timeperframe; enum tvp514x_std current_std; @@ -1143,7 +1113,7 @@ ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) timeperframe = &a->parm.capture.timeperframe; /* get the current standard */ - current_std = tvp514x_get_current_std(decoder); + current_std = tvp514x_get_current_std(sd); if (current_std == STD_INVALID) return -EINVAL; @@ -1155,112 +1125,59 @@ ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) return 0; } -/** - * ioctl_g_ifparm - V4L2 decoder interface handler for vidioc_int_g_ifparm_num - * @s: pointer to standard V4L2 device structure - * @p: pointer to standard V4L2 vidioc_int_g_ifparm_num ioctl structure - * - * Gets slave interface parameters. - * Calculates the required xclk value to support the requested - * clock parameters in p. This value is returned in the p - * parameter. - */ -static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p) -{ - struct tvp514x_decoder *decoder = s->priv; - int rval; - - if (p == NULL) - return -EINVAL; - - if (NULL == decoder->pdata->ifparm) - return -EINVAL; - - rval = decoder->pdata->ifparm(p); - if (rval) { - v4l_err(decoder->client, "g_ifparm.Err[%d]\n", rval); - return rval; - } - - p->u.bt656.clock_curr = TVP514X_XCLK_BT656; - - return 0; -} - -/** - * ioctl_g_priv - V4L2 decoder interface handler for vidioc_int_g_priv_num - * @s: pointer to standard V4L2 device structure - * @p: void pointer to hold decoder's private data address - * - * Returns device's (decoder's) private data area address in p parameter - */ -static int ioctl_g_priv(struct v4l2_int_device *s, void *p) -{ - struct tvp514x_decoder *decoder = s->priv; - - if (NULL == decoder->pdata->priv_data_set) - return -EINVAL; - - return decoder->pdata->priv_data_set(p); -} - -/** - * ioctl_s_power - V4L2 decoder interface handler for vidioc_int_s_power_num - * @s: pointer to standard V4L2 device structure - * @on: power state to which device is to be set +/* + * tvp514x_s_stream - V4L2 decoder interface handler for vidioc_int_s_power_num + * @sd: pointer to standard V4L2 sub-device structure + * @enable: streaming enable or disable * - * Sets devices power state to requrested state, if possible. + * Sets streaming to enable or disable, if possible. 
*/ -static int ioctl_s_power(struct v4l2_int_device *s, enum v4l2_power on) +static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable) { - struct tvp514x_decoder *decoder = s->priv; int err = 0; + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct tvp514x_decoder *decoder = to_decoder(sd); - switch (on) { - case V4L2_POWER_OFF: - /* Power Down Sequence */ - err = - tvp514x_write_reg(decoder->client, REG_OPERATION_MODE, - 0x01); - /* Disable mux for TVP5146/47 decoder data path */ - if (decoder->pdata->power_set) - err |= decoder->pdata->power_set(on); - decoder->state = STATE_NOT_DETECTED; - break; + if (decoder->streaming == enable) + return 0; - case V4L2_POWER_STANDBY: - if (decoder->pdata->power_set) - err = decoder->pdata->power_set(on); + switch (enable) { + case 0: + { + /* Power Down Sequence */ + err = tvp514x_write_reg(sd, REG_OPERATION_MODE, 0x01); + if (err) { + v4l2_err(sd, "Unable to turn off decoder\n"); + return err; + } + decoder->streaming = enable; break; + } + case 1: + { + struct tvp514x_reg *int_seq = (struct tvp514x_reg *) + client->driver->id_table->driver_data; - case V4L2_POWER_ON: - /* Enable mux for TVP5146/47 decoder data path */ - if ((decoder->pdata->power_set) && - (decoder->state == STATE_NOT_DETECTED)) { - int i; - struct tvp514x_init_seq *int_seq = - (struct tvp514x_init_seq *) - decoder->id->driver_data; - - err = decoder->pdata->power_set(on); - - /* Power Up Sequence */ - for (i = 0; i < int_seq->no_regs; i++) { - err |= tvp514x_write_reg(decoder->client, - int_seq->init_reg_seq[i].reg, - int_seq->init_reg_seq[i].val); - } - /* Detect the sensor is not already detected */ - err |= tvp514x_detect(decoder); - if (err) { - v4l_err(decoder->client, - "Unable to detect decoder\n"); - return err; - } + /* Power Up Sequence */ + err = tvp514x_write_regs(sd, int_seq); + if (err) { + v4l2_err(sd, "Unable to turn on decoder\n"); + return err; + } + /* Detect if not already detected */ + err = tvp514x_detect(sd, decoder); + if (err) { + v4l2_err(sd, "Unable to detect decoder\n"); + return err; } - err |= tvp514x_configure(decoder); + err = tvp514x_configure(sd, decoder); + if (err) { + v4l2_err(sd, "Unable to configure decoder\n"); + return err; + } + decoder->streaming = enable; break; - + } default: err = -ENODEV; break; @@ -1269,93 +1186,37 @@ static int ioctl_s_power(struct v4l2_int_device *s, enum v4l2_power on) return err; } -/** - * ioctl_init - V4L2 decoder interface handler for VIDIOC_INT_INIT - * @s: pointer to standard V4L2 device structure - * - * Initialize the decoder device (calls tvp514x_configure()) - */ -static int ioctl_init(struct v4l2_int_device *s) -{ - struct tvp514x_decoder *decoder = s->priv; - - /* Set default standard to auto */ - decoder->tvp514x_regs[REG_VIDEO_STD].val = - VIDEO_STD_AUTO_SWITCH_BIT; - - return tvp514x_configure(decoder); -} - -/** - * ioctl_dev_exit - V4L2 decoder interface handler for vidioc_int_dev_exit_num - * @s: pointer to standard V4L2 device structure - * - * Delinitialise the dev. at slave detach. The complement of ioctl_dev_init. - */ -static int ioctl_dev_exit(struct v4l2_int_device *s) -{ - return 0; -} - -/** - * ioctl_dev_init - V4L2 decoder interface handler for vidioc_int_dev_init_num - * @s: pointer to standard V4L2 device structure - * - * Initialise the device when slave attaches to the master. Returns 0 if - * TVP5146/47 device could be found, otherwise returns appropriate error. 
- */ -static int ioctl_dev_init(struct v4l2_int_device *s) -{ - struct tvp514x_decoder *decoder = s->priv; - int err; - - err = tvp514x_detect(decoder); - if (err < 0) { - v4l_err(decoder->client, - "Unable to detect decoder\n"); - return err; - } - - v4l_info(decoder->client, - "chip version 0x%.2x detected\n", decoder->ver); +static const struct v4l2_subdev_core_ops tvp514x_core_ops = { + .queryctrl = tvp514x_queryctrl, + .g_ctrl = tvp514x_g_ctrl, + .s_ctrl = tvp514x_s_ctrl, + .s_std = tvp514x_s_std, +}; - return 0; -} +static const struct v4l2_subdev_video_ops tvp514x_video_ops = { + .s_routing = tvp514x_s_routing, + .querystd = tvp514x_querystd, + .enum_fmt = tvp514x_enum_fmt_cap, + .g_fmt = tvp514x_g_fmt_cap, + .try_fmt = tvp514x_try_fmt_cap, + .s_fmt = tvp514x_s_fmt_cap, + .g_parm = tvp514x_g_parm, + .s_parm = tvp514x_s_parm, + .s_stream = tvp514x_s_stream, +}; -static struct v4l2_int_ioctl_desc tvp514x_ioctl_desc[] = { - {vidioc_int_dev_init_num, (v4l2_int_ioctl_func*) ioctl_dev_init}, - {vidioc_int_dev_exit_num, (v4l2_int_ioctl_func*) ioctl_dev_exit}, - {vidioc_int_s_power_num, (v4l2_int_ioctl_func*) ioctl_s_power}, - {vidioc_int_g_priv_num, (v4l2_int_ioctl_func*) ioctl_g_priv}, - {vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func*) ioctl_g_ifparm}, - {vidioc_int_init_num, (v4l2_int_ioctl_func*) ioctl_init}, - {vidioc_int_enum_fmt_cap_num, - (v4l2_int_ioctl_func *) ioctl_enum_fmt_cap}, - {vidioc_int_try_fmt_cap_num, - (v4l2_int_ioctl_func *) ioctl_try_fmt_cap}, - {vidioc_int_g_fmt_cap_num, - (v4l2_int_ioctl_func *) ioctl_g_fmt_cap}, - {vidioc_int_s_fmt_cap_num, - (v4l2_int_ioctl_func *) ioctl_s_fmt_cap}, - {vidioc_int_g_parm_num, (v4l2_int_ioctl_func *) ioctl_g_parm}, - {vidioc_int_s_parm_num, (v4l2_int_ioctl_func *) ioctl_s_parm}, - {vidioc_int_queryctrl_num, - (v4l2_int_ioctl_func *) ioctl_queryctrl}, - {vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func *) ioctl_g_ctrl}, - {vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func *) ioctl_s_ctrl}, - {vidioc_int_querystd_num, (v4l2_int_ioctl_func *) ioctl_querystd}, - {vidioc_int_s_std_num, (v4l2_int_ioctl_func *) ioctl_s_std}, - {vidioc_int_s_video_routing_num, - (v4l2_int_ioctl_func *) ioctl_s_routing}, +static const struct v4l2_subdev_ops tvp514x_ops = { + .core = &tvp514x_core_ops, + .video = &tvp514x_video_ops, }; static struct tvp514x_decoder tvp514x_dev = { - .state = STATE_NOT_DETECTED, + .streaming = 0, .fmt_list = tvp514x_fmt_list, .num_fmts = ARRAY_SIZE(tvp514x_fmt_list), - .pix = { /* Default to NTSC 8-bit YUV 422 */ + .pix = {/* Default to NTSC 8-bit YUV 422 */ .width = NTSC_NUM_ACTIVE_PIXELS, .height = NTSC_NUM_ACTIVE_LINES, .pixelformat = V4L2_PIX_FMT_UYVY, @@ -1369,20 +1230,13 @@ static struct tvp514x_decoder tvp514x_dev = { .current_std = STD_NTSC_MJ, .std_list = tvp514x_std_list, .num_stds = ARRAY_SIZE(tvp514x_std_list), - .v4l2_int_device = { - .module = THIS_MODULE, - .name = TVP514X_MODULE_NAME, - .type = v4l2_int_type_slave, - }, - .tvp514x_slave = { - .ioctls = tvp514x_ioctl_desc, - .num_ioctls = ARRAY_SIZE(tvp514x_ioctl_desc), - }, + }; -/** +/* * tvp514x_probe - decoder driver i2c probe handler * @client: i2c driver client device structure + * @id: i2c driver id table * * Register decoder as an i2c client device and V4L2 * device. 
@@ -1391,82 +1245,71 @@ static int tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct tvp514x_decoder *decoder; - int err; + struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; + if (!client->dev.platform_data) { + v4l2_err(client, "No platform data!!\n"); + return -ENODEV; + } + decoder = kzalloc(sizeof(*decoder), GFP_KERNEL); if (!decoder) return -ENOMEM; - if (!client->dev.platform_data) { - v4l_err(client, "No platform data!!\n"); - err = -ENODEV; - goto out_free; - } - + /* + * Initialize the tvp514x_decoder with default configuration + */ *decoder = tvp514x_dev; - decoder->v4l2_int_device.priv = decoder; - decoder->pdata = client->dev.platform_data; - decoder->v4l2_int_device.u.slave = &decoder->tvp514x_slave; + /* Copy default register configuration */ memcpy(decoder->tvp514x_regs, tvp514x_reg_list_default, sizeof(tvp514x_reg_list_default)); + + /* + * Copy board specific information here + */ + decoder->pdata = client->dev.platform_data; + /* * Fetch platform specific data, and configure the * tvp514x_reg_list[] accordingly. Since this is one * time configuration, no need to preserve. */ decoder->tvp514x_regs[REG_OUTPUT_FORMATTER2].val |= - (decoder->pdata->clk_polarity << 1); + (decoder->pdata->clk_polarity << 1); decoder->tvp514x_regs[REG_SYNC_CONTROL].val |= - ((decoder->pdata->hs_polarity << 2) | - (decoder->pdata->vs_polarity << 3)); - /* - * Save the id data, required for power up sequence - */ - decoder->id = (struct i2c_device_id *)id; - /* Attach to Master */ - strcpy(decoder->v4l2_int_device.u.slave->attach_to, - decoder->pdata->master); - decoder->client = client; - i2c_set_clientdata(client, decoder); + ((decoder->pdata->hs_polarity << 2) | + (decoder->pdata->vs_polarity << 3)); + /* Set default standard to auto */ + decoder->tvp514x_regs[REG_VIDEO_STD].val = + VIDEO_STD_AUTO_SWITCH_BIT; /* Register with V4L2 layer as slave device */ - err = v4l2_int_device_register(&decoder->v4l2_int_device); - if (err) { - i2c_set_clientdata(client, NULL); - v4l_err(client, - "Unable to register to v4l2. Err[%d]\n", err); - goto out_free; - - } else - v4l_info(client, "Registered to v4l2 master %s!!\n", - decoder->pdata->master); + sd = &decoder->sd; + v4l2_i2c_subdev_init(sd, client, &tvp514x_ops); + + v4l2_info(sd, "%s decoder driver registered !!\n", sd->name); + return 0; -out_free: - kfree(decoder); - return err; } -/** +/* * tvp514x_remove - decoder driver i2c remove handler * @client: i2c driver client device structure * * Unregister decoder as an i2c client device and V4L2 * device. Complement of tvp514x_probe(). 
*/ -static int __exit tvp514x_remove(struct i2c_client *client) +static int tvp514x_remove(struct i2c_client *client) { - struct tvp514x_decoder *decoder = i2c_get_clientdata(client); - - if (!client->adapter) - return -ENODEV; /* our client isn't attached */ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct tvp514x_decoder *decoder = to_decoder(sd); - v4l2_int_device_unregister(&decoder->v4l2_int_device); - i2c_set_clientdata(client, NULL); + v4l2_device_unregister_subdev(sd); kfree(decoder); return 0; } @@ -1485,11 +1328,9 @@ static const struct tvp514x_reg tvp5146_init_reg_seq[] = { {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x00}, {TOK_WRITE, REG_OPERATION_MODE, 0x01}, {TOK_WRITE, REG_OPERATION_MODE, 0x00}, + {TOK_TERM, 0, 0}, }; -static const struct tvp514x_init_seq tvp5146_init = { - .no_regs = ARRAY_SIZE(tvp5146_init_reg_seq), - .init_reg_seq = tvp5146_init_reg_seq, -}; + /* * TVP5147 Init/Power on Sequence */ @@ -1512,22 +1353,18 @@ static const struct tvp514x_reg tvp5147_init_reg_seq[] = { {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x00}, {TOK_WRITE, REG_OPERATION_MODE, 0x01}, {TOK_WRITE, REG_OPERATION_MODE, 0x00}, + {TOK_TERM, 0, 0}, }; -static const struct tvp514x_init_seq tvp5147_init = { - .no_regs = ARRAY_SIZE(tvp5147_init_reg_seq), - .init_reg_seq = tvp5147_init_reg_seq, -}; + /* * TVP5146M2/TVP5147M1 Init/Power on Sequence */ static const struct tvp514x_reg tvp514xm_init_reg_seq[] = { {TOK_WRITE, REG_OPERATION_MODE, 0x01}, {TOK_WRITE, REG_OPERATION_MODE, 0x00}, + {TOK_TERM, 0, 0}, }; -static const struct tvp514x_init_seq tvp514xm_init = { - .no_regs = ARRAY_SIZE(tvp514xm_init_reg_seq), - .init_reg_seq = tvp514xm_init_reg_seq, -}; + /* * I2C Device Table - * @@ -1535,48 +1372,34 @@ static const struct tvp514x_init_seq tvp514xm_init = { * driver_data - Driver data */ static const struct i2c_device_id tvp514x_id[] = { - {"tvp5146", (unsigned long)&tvp5146_init}, - {"tvp5146m2", (unsigned long)&tvp514xm_init}, - {"tvp5147", (unsigned long)&tvp5147_init}, - {"tvp5147m1", (unsigned long)&tvp514xm_init}, + {"tvp5146", (unsigned long)tvp5146_init_reg_seq}, + {"tvp5146m2", (unsigned long)tvp514xm_init_reg_seq}, + {"tvp5147", (unsigned long)tvp5147_init_reg_seq}, + {"tvp5147m1", (unsigned long)tvp514xm_init_reg_seq}, {}, }; MODULE_DEVICE_TABLE(i2c, tvp514x_id); -static struct i2c_driver tvp514x_i2c_driver = { +static struct i2c_driver tvp514x_driver = { .driver = { - .name = TVP514X_MODULE_NAME, - .owner = THIS_MODULE, - }, + .owner = THIS_MODULE, + .name = TVP514X_MODULE_NAME, + }, .probe = tvp514x_probe, - .remove = __exit_p(tvp514x_remove), + .remove = tvp514x_remove, .id_table = tvp514x_id, }; -/** - * tvp514x_init - * - * Module init function - */ static int __init tvp514x_init(void) { - return i2c_add_driver(&tvp514x_i2c_driver); + return i2c_add_driver(&tvp514x_driver); } -/** - * tvp514x_cleanup - * - * Module exit function - */ -static void __exit tvp514x_cleanup(void) +static void __exit tvp514x_exit(void) { - i2c_del_driver(&tvp514x_i2c_driver); + i2c_del_driver(&tvp514x_driver); } module_init(tvp514x_init); -module_exit(tvp514x_cleanup); - -MODULE_AUTHOR("Texas Instruments"); -MODULE_DESCRIPTION("TVP514X linux decoder driver"); -MODULE_LICENSE("GPL"); +module_exit(tvp514x_exit); diff --git a/drivers/media/video/tvp514x_regs.h b/drivers/media/video/tvp514x_regs.h index 351620aeecc..18f29ad0dfe 100644 --- a/drivers/media/video/tvp514x_regs.h +++ b/drivers/media/video/tvp514x_regs.h @@ -284,14 +284,4 @@ struct tvp514x_reg { u32 val; }; -/** - 
* struct tvp514x_init_seq - Structure for TVP5146/47/46M2/47M1 power up - * Sequence. - * @ no_regs - Number of registers to write for power up sequence. - * @ init_reg_seq - Array of registers and respective value to write. - */ -struct tvp514x_init_seq { - unsigned int no_regs; - const struct tvp514x_reg *init_reg_seq; -}; #endif /* ifndef _TVP514X_REGS_H */ diff --git a/include/media/tvp514x.h b/include/media/tvp514x.h index 5e7ee968c6d..74387e83f5b 100644 --- a/include/media/tvp514x.h +++ b/include/media/tvp514x.h @@ -104,10 +104,6 @@ enum tvp514x_output { * @ vs_polarity: VSYNC Polarity configuration for current interface. */ struct tvp514x_platform_data { - char *master; - int (*power_set) (enum v4l2_power on); - int (*ifparm) (struct v4l2_ifparm *p); - int (*priv_data_set) (void *); /* Interface control params */ bool clk_polarity; bool hs_polarity; -- cgit v1.2.3-70-g09d2 From 7da8a6cb3e5b60e73b196f1c71031423e0791032 Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Mon, 6 Jul 2009 15:04:12 -0300 Subject: V4L/DVB (12248): v4l: vpfe capture bridge driver for DM355 and DM6446 This the vpfe capture bridge driver for doing video capture on DM355 and DM6446 evms. The ccdc hw modules register with the driver and are used for configuring the CCD Controller for a specific decoder interface. The driver also registers the sub devices required for a specific evm. More than one sub devices can be registered. This allows driver to switch dynamically to capture video from any sub device that is registered. Currently only one sub device (tvp5146) is supported. But in future this driver is expected to do capture from sensor devices such as Micron's MT9T001, MT9T031 and MT9P031 etc. The driver currently supports MMAP based IO. Reviewed by: Laurent Pinchart Reviewed by: Alexey Klimov Signed-off-by: Muralidharan Karicheri Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/davinci/vpfe_capture.c | 2124 ++++++++++++++++++++++++++++ include/media/davinci/vpfe_capture.h | 198 +++ include/media/davinci/vpfe_types.h | 51 + 3 files changed, 2373 insertions(+) create mode 100644 drivers/media/video/davinci/vpfe_capture.c create mode 100644 include/media/davinci/vpfe_capture.h create mode 100644 include/media/davinci/vpfe_types.h (limited to 'include') diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c new file mode 100644 index 00000000000..402ce43ef38 --- /dev/null +++ b/drivers/media/video/davinci/vpfe_capture.c @@ -0,0 +1,2124 @@ +/* + * Copyright (C) 2008-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Driver name : VPFE Capture driver
+ *    The VPFE Capture driver allows applications to capture and stream video
+ *    frames on DaVinci SoCs (DM6446, DM355 etc) from a YUV source such as
+ *    the TVP5146, or raw Bayer RGB image data from an image sensor
+ *    such as Micron's MT9T001, MT9T031 etc.
+ *
+ *    These SoCs have, in common, a Video Processing Subsystem (VPSS) that
+ *    consists of a Video Processing Front End (VPFE) for capturing
+ *    video/raw image data and a Video Processing Back End (VPBE) for
+ *    displaying YUV data through an in-built analog encoder or digital
+ *    LCD port. This driver is for capture through the VPFE. A typical EVM
+ *    using these SoCs has the following high-level configuration:
+ *
+ *    decoder(TVP5146/           YUV/
+ *        MT9T001)  -->  Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
+ *                        data input          |       |
+ *                                            V       |
+ *                                          SDRAM     |
+ *                                                    V
+ *                                             Image Processor
+ *                                                    |
+ *                                                    V
+ *                                                  SDRAM
+ *
+ *    Data flows from a decoder connected to the VPFE over a YUV embedded
+ *    (BT.656/BT.1120), separate sync, or raw Bayer RGB interface, and
+ *    reaches the VPFE input through an optional MUX (if more inputs are
+ *    to be interfaced on the EVM). The input data first passes through the
+ *    CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
+ *    does little or no processing on YUV data, and pre-processes raw Bayer
+ *    RGB data through modules such as Defect Pixel Correction (DFC),
+ *    Color Space Conversion (CSC), data gain/offset etc. After this, the
+ *    data can be written to SDRAM or fed to an image processing block
+ *    such as the IPIPE (on DM355 only).
+ *
+ *    Features supported
+ *		- MMAP IO
+ *		- Capture using TVP5146 over BT.656
+ *		- Support for interfacing decoders using the sub-device model
+ *		- Works with the DM355 or DM6446 CCDC to capture raw Bayer
+ *		  RGB/YUV data to SDRAM.
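As the commit message notes, the CCDC (ISIF) hardware block is driven by a separate hw module that registers itself with this bridge through vpfe_register_ccdc_device() and unregisters on module exit. A minimal caller-side sketch of that contract is shown here; the instance and function names, the "DM6446 CCDC" string and the include that carries the prototypes are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only: how a CCDC hw module might register with the
 * vpfe capture bridge. The .name field must match the vpfe_config->ccdc
 * string supplied by the board code, and every hw_ops callback checked by
 * BUG_ON() in vpfe_register_ccdc_device() must be filled in before
 * registering.
 */
#include <linux/module.h>
#include "ccdc_hw_device.h"	/* assumed to declare the register/unregister calls */

static struct ccdc_hw_device dm644x_ccdc_hw_dev = {
	.name	= "DM6446 CCDC",	/* assumed name string */
	.owner	= THIS_MODULE,
	/* .hw_ops = { .open = ..., .enable = ..., .configure = ..., ... } */
};

static int __init dm644x_ccdc_init(void)
{
	/* fails if vpfe-capture has not probed yet or the name does not match */
	return vpfe_register_ccdc_device(&dm644x_ccdc_hw_dev);
}

static void __exit dm644x_ccdc_exit(void)
{
	vpfe_unregister_ccdc_device(&dm644x_ccdc_hw_dev);
}

module_init(dm644x_ccdc_init);
module_exit(dm644x_ccdc_exit);
MODULE_LICENSE("GPL");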
+ * TODO list + * - Support multiple REQBUF after open + * - Support for de-allocating buffers through REQBUF + * - Support for Raw Bayer RGB capture + * - Support for chaining Image Processor + * - Support for static allocation of buffers + * - Support for USERPTR IO + * - Support for STREAMON before QBUF + * - Support for control ioctls + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "ccdc_hw_device.h" + +static int debug; +static u32 numbuffers = 3; +static u32 bufsize = (720 * 576 * 2); + +module_param(numbuffers, uint, S_IRUGO); +module_param(bufsize, uint, S_IRUGO); +module_param(debug, int, 0644); + +MODULE_PARM_DESC(numbuffers, "buffer count (default:3)"); +MODULE_PARM_DESC(bufsize, "buffer size in bytes (default:720 x 576 x 2)"); +MODULE_PARM_DESC(debug, "Debug level 0-1"); + +MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Texas Instruments"); + +/* standard information */ +struct vpfe_standard { + v4l2_std_id std_id; + unsigned int width; + unsigned int height; + struct v4l2_fract pixelaspect; + /* 0 - progressive, 1 - interlaced */ + int frame_format; +}; + +/* ccdc configuration */ +struct ccdc_config { + /* This make sure vpfe is probed and ready to go */ + int vpfe_probed; + /* name of ccdc device */ + char name[32]; + /* for storing mem maps for CCDC */ + int ccdc_addr_size; + void *__iomem ccdc_addr; +}; + +/* data structures */ +static struct vpfe_config_params config_params = { + .min_numbuffers = 3, + .numbuffers = 3, + .min_bufsize = 720 * 480 * 2, + .device_bufsize = 720 * 576 * 2, +}; + +/* ccdc device registered */ +static struct ccdc_hw_device *ccdc_dev; +/* lock for accessing ccdc information */ +static DEFINE_MUTEX(ccdc_lock); +/* ccdc configuration */ +static struct ccdc_config *ccdc_cfg; + +const struct vpfe_standard vpfe_standards[] = { + {V4L2_STD_525_60, 720, 480, {11, 10}, 1}, + {V4L2_STD_625_50, 720, 576, {54, 59}, 1}, +}; + +/* Used when raw Bayer image from ccdc is directly captured to SDRAM */ +static const struct vpfe_pixel_format vpfe_pix_fmts[] = { + { + .fmtdesc = { + .index = 0, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .description = "Bayer GrRBGb 8bit A-Law compr.", + .pixelformat = V4L2_PIX_FMT_SBGGR8, + }, + .bpp = 1, + }, + { + .fmtdesc = { + .index = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .description = "Bayer GrRBGb - 16bit", + .pixelformat = V4L2_PIX_FMT_SBGGR16, + }, + .bpp = 2, + }, + { + .fmtdesc = { + .index = 2, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .description = "Bayer GrRBGb 8bit DPCM compr.", + .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8, + }, + .bpp = 1, + }, + { + .fmtdesc = { + .index = 3, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .description = "YCbCr 4:2:2 Interleaved UYVY", + .pixelformat = V4L2_PIX_FMT_UYVY, + }, + .bpp = 2, + }, + { + .fmtdesc = { + .index = 4, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .description = "YCbCr 4:2:2 Interleaved YUYV", + .pixelformat = V4L2_PIX_FMT_YUYV, + }, + .bpp = 2, + }, + { + .fmtdesc = { + .index = 5, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + .description = "Y/CbCr 4:2:0 - Semi planar", + .pixelformat = V4L2_PIX_FMT_NV12, + }, + .bpp = 1, + }, +}; + +/* + * vpfe_lookup_pix_format() + * lookup an entry in the vpfe pix format table based on pix_format + */ +static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) { + if (pix_format == vpfe_pix_fmts[i].fmtdesc.pixelformat) + return &vpfe_pix_fmts[i]; + } + 
return NULL; +} + +/* + * vpfe_register_ccdc_device. CCDC module calls this to + * register with vpfe capture + */ +int vpfe_register_ccdc_device(struct ccdc_hw_device *dev) +{ + int ret = 0; + printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name); + + BUG_ON(!dev->hw_ops.open); + BUG_ON(!dev->hw_ops.enable); + BUG_ON(!dev->hw_ops.set_hw_if_params); + BUG_ON(!dev->hw_ops.configure); + BUG_ON(!dev->hw_ops.set_buftype); + BUG_ON(!dev->hw_ops.get_buftype); + BUG_ON(!dev->hw_ops.enum_pix); + BUG_ON(!dev->hw_ops.set_frame_format); + BUG_ON(!dev->hw_ops.get_frame_format); + BUG_ON(!dev->hw_ops.get_pixel_format); + BUG_ON(!dev->hw_ops.set_pixel_format); + BUG_ON(!dev->hw_ops.set_params); + BUG_ON(!dev->hw_ops.set_image_window); + BUG_ON(!dev->hw_ops.get_image_window); + BUG_ON(!dev->hw_ops.get_line_length); + BUG_ON(!dev->hw_ops.setfbaddr); + BUG_ON(!dev->hw_ops.getfid); + + mutex_lock(&ccdc_lock); + if (NULL == ccdc_cfg) { + /* + * TODO. Will this ever happen? if so, we need to fix it. + * Proabably we need to add the request to a linked list and + * walk through it during vpfe probe + */ + printk(KERN_ERR "vpfe capture not initialized\n"); + ret = -1; + goto unlock; + } + + if (strcmp(dev->name, ccdc_cfg->name)) { + /* ignore this ccdc */ + ret = -1; + goto unlock; + } + + if (ccdc_dev) { + printk(KERN_ERR "ccdc already registered\n"); + ret = -1; + goto unlock; + } + + ccdc_dev = dev; + dev->hw_ops.set_ccdc_base(ccdc_cfg->ccdc_addr, + ccdc_cfg->ccdc_addr_size); +unlock: + mutex_unlock(&ccdc_lock); + return ret; +} +EXPORT_SYMBOL(vpfe_register_ccdc_device); + +/* + * vpfe_unregister_ccdc_device. CCDC module calls this to + * unregister with vpfe capture + */ +void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev) +{ + if (NULL == dev) { + printk(KERN_ERR "invalid ccdc device ptr\n"); + return; + } + + printk(KERN_NOTICE "vpfe_unregister_ccdc_device, dev->name = %s\n", + dev->name); + + if (strcmp(dev->name, ccdc_cfg->name)) { + /* ignore this ccdc */ + return; + } + + mutex_lock(&ccdc_lock); + ccdc_dev = NULL; + mutex_unlock(&ccdc_lock); + return; +} +EXPORT_SYMBOL(vpfe_unregister_ccdc_device); + +/* + * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings + */ +static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev, + struct v4l2_format *f) +{ + struct v4l2_rect image_win; + enum ccdc_buftype buf_type; + enum ccdc_frmfmt frm_fmt; + + memset(f, 0, sizeof(*f)); + f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ccdc_dev->hw_ops.get_image_window(&image_win); + f->fmt.pix.width = image_win.width; + f->fmt.pix.height = image_win.height; + f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length(); + f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * + f->fmt.pix.height; + buf_type = ccdc_dev->hw_ops.get_buftype(); + f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format(); + frm_fmt = ccdc_dev->hw_ops.get_frame_format(); + if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) + f->fmt.pix.field = V4L2_FIELD_NONE; + else if (frm_fmt == CCDC_FRMFMT_INTERLACED) { + if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) + f->fmt.pix.field = V4L2_FIELD_INTERLACED; + else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) + f->fmt.pix.field = V4L2_FIELD_SEQ_TB; + else { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n"); + return -EINVAL; + } + } else { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n"); + return -EINVAL; + } + return 0; +} + +/* + * vpfe_config_ccdc_image_format() + * For a pix format, configure ccdc to setup the capture + */ +static int 
vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev) +{ + enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED; + int ret = 0; + + if (ccdc_dev->hw_ops.set_pixel_format( + vpfe_dev->fmt.fmt.pix.pixelformat) < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, + "couldn't set pix format in ccdc\n"); + return -EINVAL; + } + /* configure the image window */ + ccdc_dev->hw_ops.set_image_window(&vpfe_dev->crop); + + switch (vpfe_dev->fmt.fmt.pix.field) { + case V4L2_FIELD_INTERLACED: + /* do nothing, since it is default */ + ret = ccdc_dev->hw_ops.set_buftype( + CCDC_BUFTYPE_FLD_INTERLEAVED); + break; + case V4L2_FIELD_NONE: + frm_fmt = CCDC_FRMFMT_PROGRESSIVE; + /* buffer type only applicable for interlaced scan */ + break; + case V4L2_FIELD_SEQ_TB: + ret = ccdc_dev->hw_ops.set_buftype( + CCDC_BUFTYPE_FLD_SEPARATED); + break; + default: + return -EINVAL; + } + + /* set the frame format */ + if (!ret) + ret = ccdc_dev->hw_ops.set_frame_format(frm_fmt); + return ret; +} +/* + * vpfe_config_image_format() + * For a given standard, this functions sets up the default + * pix format & crop values in the vpfe device and ccdc. It first + * starts with defaults based values from the standard table. + * It then checks if sub device support g_fmt and then override the + * values based on that.Sets crop values to match with scan resolution + * starting at 0,0. It calls vpfe_config_ccdc_image_format() set the + * values in ccdc + */ +static int vpfe_config_image_format(struct vpfe_device *vpfe_dev, + const v4l2_std_id *std_id) +{ + struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev; + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) { + if (vpfe_standards[i].std_id & *std_id) { + vpfe_dev->std_info.active_pixels = + vpfe_standards[i].width; + vpfe_dev->std_info.active_lines = + vpfe_standards[i].height; + vpfe_dev->std_info.frame_format = + vpfe_standards[i].frame_format; + vpfe_dev->std_index = i; + break; + } + } + + if (i == ARRAY_SIZE(vpfe_standards)) { + v4l2_err(&vpfe_dev->v4l2_dev, "standard not supported\n"); + return -EINVAL; + } + + vpfe_dev->crop.top = 0; + vpfe_dev->crop.left = 0; + vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels; + vpfe_dev->crop.height = vpfe_dev->std_info.active_lines; + vpfe_dev->fmt.fmt.pix.width = vpfe_dev->crop.width; + vpfe_dev->fmt.fmt.pix.height = vpfe_dev->crop.height; + + /* first field and frame format based on standard frame format */ + if (vpfe_dev->std_info.frame_format) { + vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED; + /* assume V4L2_PIX_FMT_UYVY as default */ + vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY; + } else { + vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_NONE; + /* assume V4L2_PIX_FMT_SBGGR8 */ + vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8; + } + + /* if sub device supports g_fmt, override the defaults */ + ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, + sdinfo->grp_id, video, g_fmt, &vpfe_dev->fmt); + + if (ret && ret != -ENOIOCTLCMD) { + v4l2_err(&vpfe_dev->v4l2_dev, + "error in getting g_fmt from sub device\n"); + return ret; + } + + /* Sets the values in CCDC */ + ret = vpfe_config_ccdc_image_format(vpfe_dev); + if (ret) + return ret; + + /* Update the values of sizeimage and bytesperline */ + if (!ret) { + vpfe_dev->fmt.fmt.pix.bytesperline = + ccdc_dev->hw_ops.get_line_length(); + vpfe_dev->fmt.fmt.pix.sizeimage = + vpfe_dev->fmt.fmt.pix.bytesperline * + vpfe_dev->fmt.fmt.pix.height; + } + return ret; +} + +static int vpfe_initialize_device(struct vpfe_device *vpfe_dev) +{ + int ret = 
0; + + /* set first input of current subdevice as the current input */ + vpfe_dev->current_input = 0; + + /* set default standard */ + vpfe_dev->std_index = 0; + + /* Configure the default format information */ + ret = vpfe_config_image_format(vpfe_dev, + &vpfe_standards[vpfe_dev->std_index].std_id); + if (ret) + return ret; + + /* now open the ccdc device to initialize it */ + mutex_lock(&ccdc_lock); + if (NULL == ccdc_dev) { + v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n"); + ret = -ENODEV; + goto unlock; + } + + if (!try_module_get(ccdc_dev->owner)) { + v4l2_err(&vpfe_dev->v4l2_dev, "Couldn't lock ccdc module\n"); + ret = -ENODEV; + goto unlock; + } + ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev); + if (!ret) + vpfe_dev->initialized = 1; +unlock: + mutex_unlock(&ccdc_lock); + return ret; +} + +/* + * vpfe_open : It creates object of file handle structure and + * stores it in private_data member of filepointer + */ +static int vpfe_open(struct file *file) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_fh *fh; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_open\n"); + + if (!vpfe_dev->cfg->num_subdevs) { + v4l2_err(&vpfe_dev->v4l2_dev, "No decoder registered\n"); + return -ENODEV; + } + + /* Allocate memory for the file handle object */ + fh = kmalloc(sizeof(struct vpfe_fh), GFP_KERNEL); + if (NULL == fh) { + v4l2_err(&vpfe_dev->v4l2_dev, + "unable to allocate memory for file handle object\n"); + return -ENOMEM; + } + /* store pointer to fh in private_data member of file */ + file->private_data = fh; + fh->vpfe_dev = vpfe_dev; + mutex_lock(&vpfe_dev->lock); + /* If decoder is not initialized. initialize it */ + if (!vpfe_dev->initialized) { + if (vpfe_initialize_device(vpfe_dev)) { + mutex_unlock(&vpfe_dev->lock); + return -ENODEV; + } + } + /* Increment device usrs counter */ + vpfe_dev->usrs++; + /* Set io_allowed member to false */ + fh->io_allowed = 0; + /* Initialize priority of this instance to default priority */ + fh->prio = V4L2_PRIORITY_UNSET; + v4l2_prio_open(&vpfe_dev->prio, &fh->prio); + mutex_unlock(&vpfe_dev->lock); + return 0; +} + +static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev) +{ + unsigned long addr; + + vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next, + struct videobuf_buffer, queue); + list_del(&vpfe_dev->next_frm->queue); + vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE; + addr = videobuf_to_dma_contig(vpfe_dev->next_frm); + ccdc_dev->hw_ops.setfbaddr(addr); +} + +static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev) +{ + struct timeval timevalue; + + do_gettimeofday(&timevalue); + vpfe_dev->cur_frm->ts = timevalue; + vpfe_dev->cur_frm->state = VIDEOBUF_DONE; + vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage; + wake_up_interruptible(&vpfe_dev->cur_frm->done); + vpfe_dev->cur_frm = vpfe_dev->next_frm; +} + +/* ISR for VINT0*/ +static irqreturn_t vpfe_isr(int irq, void *dev_id) +{ + struct vpfe_device *vpfe_dev = dev_id; + enum v4l2_field field; + unsigned long addr; + int fid; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n"); + field = vpfe_dev->fmt.fmt.pix.field; + + /* if streaming not started, don't do anything */ + if (!vpfe_dev->started) + return IRQ_HANDLED; + + /* only for 6446 this will be applicable */ + if (NULL != ccdc_dev->hw_ops.reset) + ccdc_dev->hw_ops.reset(); + + if (field == V4L2_FIELD_NONE) { + /* handle progressive frame capture */ + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, + "frame format is progressive...\n"); + if 
(vpfe_dev->cur_frm != vpfe_dev->next_frm) + vpfe_process_buffer_complete(vpfe_dev); + return IRQ_HANDLED; + } + + /* interlaced or TB capture check which field we are in hardware */ + fid = ccdc_dev->hw_ops.getfid(); + + /* switch the software maintained field id */ + vpfe_dev->field_id ^= 1; + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "field id = %x:%x.\n", + fid, vpfe_dev->field_id); + if (fid == vpfe_dev->field_id) { + /* we are in-sync here,continue */ + if (fid == 0) { + /* + * One frame is just being captured. If the next frame + * is available, release the current frame and move on + */ + if (vpfe_dev->cur_frm != vpfe_dev->next_frm) + vpfe_process_buffer_complete(vpfe_dev); + /* + * based on whether the two fields are stored + * interleavely or separately in memory, reconfigure + * the CCDC memory address + */ + if (field == V4L2_FIELD_SEQ_TB) { + addr = + videobuf_to_dma_contig(vpfe_dev->cur_frm); + addr += vpfe_dev->field_off; + ccdc_dev->hw_ops.setfbaddr(addr); + } + return IRQ_HANDLED; + } + /* + * if one field is just being captured configure + * the next frame get the next frame from the empty + * queue if no frame is available hold on to the + * current buffer + */ + spin_lock(&vpfe_dev->dma_queue_lock); + if (!list_empty(&vpfe_dev->dma_queue) && + vpfe_dev->cur_frm == vpfe_dev->next_frm) + vpfe_schedule_next_buffer(vpfe_dev); + spin_unlock(&vpfe_dev->dma_queue_lock); + } else if (fid == 0) { + /* + * out of sync. Recover from any hardware out-of-sync. + * May loose one frame + */ + vpfe_dev->field_id = fid; + } + return IRQ_HANDLED; +} + +/* vdint1_isr - isr handler for VINT1 interrupt */ +static irqreturn_t vdint1_isr(int irq, void *dev_id) +{ + struct vpfe_device *vpfe_dev = dev_id; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n"); + + /* if streaming not started, don't do anything */ + if (!vpfe_dev->started) + return IRQ_HANDLED; + + spin_lock(&vpfe_dev->dma_queue_lock); + if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) && + !list_empty(&vpfe_dev->dma_queue) && + vpfe_dev->cur_frm == vpfe_dev->next_frm) + vpfe_schedule_next_buffer(vpfe_dev); + spin_unlock(&vpfe_dev->dma_queue_lock); + return IRQ_HANDLED; +} + +static void vpfe_detach_irq(struct vpfe_device *vpfe_dev) +{ + enum ccdc_frmfmt frame_format; + + frame_format = ccdc_dev->hw_ops.get_frame_format(); + if (frame_format == CCDC_FRMFMT_PROGRESSIVE) + free_irq(IRQ_VDINT1, vpfe_dev); +} + +static int vpfe_attach_irq(struct vpfe_device *vpfe_dev) +{ + enum ccdc_frmfmt frame_format; + + frame_format = ccdc_dev->hw_ops.get_frame_format(); + if (frame_format == CCDC_FRMFMT_PROGRESSIVE) { + return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr, + IRQF_DISABLED, "vpfe_capture1", + vpfe_dev); + } + return 0; +} + +/* vpfe_stop_ccdc_capture: stop streaming in ccdc/isif */ +static void vpfe_stop_ccdc_capture(struct vpfe_device *vpfe_dev) +{ + vpfe_dev->started = 0; + ccdc_dev->hw_ops.enable(0); + if (ccdc_dev->hw_ops.enable_out_to_sdram) + ccdc_dev->hw_ops.enable_out_to_sdram(0); +} + +/* + * vpfe_release : This function deletes buffer queue, frees the + * buffers and the vpfe file handle + */ +static int vpfe_release(struct file *file) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_fh *fh = file->private_data; + struct vpfe_subdev_info *sdinfo; + int ret; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n"); + + /* Get the device lock */ + mutex_lock(&vpfe_dev->lock); + /* if this instance is doing IO */ + if (fh->io_allowed) { + if (vpfe_dev->started) { + sdinfo = 
vpfe_dev->current_subdev; + ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, + sdinfo->grp_id, + video, s_stream, 0); + if (ret && (ret != -ENOIOCTLCMD)) + v4l2_err(&vpfe_dev->v4l2_dev, + "stream off failed in subdev\n"); + vpfe_stop_ccdc_capture(vpfe_dev); + vpfe_detach_irq(vpfe_dev); + videobuf_streamoff(&vpfe_dev->buffer_queue); + } + vpfe_dev->io_usrs = 0; + vpfe_dev->numbuffers = config_params.numbuffers; + } + + /* Decrement device usrs counter */ + vpfe_dev->usrs--; + /* Close the priority */ + v4l2_prio_close(&vpfe_dev->prio, &fh->prio); + /* If this is the last file handle */ + if (!vpfe_dev->usrs) { + vpfe_dev->initialized = 0; + if (ccdc_dev->hw_ops.close) + ccdc_dev->hw_ops.close(vpfe_dev->pdev); + module_put(ccdc_dev->owner); + } + mutex_unlock(&vpfe_dev->lock); + file->private_data = NULL; + /* Free memory allocated to file handle object */ + kfree(fh); + return 0; +} + +/* + * vpfe_mmap : It is used to map kernel space buffers + * into user spaces + */ +static int vpfe_mmap(struct file *file, struct vm_area_struct *vma) +{ + /* Get the device object and file handle object */ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n"); + + return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma); +} + +/* + * vpfe_poll: It is used for select/poll system call + */ +static unsigned int vpfe_poll(struct file *file, poll_table *wait) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n"); + + if (vpfe_dev->started) + return videobuf_poll_stream(file, + &vpfe_dev->buffer_queue, wait); + return 0; +} + +/* vpfe capture driver file operations */ +static const struct v4l2_file_operations vpfe_fops = { + .owner = THIS_MODULE, + .open = vpfe_open, + .release = vpfe_release, + .unlocked_ioctl = video_ioctl2, + .mmap = vpfe_mmap, + .poll = vpfe_poll +}; + +/* + * vpfe_check_format() + * This function adjust the input pixel format as per hardware + * capabilities and update the same in pixfmt. + * Following algorithm used :- + * + * If given pixformat is not in the vpfe list of pix formats or not + * supported by the hardware, current value of pixformat in the device + * is used + * If given field is not supported, then current field is used. If field + * is different from current, then it is matched with that from sub device. + * Minimum height is 2 lines for interlaced or tb field and 1 line for + * progressive. Maximum height is clamped to active active lines of scan + * Minimum width is 32 bytes in memory and width is clamped to active + * pixels of scan. + * bytesperline is a multiple of 32. + */ +static const struct vpfe_pixel_format * + vpfe_check_format(struct vpfe_device *vpfe_dev, + struct v4l2_pix_format *pixfmt) +{ + u32 min_height = 1, min_width = 32, max_width, max_height; + const struct vpfe_pixel_format *vpfe_pix_fmt; + u32 pix; + int temp, found; + + vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat); + if (NULL == vpfe_pix_fmt) { + /* + * use current pixel format in the vpfe device. 
We + * will find this pix format in the table + */ + pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat; + vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat); + } + + /* check if hw supports it */ + temp = 0; + found = 0; + while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) { + if (vpfe_pix_fmt->fmtdesc.pixelformat == pix) { + found = 1; + break; + } + temp++; + } + + if (!found) { + /* use current pixel format */ + pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat; + /* + * Since this is currently used in the vpfe device, we + * will find this pix format in the table + */ + vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat); + } + + /* check what field format is supported */ + if (pixfmt->field == V4L2_FIELD_ANY) { + /* if field is any, use current value as default */ + pixfmt->field = vpfe_dev->fmt.fmt.pix.field; + } + + /* + * if field is not same as current field in the vpfe device + * try matching the field with the sub device field + */ + if (vpfe_dev->fmt.fmt.pix.field != pixfmt->field) { + /* + * If field value is not in the supported fields, use current + * field used in the device as default + */ + switch (pixfmt->field) { + case V4L2_FIELD_INTERLACED: + case V4L2_FIELD_SEQ_TB: + /* if sub device is supporting progressive, use that */ + if (!vpfe_dev->std_info.frame_format) + pixfmt->field = V4L2_FIELD_NONE; + break; + case V4L2_FIELD_NONE: + if (vpfe_dev->std_info.frame_format) + pixfmt->field = V4L2_FIELD_INTERLACED; + break; + + default: + /* use current field as default */ + pixfmt->field = vpfe_dev->fmt.fmt.pix.field; + break; + } + } + + /* Now adjust image resolutions supported */ + if (pixfmt->field == V4L2_FIELD_INTERLACED || + pixfmt->field == V4L2_FIELD_SEQ_TB) + min_height = 2; + + max_width = vpfe_dev->std_info.active_pixels; + max_height = vpfe_dev->std_info.active_lines; + min_width /= vpfe_pix_fmt->bpp; + + v4l2_info(&vpfe_dev->v4l2_dev, "width = %d, height = %d, bpp = %d\n", + pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp); + + pixfmt->width = clamp((pixfmt->width), min_width, max_width); + pixfmt->height = clamp((pixfmt->height), min_height, max_height); + + /* If interlaced, adjust height to be a multiple of 2 */ + if (pixfmt->field == V4L2_FIELD_INTERLACED) + pixfmt->height &= (~1); + /* + * recalculate bytesperline and sizeimage since width + * and height might have changed + */ + pixfmt->bytesperline = (((pixfmt->width * vpfe_pix_fmt->bpp) + 31) + & ~31); + if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) + pixfmt->sizeimage = + pixfmt->bytesperline * pixfmt->height + + ((pixfmt->bytesperline * pixfmt->height) >> 1); + else + pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; + + v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height =" + " %d, bpp = %d, bytesperline = %d, sizeimage = %d\n", + pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp, + pixfmt->bytesperline, pixfmt->sizeimage); + return vpfe_pix_fmt; +} + +static int vpfe_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n"); + + cap->version = VPFE_CAPTURE_VERSION_CODE; + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver)); + strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info)); + strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card)); + return 0; +} + +static int vpfe_g_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *fmt) +{ 
+ struct vpfe_device *vpfe_dev = video_drvdata(file); + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n"); + /* Fill in the information about format */ + *fmt = vpfe_dev->fmt; + return ret; +} + +static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *fmt) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + const struct vpfe_pixel_format *pix_fmt; + int temp_index; + u32 pix; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n"); + + if (ccdc_dev->hw_ops.enum_pix(&pix, fmt->index) < 0) + return -EINVAL; + + /* Fill in the information about format */ + pix_fmt = vpfe_lookup_pix_format(pix); + if (NULL != pix_fmt) { + temp_index = fmt->index; + *fmt = pix_fmt->fmtdesc; + fmt->index = temp_index; + return 0; + } + return -EINVAL; +} + +static int vpfe_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + const struct vpfe_pixel_format *pix_fmts; + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n"); + + /* If streaming is started, return error */ + if (vpfe_dev->started) { + v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n"); + return -EBUSY; + } + + /* Check for valid frame format */ + pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix); + + if (NULL == pix_fmts) + return -EINVAL; + + /* store the pixel format in the device object */ + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + return ret; + + /* First detach any IRQ if currently attached */ + vpfe_detach_irq(vpfe_dev); + vpfe_dev->fmt = *fmt; + /* set image capture parameters in the ccdc */ + ret = vpfe_config_ccdc_image_format(vpfe_dev); + mutex_unlock(&vpfe_dev->lock); + return ret; +} + +static int vpfe_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + const struct vpfe_pixel_format *pix_fmts; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n"); + + pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix); + if (NULL == pix_fmts) + return -EINVAL; + return 0; +} + +/* + * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a + * given app input index + */ +static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe_dev, + int *subdev_index, + int *subdev_input_index, + int app_input_index) +{ + struct vpfe_config *cfg = vpfe_dev->cfg; + struct vpfe_subdev_info *sdinfo; + int i, j = 0; + + for (i = 0; i < cfg->num_subdevs; i++) { + sdinfo = &cfg->sub_devs[i]; + if (app_input_index < (j + sdinfo->num_inputs)) { + *subdev_index = i; + *subdev_input_index = app_input_index - j; + return 0; + } + j += sdinfo->num_inputs; + } + return -EINVAL; +} + +/* + * vpfe_get_app_input - Get app input index for a given subdev input index + * driver stores the input index of the current sub device and translate it + * when application request the current input + */ +static int vpfe_get_app_input_index(struct vpfe_device *vpfe_dev, + int *app_input_index) +{ + struct vpfe_config *cfg = vpfe_dev->cfg; + struct vpfe_subdev_info *sdinfo; + int i, j = 0; + + for (i = 0; i < cfg->num_subdevs; i++) { + sdinfo = &cfg->sub_devs[i]; + if (!strcmp(sdinfo->name, vpfe_dev->current_subdev->name)) { + if (vpfe_dev->current_input >= sdinfo->num_inputs) + return -1; + *app_input_index = j + vpfe_dev->current_input; + return 0; + } + j += sdinfo->num_inputs; + } + return -EINVAL; +} + +static int vpfe_enum_input(struct file *file, void 
*priv, + struct v4l2_input *inp) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + int subdev, index ; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n"); + + if (vpfe_get_subdev_input_index(vpfe_dev, + &subdev, + &index, + inp->index) < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, "input information not found" + " for the subdev\n"); + return -EINVAL; + } + sdinfo = &vpfe_dev->cfg->sub_devs[subdev]; + memcpy(inp, &sdinfo->inputs[index], sizeof(struct v4l2_input)); + return 0; +} + +static int vpfe_g_input(struct file *file, void *priv, unsigned int *index) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n"); + + return vpfe_get_app_input_index(vpfe_dev, index); +} + + +static int vpfe_s_input(struct file *file, void *priv, unsigned int index) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + int subdev_index, inp_index; + struct vpfe_route *route; + u32 input = 0, output = 0; + int ret = -EINVAL; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); + + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + return ret; + + /* + * If streaming is started return device busy + * error + */ + if (vpfe_dev->started) { + v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n"); + ret = -EBUSY; + goto unlock_out; + } + + if (vpfe_get_subdev_input_index(vpfe_dev, + &subdev_index, + &inp_index, + index) < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, "invalid input index\n"); + goto unlock_out; + } + + sdinfo = &vpfe_dev->cfg->sub_devs[subdev_index]; + route = &sdinfo->routes[inp_index]; + if (route && sdinfo->can_route) { + input = route->input; + output = route->output; + } + + ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id, + video, s_routing, input, output, 0); + + if (ret) { + v4l2_err(&vpfe_dev->v4l2_dev, + "vpfe_doioctl:error in setting input in decoder\n"); + ret = -EINVAL; + goto unlock_out; + } + vpfe_dev->current_subdev = sdinfo; + vpfe_dev->current_input = index; + vpfe_dev->std_index = 0; + + /* set the bus/interface parameter for the sub device in ccdc */ + ret = ccdc_dev->hw_ops.set_hw_if_params(&sdinfo->ccdc_if_params); + if (ret) + goto unlock_out; + + /* set the default image parameters in the device */ + ret = vpfe_config_image_format(vpfe_dev, + &vpfe_standards[vpfe_dev->std_index].std_id); +unlock_out: + mutex_unlock(&vpfe_dev->lock); + return ret; +} + +static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n"); + + ret = mutex_lock_interruptible(&vpfe_dev->lock); + sdinfo = vpfe_dev->current_subdev; + if (ret) + return ret; + /* Call querystd function of decoder device */ + ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id, + video, querystd, std_id); + mutex_unlock(&vpfe_dev->lock); + return ret; +} + +static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id *std_id) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); + + /* Call decoder driver function to set the standard */ + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + return ret; + + sdinfo = vpfe_dev->current_subdev; + /* If streaming is started, return device busy error */ + if 
(vpfe_dev->started) { + v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n"); + ret = -EBUSY; + goto unlock_out; + } + + ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id, + core, s_std, *std_id); + if (ret < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n"); + goto unlock_out; + } + ret = vpfe_config_image_format(vpfe_dev, std_id); + +unlock_out: + mutex_unlock(&vpfe_dev->lock); + return ret; +} + +static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n"); + + *std_id = vpfe_standards[vpfe_dev->std_index].std_id; + return 0; +} +/* + * Videobuf operations + */ +static int vpfe_videobuf_setup(struct videobuf_queue *vq, + unsigned int *count, + unsigned int *size) +{ + struct vpfe_fh *fh = vq->priv_data; + struct vpfe_device *vpfe_dev = fh->vpfe_dev; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n"); + *size = config_params.device_bufsize; + + if (*count < config_params.min_numbuffers) + *count = config_params.min_numbuffers; + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, + "count=%d, size=%d\n", *count, *size); + return 0; +} + +static int vpfe_videobuf_prepare(struct videobuf_queue *vq, + struct videobuf_buffer *vb, + enum v4l2_field field) +{ + struct vpfe_fh *fh = vq->priv_data; + struct vpfe_device *vpfe_dev = fh->vpfe_dev; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n"); + + /* If buffer is not initialized, initialize it */ + if (VIDEOBUF_NEEDS_INIT == vb->state) { + vb->width = vpfe_dev->fmt.fmt.pix.width; + vb->height = vpfe_dev->fmt.fmt.pix.height; + vb->size = vpfe_dev->fmt.fmt.pix.sizeimage; + vb->field = field; + } + vb->state = VIDEOBUF_PREPARED; + return 0; +} + +static void vpfe_videobuf_queue(struct videobuf_queue *vq, + struct videobuf_buffer *vb) +{ + /* Get the file handle object and device object */ + struct vpfe_fh *fh = vq->priv_data; + struct vpfe_device *vpfe_dev = fh->vpfe_dev; + unsigned long flags; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue\n"); + + /* add the buffer to the DMA queue */ + spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags); + list_add_tail(&vb->queue, &vpfe_dev->dma_queue); + spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags); + + /* Change state of the buffer */ + vb->state = VIDEOBUF_QUEUED; +} + +static void vpfe_videobuf_release(struct videobuf_queue *vq, + struct videobuf_buffer *vb) +{ + struct vpfe_fh *fh = vq->priv_data; + struct vpfe_device *vpfe_dev = fh->vpfe_dev; + unsigned long flags; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_videobuf_release\n"); + + /* + * We need to flush the buffer from the dma queue since + * they are de-allocated + */ + spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags); + INIT_LIST_HEAD(&vpfe_dev->dma_queue); + spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags); + videobuf_dma_contig_free(vq, vb); + vb->state = VIDEOBUF_NEEDS_INIT; +} + +static struct videobuf_queue_ops vpfe_videobuf_qops = { + .buf_setup = vpfe_videobuf_setup, + .buf_prepare = vpfe_videobuf_prepare, + .buf_queue = vpfe_videobuf_queue, + .buf_release = vpfe_videobuf_release, +}; + +/* + * vpfe_reqbufs. currently support REQBUF only once opening + * the device. 
+ */ +static int vpfe_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *req_buf) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_fh *fh = file->private_data; + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n"); + + if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type) { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n"); + return -EINVAL; + } + + if (V4L2_MEMORY_USERPTR == req_buf->memory) { + /* we don't support user ptr IO */ + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs:" + " USERPTR IO not supported\n"); + return -EINVAL; + } + + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + return ret; + + if (vpfe_dev->io_usrs != 0) { + v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); + ret = -EBUSY; + goto unlock_out; + } + + vpfe_dev->memory = req_buf->memory; + videobuf_queue_dma_contig_init(&vpfe_dev->buffer_queue, + &vpfe_videobuf_qops, + NULL, + &vpfe_dev->irqlock, + req_buf->type, + vpfe_dev->fmt.fmt.pix.field, + sizeof(struct videobuf_buffer), + fh); + + fh->io_allowed = 1; + vpfe_dev->io_usrs = 1; + INIT_LIST_HEAD(&vpfe_dev->dma_queue); + ret = videobuf_reqbufs(&vpfe_dev->buffer_queue, req_buf); +unlock_out: + mutex_unlock(&vpfe_dev->lock); + return ret; +} + +static int vpfe_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n"); + + if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); + return -EINVAL; + } + + if (vpfe_dev->memory != V4L2_MEMORY_MMAP) { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n"); + return -EINVAL; + } + /* Call videobuf_querybuf to get information */ + return videobuf_querybuf(&vpfe_dev->buffer_queue, buf); +} + +static int vpfe_qbuf(struct file *file, void *priv, + struct v4l2_buffer *p) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_fh *fh = file->private_data; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n"); + + if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type) { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); + return -EINVAL; + } + + /* + * If this file handle is not allowed to do IO, + * return error + */ + if (!fh->io_allowed) { + v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n"); + return -EACCES; + } + return videobuf_qbuf(&vpfe_dev->buffer_queue, p); +} + +static int vpfe_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n"); + + if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); + return -EINVAL; + } + return videobuf_dqbuf(&vpfe_dev->buffer_queue, + buf, file->f_flags & O_NONBLOCK); +} + +/* + * vpfe_calculate_offsets : This function calculates buffers offset + * for top and bottom field + */ +static void vpfe_calculate_offsets(struct vpfe_device *vpfe_dev) +{ + struct v4l2_rect image_win; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_calculate_offsets\n"); + + ccdc_dev->hw_ops.get_image_window(&image_win); + vpfe_dev->field_off = image_win.height * image_win.width; +} + +/* vpfe_start_ccdc_capture: start streaming in ccdc/isif */ +static void vpfe_start_ccdc_capture(struct vpfe_device *vpfe_dev) +{ + ccdc_dev->hw_ops.enable(1); + if (ccdc_dev->hw_ops.enable_out_to_sdram) + ccdc_dev->hw_ops.enable_out_to_sdram(1); + vpfe_dev->started = 1; +} + +/* + * 
vpfe_streamon. Assume the DMA queue is not empty. + * application is expected to call QBUF before calling + * this ioctl. If not, driver returns error + */ +static int vpfe_streamon(struct file *file, void *priv, + enum v4l2_buf_type buf_type) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_fh *fh = file->private_data; + struct vpfe_subdev_info *sdinfo; + unsigned long addr; + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n"); + + if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); + return -EINVAL; + } + + /* If file handle is not allowed IO, return error */ + if (!fh->io_allowed) { + v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n"); + return -EACCES; + } + + sdinfo = vpfe_dev->current_subdev; + ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id, + video, s_stream, 1); + + if (ret && (ret != -ENOIOCTLCMD)) { + v4l2_err(&vpfe_dev->v4l2_dev, "stream on failed in subdev\n"); + return -EINVAL; + } + + /* If buffer queue is empty, return error */ + if (list_empty(&vpfe_dev->buffer_queue.stream)) { + v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n"); + return -EIO; + } + + /* Call videobuf_streamon to start streaming * in videobuf */ + ret = videobuf_streamon(&vpfe_dev->buffer_queue); + if (ret) + return ret; + + + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + goto streamoff; + /* Get the next frame from the buffer queue */ + vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next, + struct videobuf_buffer, queue); + vpfe_dev->cur_frm = vpfe_dev->next_frm; + /* Remove buffer from the buffer queue */ + list_del(&vpfe_dev->cur_frm->queue); + /* Mark state of the current frame to active */ + vpfe_dev->cur_frm->state = VIDEOBUF_ACTIVE; + /* Initialize field_id and started member */ + vpfe_dev->field_id = 0; + addr = videobuf_to_dma_contig(vpfe_dev->cur_frm); + + /* Calculate field offset */ + vpfe_calculate_offsets(vpfe_dev); + + if (vpfe_attach_irq(vpfe_dev) < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, + "Error in attaching interrupt handle\n"); + ret = -EFAULT; + goto unlock_out; + } + if (ccdc_dev->hw_ops.configure() < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, + "Error in configuring ccdc\n"); + ret = -EINVAL; + goto unlock_out; + } + ccdc_dev->hw_ops.setfbaddr((unsigned long)(addr)); + vpfe_start_ccdc_capture(vpfe_dev); + mutex_unlock(&vpfe_dev->lock); + return ret; +unlock_out: + mutex_unlock(&vpfe_dev->lock); +streamoff: + ret = videobuf_streamoff(&vpfe_dev->buffer_queue); + return ret; +} + +static int vpfe_streamoff(struct file *file, void *priv, + enum v4l2_buf_type buf_type) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + struct vpfe_fh *fh = file->private_data; + struct vpfe_subdev_info *sdinfo; + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n"); + + if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) { + v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); + return -EINVAL; + } + + /* If io is allowed for this file handle, return error */ + if (!fh->io_allowed) { + v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n"); + return -EACCES; + } + + /* If streaming is not started, return error */ + if (!vpfe_dev->started) { + v4l2_err(&vpfe_dev->v4l2_dev, "device started\n"); + return -EINVAL; + } + + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + return ret; + + vpfe_stop_ccdc_capture(vpfe_dev); + vpfe_detach_irq(vpfe_dev); + + sdinfo = vpfe_dev->current_subdev; + ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, 
sdinfo->grp_id, + video, s_stream, 0); + + if (ret && (ret != -ENOIOCTLCMD)) + v4l2_err(&vpfe_dev->v4l2_dev, "stream off failed in subdev\n"); + ret = videobuf_streamoff(&vpfe_dev->buffer_queue); + mutex_unlock(&vpfe_dev->lock); + return ret; +} + +static int vpfe_cropcap(struct file *file, void *priv, + struct v4l2_cropcap *crop) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_cropcap\n"); + + if (vpfe_dev->std_index > ARRAY_SIZE(vpfe_standards)) + return -EINVAL; + + memset(crop, 0, sizeof(struct v4l2_cropcap)); + crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + crop->bounds.width = crop->defrect.width = + vpfe_standards[vpfe_dev->std_index].width; + crop->bounds.height = crop->defrect.height = + vpfe_standards[vpfe_dev->std_index].height; + crop->pixelaspect = vpfe_standards[vpfe_dev->std_index].pixelaspect; + return 0; +} + +static int vpfe_g_crop(struct file *file, void *priv, + struct v4l2_crop *crop) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_crop\n"); + + crop->c = vpfe_dev->crop; + return 0; +} + +static int vpfe_s_crop(struct file *file, void *priv, + struct v4l2_crop *crop) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_crop\n"); + + if (vpfe_dev->started) { + /* make sure streaming is not started */ + v4l2_err(&vpfe_dev->v4l2_dev, + "Cannot change crop when streaming is ON\n"); + return -EBUSY; + } + + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + return ret; + + if (crop->c.top < 0 || crop->c.left < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, + "doesn't support negative values for top & left\n"); + ret = -EINVAL; + goto unlock_out; + } + + /* adjust the width to 16 pixel boundry */ + crop->c.width = ((crop->c.width + 15) & ~0xf); + + /* make sure parameters are valid */ + if ((crop->c.left + crop->c.width > + vpfe_dev->std_info.active_pixels) || + (crop->c.top + crop->c.height > + vpfe_dev->std_info.active_lines)) { + v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_CROP params\n"); + ret = -EINVAL; + goto unlock_out; + } + ccdc_dev->hw_ops.set_image_window(&crop->c); + vpfe_dev->fmt.fmt.pix.width = crop->c.width; + vpfe_dev->fmt.fmt.pix.height = crop->c.height; + vpfe_dev->fmt.fmt.pix.bytesperline = + ccdc_dev->hw_ops.get_line_length(); + vpfe_dev->fmt.fmt.pix.sizeimage = + vpfe_dev->fmt.fmt.pix.bytesperline * + vpfe_dev->fmt.fmt.pix.height; + vpfe_dev->crop = crop->c; +unlock_out: + mutex_unlock(&vpfe_dev->lock); + return ret; +} + + +static long vpfe_param_handler(struct file *file, void *priv, + int cmd, void *param) +{ + struct vpfe_device *vpfe_dev = video_drvdata(file); + int ret = 0; + + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n"); + + if (vpfe_dev->started) { + /* only allowed if streaming is not started */ + v4l2_err(&vpfe_dev->v4l2_dev, "device already started\n"); + return -EBUSY; + } + + ret = mutex_lock_interruptible(&vpfe_dev->lock); + if (ret) + return ret; + + switch (cmd) { + case VPFE_CMD_S_CCDC_RAW_PARAMS: + v4l2_warn(&vpfe_dev->v4l2_dev, + "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); + ret = ccdc_dev->hw_ops.set_params(param); + if (ret) { + v4l2_err(&vpfe_dev->v4l2_dev, + "Error in setting parameters in CCDC\n"); + goto unlock_out; + } + if (vpfe_get_ccdc_image_format(vpfe_dev, &vpfe_dev->fmt) < 0) { + v4l2_err(&vpfe_dev->v4l2_dev, + "Invalid image format at CCDC\n"); + goto unlock_out; + } + break; + default: + ret = -EINVAL; + } 
+unlock_out: + mutex_unlock(&vpfe_dev->lock); + return ret; +} + + +/* vpfe capture ioctl operations */ +static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { + .vidioc_querycap = vpfe_querycap, + .vidioc_g_fmt_vid_cap = vpfe_g_fmt_vid_cap, + .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = vpfe_s_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = vpfe_try_fmt_vid_cap, + .vidioc_enum_input = vpfe_enum_input, + .vidioc_g_input = vpfe_g_input, + .vidioc_s_input = vpfe_s_input, + .vidioc_querystd = vpfe_querystd, + .vidioc_s_std = vpfe_s_std, + .vidioc_g_std = vpfe_g_std, + .vidioc_reqbufs = vpfe_reqbufs, + .vidioc_querybuf = vpfe_querybuf, + .vidioc_qbuf = vpfe_qbuf, + .vidioc_dqbuf = vpfe_dqbuf, + .vidioc_streamon = vpfe_streamon, + .vidioc_streamoff = vpfe_streamoff, + .vidioc_cropcap = vpfe_cropcap, + .vidioc_g_crop = vpfe_g_crop, + .vidioc_s_crop = vpfe_s_crop, + .vidioc_default = vpfe_param_handler, +}; + +static struct vpfe_device *vpfe_initialize(void) +{ + struct vpfe_device *vpfe_dev; + + /* Default number of buffers should be 3 */ + if ((numbuffers > 0) && + (numbuffers < config_params.min_numbuffers)) + numbuffers = config_params.min_numbuffers; + + /* + * Set buffer size to min buffers size if invalid buffer size is + * given + */ + if (bufsize < config_params.min_bufsize) + bufsize = config_params.min_bufsize; + + config_params.numbuffers = numbuffers; + + if (numbuffers) + config_params.device_bufsize = bufsize; + + /* Allocate memory for device objects */ + vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL); + + return vpfe_dev; +} + +static void vpfe_disable_clock(struct vpfe_device *vpfe_dev) +{ + struct vpfe_config *vpfe_cfg = vpfe_dev->cfg; + + clk_disable(vpfe_cfg->vpssclk); + clk_put(vpfe_cfg->vpssclk); + clk_disable(vpfe_cfg->slaveclk); + clk_put(vpfe_cfg->slaveclk); + v4l2_info(vpfe_dev->pdev->driver, + "vpfe vpss master & slave clocks disabled\n"); +} + +static int vpfe_enable_clock(struct vpfe_device *vpfe_dev) +{ + struct vpfe_config *vpfe_cfg = vpfe_dev->cfg; + int ret = -ENOENT; + + vpfe_cfg->vpssclk = clk_get(vpfe_dev->pdev, "vpss_master"); + if (NULL == vpfe_cfg->vpssclk) { + v4l2_err(vpfe_dev->pdev->driver, "No clock defined for" + "vpss_master\n"); + return ret; + } + + if (clk_enable(vpfe_cfg->vpssclk)) { + v4l2_err(vpfe_dev->pdev->driver, + "vpfe vpss master clock not enabled\n"); + goto out; + } + v4l2_info(vpfe_dev->pdev->driver, + "vpfe vpss master clock enabled\n"); + + vpfe_cfg->slaveclk = clk_get(vpfe_dev->pdev, "vpss_slave"); + if (NULL == vpfe_cfg->slaveclk) { + v4l2_err(vpfe_dev->pdev->driver, + "No clock defined for vpss slave\n"); + goto out; + } + + if (clk_enable(vpfe_cfg->slaveclk)) { + v4l2_err(vpfe_dev->pdev->driver, + "vpfe vpss slave clock not enabled\n"); + goto out; + } + v4l2_info(vpfe_dev->pdev->driver, "vpfe vpss slave clock enabled\n"); + return 0; +out: + if (vpfe_cfg->vpssclk) + clk_put(vpfe_cfg->vpssclk); + if (vpfe_cfg->slaveclk) + clk_put(vpfe_cfg->slaveclk); + + return -1; +} + +/* + * vpfe_probe : This function creates device entries by register + * itself to the V4L2 driver and initializes fields of each + * device objects + */ +static __init int vpfe_probe(struct platform_device *pdev) +{ + struct vpfe_subdev_info *sdinfo; + struct vpfe_config *vpfe_cfg; + struct resource *res1; + struct vpfe_device *vpfe_dev; + struct i2c_adapter *i2c_adap; + struct video_device *vfd; + int ret = -ENOMEM, i, j; + int num_subdevs = 0; + + /* Get the pointer to the device object */ + vpfe_dev = vpfe_initialize(); + + if 
(!vpfe_dev) { + v4l2_err(pdev->dev.driver, + "Failed to allocate memory for vpfe_dev\n"); + return ret; + } + + vpfe_dev->pdev = &pdev->dev; + + if (NULL == pdev->dev.platform_data) { + v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n"); + ret = -ENOENT; + goto probe_free_dev_mem; + } + + vpfe_cfg = pdev->dev.platform_data; + vpfe_dev->cfg = vpfe_cfg; + if (NULL == vpfe_cfg->ccdc || + NULL == vpfe_cfg->card_name || + NULL == vpfe_cfg->sub_devs) { + v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n"); + ret = -ENOENT; + goto probe_free_dev_mem; + } + + /* enable vpss clocks */ + ret = vpfe_enable_clock(vpfe_dev); + if (ret) + goto probe_free_dev_mem; + + mutex_lock(&ccdc_lock); + /* Allocate memory for ccdc configuration */ + ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL); + if (NULL == ccdc_cfg) { + v4l2_err(pdev->dev.driver, + "Memory allocation failed for ccdc_cfg\n"); + goto probe_disable_clock; + } + + strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32); + /* Get VINT0 irq resource */ + res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res1) { + v4l2_err(pdev->dev.driver, + "Unable to get interrupt for VINT0\n"); + ret = -ENOENT; + goto probe_disable_clock; + } + vpfe_dev->ccdc_irq0 = res1->start; + + /* Get VINT1 irq resource */ + res1 = platform_get_resource(pdev, + IORESOURCE_IRQ, 1); + if (!res1) { + v4l2_err(pdev->dev.driver, + "Unable to get interrupt for VINT1\n"); + ret = -ENOENT; + goto probe_disable_clock; + } + vpfe_dev->ccdc_irq1 = res1->start; + + /* Get address base of CCDC */ + res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res1) { + v4l2_err(pdev->dev.driver, + "Unable to get register address map\n"); + ret = -ENOENT; + goto probe_disable_clock; + } + + ccdc_cfg->ccdc_addr_size = res1->end - res1->start + 1; + if (!request_mem_region(res1->start, ccdc_cfg->ccdc_addr_size, + pdev->dev.driver->name)) { + v4l2_err(pdev->dev.driver, + "Failed request_mem_region for ccdc base\n"); + ret = -ENXIO; + goto probe_disable_clock; + } + ccdc_cfg->ccdc_addr = ioremap_nocache(res1->start, + ccdc_cfg->ccdc_addr_size); + if (!ccdc_cfg->ccdc_addr) { + v4l2_err(pdev->dev.driver, "Unable to ioremap ccdc addr\n"); + ret = -ENXIO; + goto probe_out_release_mem1; + } + + ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED, + "vpfe_capture0", vpfe_dev); + + if (0 != ret) { + v4l2_err(pdev->dev.driver, "Unable to request interrupt\n"); + goto probe_out_unmap1; + } + + /* Allocate memory for video device */ + vfd = video_device_alloc(); + if (NULL == vfd) { + ret = -ENOMEM; + v4l2_err(pdev->dev.driver, + "Unable to alloc video device\n"); + goto probe_out_release_irq; + } + + /* Initialize field of video device */ + vfd->release = video_device_release; + vfd->fops = &vpfe_fops; + vfd->ioctl_ops = &vpfe_ioctl_ops; + vfd->minor = -1; + vfd->tvnorms = 0; + vfd->current_norm = V4L2_STD_PAL; + vfd->v4l2_dev = &vpfe_dev->v4l2_dev; + snprintf(vfd->name, sizeof(vfd->name), + "%s_V%d.%d.%d", + CAPTURE_DRV_NAME, + (VPFE_CAPTURE_VERSION_CODE >> 16) & 0xff, + (VPFE_CAPTURE_VERSION_CODE >> 8) & 0xff, + (VPFE_CAPTURE_VERSION_CODE) & 0xff); + /* Set video_dev to the video device */ + vpfe_dev->video_dev = vfd; + + ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev); + if (ret) { + v4l2_err(pdev->dev.driver, + "Unable to register v4l2 device.\n"); + goto probe_out_video_release; + } + v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n"); + spin_lock_init(&vpfe_dev->irqlock); + spin_lock_init(&vpfe_dev->dma_queue_lock); + 
mutex_init(&vpfe_dev->lock); + + /* Initialize field of the device objects */ + vpfe_dev->numbuffers = config_params.numbuffers; + + /* Initialize prio member of device object */ + v4l2_prio_init(&vpfe_dev->prio); + /* register video device */ + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, + "trying to register vpfe device.\n"); + v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, + "video_dev=%x\n", (int)&vpfe_dev->video_dev); + vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ret = video_register_device(vpfe_dev->video_dev, + VFL_TYPE_GRABBER, -1); + + if (ret) { + v4l2_err(pdev->dev.driver, + "Unable to register video device.\n"); + goto probe_out_v4l2_unregister; + } + + v4l2_info(&vpfe_dev->v4l2_dev, "video device registered\n"); + /* set the driver data in platform device */ + platform_set_drvdata(pdev, vpfe_dev); + /* set driver private data */ + video_set_drvdata(vpfe_dev->video_dev, vpfe_dev); + i2c_adap = i2c_get_adapter(1); + vpfe_cfg = pdev->dev.platform_data; + num_subdevs = vpfe_cfg->num_subdevs; + vpfe_dev->sd = kmalloc(sizeof(struct v4l2_subdev *) * num_subdevs, + GFP_KERNEL); + if (NULL == vpfe_dev->sd) { + v4l2_err(&vpfe_dev->v4l2_dev, + "unable to allocate memory for subdevice pointers\n"); + ret = -ENOMEM; + goto probe_out_video_unregister; + } + + for (i = 0; i < num_subdevs; i++) { + struct v4l2_input *inps; + + sdinfo = &vpfe_cfg->sub_devs[i]; + + /* Load up the subdevice */ + vpfe_dev->sd[i] = + v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev, + i2c_adap, + sdinfo->name, + &sdinfo->board_info, + NULL); + if (vpfe_dev->sd[i]) { + v4l2_info(&vpfe_dev->v4l2_dev, + "v4l2 sub device %s registered\n", + sdinfo->name); + vpfe_dev->sd[i]->grp_id = sdinfo->grp_id; + /* update tvnorms from the sub devices */ + for (j = 0; j < sdinfo->num_inputs; j++) { + inps = &sdinfo->inputs[j]; + vfd->tvnorms |= inps->std; + } + } else { + v4l2_info(&vpfe_dev->v4l2_dev, + "v4l2 sub device %s register fails\n", + sdinfo->name); + goto probe_sd_out; + } + } + + /* set first sub device as current one */ + vpfe_dev->current_subdev = &vpfe_cfg->sub_devs[0]; + + /* We have at least one sub device to work with */ + mutex_unlock(&ccdc_lock); + return 0; + +probe_sd_out: + kfree(vpfe_dev->sd); +probe_out_video_unregister: + video_unregister_device(vpfe_dev->video_dev); +probe_out_v4l2_unregister: + v4l2_device_unregister(&vpfe_dev->v4l2_dev); +probe_out_video_release: + if (vpfe_dev->video_dev->minor == -1) + video_device_release(vpfe_dev->video_dev); +probe_out_release_irq: + free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); +probe_out_unmap1: + iounmap(ccdc_cfg->ccdc_addr); +probe_out_release_mem1: + release_mem_region(res1->start, res1->end - res1->start + 1); +probe_disable_clock: + vpfe_disable_clock(vpfe_dev); + mutex_unlock(&ccdc_lock); + kfree(ccdc_cfg); +probe_free_dev_mem: + kfree(vpfe_dev); + return ret; +} + +/* + * vpfe_remove : It un-register device from V4L2 driver + */ +static int vpfe_remove(struct platform_device *pdev) +{ + struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev); + struct resource *res; + + v4l2_info(pdev->dev.driver, "vpfe_remove\n"); + + free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); + kfree(vpfe_dev->sd); + v4l2_device_unregister(&vpfe_dev->v4l2_dev); + video_unregister_device(vpfe_dev->video_dev); + mutex_lock(&ccdc_lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, res->end - res->start + 1); + iounmap(ccdc_cfg->ccdc_addr); + mutex_unlock(&ccdc_lock); + vpfe_disable_clock(vpfe_dev); + kfree(vpfe_dev); + kfree(ccdc_cfg); + return 0; +} + 
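
For context on how the .vidioc_default entry in the ioctl table above is reached from user space: the driver-private ioctl VPFE_CMD_S_CCDC_RAW_PARAMS (defined in vpfe_capture.h later in this patch) carries a raw-capture parameter block to vpfe_param_handler, which hands it to the registered CCDC module. The following is only a minimal user-space sketch of that call path, not part of the patch: the /dev/video0 node name, the installed header paths, and the field values are illustrative assumptions, and the DM355 parameter layout from the companion dm355_ccdc.h patch below is used.

	/* sketch only: node name, header paths and values are assumptions */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>		/* BASE_VIDIOC_PRIVATE */
	#include <media/davinci/vpfe_capture.h>	/* VPFE_CMD_S_CCDC_RAW_PARAMS */
	#include <media/davinci/dm355_ccdc.h>	/* struct ccdc_config_params_raw */

	int main(void)
	{
		struct ccdc_config_params_raw raw;
		int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return 1;

		memset(&raw, 0, sizeof(raw));
		raw.data_sz = CCDC_DATA_10BITS;		/* illustrative values only */
		raw.datasft = CCDC_DATA_NO_SHIFT;

		/* dispatched through .vidioc_default to vpfe_param_handler */
		ioctl(fd, VPFE_CMD_S_CCDC_RAW_PARAMS, &raw);

		close(fd);
		return 0;
	}
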
+static int +vpfe_suspend(struct device *dev) +{ + /* add suspend code here later */ + return -1; +} + +static int +vpfe_resume(struct device *dev) +{ + /* add resume code here later */ + return -1; +} + +static struct dev_pm_ops vpfe_dev_pm_ops = { + .suspend = vpfe_suspend, + .resume = vpfe_resume, +}; + +static struct platform_driver vpfe_driver = { + .driver = { + .name = CAPTURE_DRV_NAME, + .owner = THIS_MODULE, + .pm = &vpfe_dev_pm_ops, + }, + .probe = vpfe_probe, + .remove = __devexit_p(vpfe_remove), +}; + +static __init int vpfe_init(void) +{ + printk(KERN_NOTICE "vpfe_init\n"); + /* Register driver to the kernel */ + return platform_driver_register(&vpfe_driver); +} + +/* + * vpfe_cleanup : This function un-registers device driver + */ +static void vpfe_cleanup(void) +{ + platform_driver_unregister(&vpfe_driver); +} + +module_init(vpfe_init); +module_exit(vpfe_cleanup); diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h new file mode 100644 index 00000000000..71d8982e13f --- /dev/null +++ b/include/media/davinci/vpfe_capture.h @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2008-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _VPFE_CAPTURE_H +#define _VPFE_CAPTURE_H + +#ifdef __KERNEL__ + +/* Header files */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define VPFE_CAPTURE_NUM_DECODERS 5 + +/* Macros */ +#define VPFE_MAJOR_RELEASE 0 +#define VPFE_MINOR_RELEASE 0 +#define VPFE_BUILD 1 +#define VPFE_CAPTURE_VERSION_CODE ((VPFE_MAJOR_RELEASE << 16) | \ + (VPFE_MINOR_RELEASE << 8) | \ + VPFE_BUILD) + +#define CAPTURE_DRV_NAME "vpfe-capture" + +struct vpfe_pixel_format { + struct v4l2_fmtdesc fmtdesc; + /* bytes per pixel */ + int bpp; +}; + +struct vpfe_std_info { + int active_pixels; + int active_lines; + /* current frame format */ + int frame_format; +}; + +struct vpfe_route { + u32 input; + u32 output; +}; + +struct vpfe_subdev_info { + /* Sub device name */ + char name[32]; + /* Sub device group id */ + int grp_id; + /* Number of inputs supported */ + int num_inputs; + /* inputs available at the sub device */ + struct v4l2_input *inputs; + /* Sub dev routing information for each input */ + struct vpfe_route *routes; + /* check if sub dev supports routing */ + int can_route; + /* ccdc bus/interface configuration */ + struct vpfe_hw_if_param ccdc_if_params; + /* i2c subdevice board info */ + struct i2c_board_info board_info; +}; + +struct vpfe_config { + /* Number of sub devices connected to vpfe */ + int num_subdevs; + /* information about each subdev */ + struct vpfe_subdev_info *sub_devs; + /* evm card info */ + char *card_name; + /* ccdc name */ + char *ccdc; + /* vpfe clock */ + struct clk *vpssclk; + struct clk *slaveclk; +}; + +struct vpfe_device { + /* V4l2 specific parameters */ + /* Identifies 
video device for this channel */ + struct video_device *video_dev; + /* sub devices */ + struct v4l2_subdev **sd; + /* vpfe cfg */ + struct vpfe_config *cfg; + /* V4l2 device */ + struct v4l2_device v4l2_dev; + /* parent device */ + struct device *pdev; + /* Used to keep track of state of the priority */ + struct v4l2_prio_state prio; + /* number of open instances of the channel */ + u32 usrs; + /* Indicates id of the field which is being displayed */ + u32 field_id; + /* flag to indicate whether decoder is initialized */ + u8 initialized; + /* current interface type */ + struct vpfe_hw_if_param vpfe_if_params; + /* ptr to currently selected sub device */ + struct vpfe_subdev_info *current_subdev; + /* current input at the sub device */ + int current_input; + /* Keeps track of the information about the standard */ + struct vpfe_std_info std_info; + /* std index into std table */ + int std_index; + /* CCDC IRQs used when CCDC/ISIF output to SDRAM */ + unsigned int ccdc_irq0; + unsigned int ccdc_irq1; + /* number of buffers in fbuffers */ + u32 numbuffers; + /* List of buffer pointers for storing frames */ + u8 *fbuffers[VIDEO_MAX_FRAME]; + /* Pointer pointing to current v4l2_buffer */ + struct videobuf_buffer *cur_frm; + /* Pointer pointing to next v4l2_buffer */ + struct videobuf_buffer *next_frm; + /* + * This field keeps track of type of buffer exchange mechanism + * user has selected + */ + enum v4l2_memory memory; + /* Used to store pixel format */ + struct v4l2_format fmt; + /* + * used when IMP is chained to store the crop window which + * is different from the image window + */ + struct v4l2_rect crop; + /* Buffer queue used in video-buf */ + struct videobuf_queue buffer_queue; + /* Queue of filled frames */ + struct list_head dma_queue; + /* Used in video-buf */ + spinlock_t irqlock; + /* IRQ lock for DMA queue */ + spinlock_t dma_queue_lock; + /* lock used to access this structure */ + struct mutex lock; + /* number of users performing IO */ + u32 io_usrs; + /* Indicates whether streaming started */ + u8 started; + /* + * offset where second field starts from the starting of the + * buffer for field seperated YCbCr formats + */ + u32 field_off; +}; + +/* File handle structure */ +struct vpfe_fh { + struct vpfe_device *vpfe_dev; + /* Indicates whether this file handle is doing IO */ + u8 io_allowed; + /* Used to keep track priority of this instance */ + enum v4l2_priority prio; +}; + +struct vpfe_config_params { + u8 min_numbuffers; + u8 numbuffers; + u32 min_bufsize; + u32 device_bufsize; +}; + +#endif /* End of __KERNEL__ */ +/** + * VPFE_CMD_S_CCDC_RAW_PARAMS - EXPERIMENTAL IOCTL to set raw capture params + * This can be used to configure modules such as defect pixel correction, + * color space conversion, culling etc. This is an experimental ioctl that + * will change in future kernels. So use this ioctl with care ! 
+ * TODO: This is to be split into multiple ioctls and also explore the + * possibility of extending the v4l2 api to include this + **/ +#define VPFE_CMD_S_CCDC_RAW_PARAMS _IOW('V', BASE_VIDIOC_PRIVATE + 1, \ + void *) +#endif /* _DAVINCI_VPFE_H */ diff --git a/include/media/davinci/vpfe_types.h b/include/media/davinci/vpfe_types.h new file mode 100644 index 00000000000..76fb74bad08 --- /dev/null +++ b/include/media/davinci/vpfe_types.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2008-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option)any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _VPFE_TYPES_H +#define _VPFE_TYPES_H + +#ifdef __KERNEL__ + +enum vpfe_pin_pol { + VPFE_PINPOL_POSITIVE, + VPFE_PINPOL_NEGATIVE +}; + +enum vpfe_hw_if_type { + /* BT656 - 8 bit */ + VPFE_BT656, + /* BT1120 - 16 bit */ + VPFE_BT1120, + /* Raw Bayer */ + VPFE_RAW_BAYER, + /* YCbCr - 8 bit with external sync */ + VPFE_YCBCR_SYNC_8, + /* YCbCr - 16 bit with external sync */ + VPFE_YCBCR_SYNC_16, + /* BT656 - 10 bit */ + VPFE_BT656_10BIT +}; + +/* interface description */ +struct vpfe_hw_if_param { + enum vpfe_hw_if_type if_type; + enum vpfe_pin_pol hdpol; + enum vpfe_pin_pol vdpol; +}; + +#endif +#endif -- cgit v1.2.3-70-g09d2 From dd2ceb1a4028dc9644ed4df80cea9c05ca0b5f6d Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Fri, 3 Jul 2009 05:23:07 -0300 Subject: V4L/DVB (12250): v4l: dm355 ccdc module for vpfe capture driver Adds ccdc hw module for DM355 CCDC. This registers with the bridge driver a set of hw_ops for configuring the CCDC for a specific decoder device connected to vpfe. Reviewed by: Hans Verkuil Reviewed by: Laurent Pinchart Reviewed by: Mauro Carvalho Chehab Signed-off-by: Muralidharan Karicheri Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/davinci/dm355_ccdc.c | 978 ++++++++++++++++++++++++++ drivers/media/video/davinci/dm355_ccdc_regs.h | 310 ++++++++ include/media/davinci/dm355_ccdc.h | 321 +++++++++ 3 files changed, 1609 insertions(+) create mode 100644 drivers/media/video/davinci/dm355_ccdc.c create mode 100644 drivers/media/video/davinci/dm355_ccdc_regs.h create mode 100644 include/media/davinci/dm355_ccdc.h (limited to 'include') diff --git a/drivers/media/video/davinci/dm355_ccdc.c b/drivers/media/video/davinci/dm355_ccdc.c new file mode 100644 index 00000000000..4629cabe3f2 --- /dev/null +++ b/drivers/media/video/davinci/dm355_ccdc.c @@ -0,0 +1,978 @@ +/* + * Copyright (C) 2005-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * CCDC hardware module for DM355 + * ------------------------------ + * + * This module is for configuring DM355 CCD controller of VPFE to capture + * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules + * such as Defect Pixel Correction, Color Space Conversion etc to + * pre-process the Bayer RGB data, before writing it to SDRAM. This + * module also allows application to configure individual + * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. + * To do so, application include dm355_ccdc.h and vpfe_capture.h header + * files. The setparams() API is called by vpfe_capture driver + * to configure module parameters + * + * TODO: 1) Raw bayer parameter settings and bayer capture + * 2) Split module parameter structure to module specific ioctl structs + * 3) add support for lense shading correction + * 4) investigate if enum used for user space type definition + * to be replaced by #defines or integer + */ +#include +#include +#include +#include +#include +#include "dm355_ccdc_regs.h" +#include "ccdc_hw_device.h" + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CCDC Driver for DM355"); +MODULE_AUTHOR("Texas Instruments"); + +static struct device *dev; + +/* Object for CCDC raw mode */ +static struct ccdc_params_raw ccdc_hw_params_raw = { + .pix_fmt = CCDC_PIXFMT_RAW, + .frm_fmt = CCDC_FRMFMT_PROGRESSIVE, + .win = CCDC_WIN_VGA, + .fid_pol = VPFE_PINPOL_POSITIVE, + .vd_pol = VPFE_PINPOL_POSITIVE, + .hd_pol = VPFE_PINPOL_POSITIVE, + .gain = { + .r_ye = 256, + .gb_g = 256, + .gr_cy = 256, + .b_mg = 256 + }, + .config_params = { + .datasft = 2, + .data_sz = CCDC_DATA_10BITS, + .mfilt1 = CCDC_NO_MEDIAN_FILTER1, + .mfilt2 = CCDC_NO_MEDIAN_FILTER2, + .alaw = { + .gama_wd = 2, + }, + .blk_clamp = { + .sample_pixel = 1, + .dc_sub = 25 + }, + .col_pat_field0 = { + .olop = CCDC_GREEN_BLUE, + .olep = CCDC_BLUE, + .elop = CCDC_RED, + .elep = CCDC_GREEN_RED + }, + .col_pat_field1 = { + .olop = CCDC_GREEN_BLUE, + .olep = CCDC_BLUE, + .elop = CCDC_RED, + .elep = CCDC_GREEN_RED + }, + }, +}; + + +/* Object for CCDC ycbcr mode */ +static struct ccdc_params_ycbcr ccdc_hw_params_ycbcr = { + .win = CCDC_WIN_PAL, + .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT, + .frm_fmt = CCDC_FRMFMT_INTERLACED, + .fid_pol = VPFE_PINPOL_POSITIVE, + .vd_pol = VPFE_PINPOL_POSITIVE, + .hd_pol = VPFE_PINPOL_POSITIVE, + .bt656_enable = 1, + .pix_order = CCDC_PIXORDER_CBYCRY, + .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED +}; + +static enum vpfe_hw_if_type ccdc_if_type; +static void *__iomem ccdc_base_addr; +static int ccdc_addr_size; + +/* Raw Bayer formats */ +static u32 ccdc_raw_bayer_pix_formats[] = + {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16}; + +/* Raw YUV formats */ +static u32 ccdc_raw_yuv_pix_formats[] = + {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV}; + +/* register access routines */ +static inline u32 regr(u32 offset) +{ + return __raw_readl(ccdc_base_addr + offset); +} + +static inline void regw(u32 val, u32 offset) +{ + __raw_writel(val, ccdc_base_addr + offset); +} + +static void ccdc_set_ccdc_base(void *addr, int size) +{ + ccdc_base_addr = addr; + 
ccdc_addr_size = size; +} + +static void ccdc_enable(int en) +{ + unsigned int temp; + temp = regr(SYNCEN); + temp &= (~CCDC_SYNCEN_VDHDEN_MASK); + temp |= (en & CCDC_SYNCEN_VDHDEN_MASK); + regw(temp, SYNCEN); +} + +static void ccdc_enable_output_to_sdram(int en) +{ + unsigned int temp; + temp = regr(SYNCEN); + temp &= (~(CCDC_SYNCEN_WEN_MASK)); + temp |= ((en << CCDC_SYNCEN_WEN_SHIFT) & CCDC_SYNCEN_WEN_MASK); + regw(temp, SYNCEN); +} + +static void ccdc_config_gain_offset(void) +{ + /* configure gain */ + regw(ccdc_hw_params_raw.gain.r_ye, RYEGAIN); + regw(ccdc_hw_params_raw.gain.gr_cy, GRCYGAIN); + regw(ccdc_hw_params_raw.gain.gb_g, GBGGAIN); + regw(ccdc_hw_params_raw.gain.b_mg, BMGGAIN); + /* configure offset */ + regw(ccdc_hw_params_raw.ccdc_offset, OFFSET); +} + +/* + * ccdc_restore_defaults() + * This function restore power on defaults in the ccdc registers + */ +static int ccdc_restore_defaults(void) +{ + int i; + + dev_dbg(dev, "\nstarting ccdc_restore_defaults..."); + /* set all registers to zero */ + for (i = 0; i <= CCDC_REG_LAST; i += 4) + regw(0, i); + + /* now override the values with power on defaults in registers */ + regw(MODESET_DEFAULT, MODESET); + /* no culling support */ + regw(CULH_DEFAULT, CULH); + regw(CULV_DEFAULT, CULV); + /* Set default Gain and Offset */ + ccdc_hw_params_raw.gain.r_ye = GAIN_DEFAULT; + ccdc_hw_params_raw.gain.gb_g = GAIN_DEFAULT; + ccdc_hw_params_raw.gain.gr_cy = GAIN_DEFAULT; + ccdc_hw_params_raw.gain.b_mg = GAIN_DEFAULT; + ccdc_config_gain_offset(); + regw(OUTCLIP_DEFAULT, OUTCLIP); + regw(LSCCFG2_DEFAULT, LSCCFG2); + /* select ccdc input */ + if (vpss_select_ccdc_source(VPSS_CCDCIN)) { + dev_dbg(dev, "\ncouldn't select ccdc input source"); + return -EFAULT; + } + /* select ccdc clock */ + if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0) { + dev_dbg(dev, "\ncouldn't enable ccdc clock"); + return -EFAULT; + } + dev_dbg(dev, "\nEnd of ccdc_restore_defaults..."); + return 0; +} + +static int ccdc_open(struct device *device) +{ + dev = device; + return ccdc_restore_defaults(); +} + +static int ccdc_close(struct device *device) +{ + /* disable clock */ + vpss_enable_clock(VPSS_CCDC_CLOCK, 0); + /* do nothing for now */ + return 0; +} +/* + * ccdc_setwin() + * This function will configure the window size to + * be capture in CCDC reg. + */ +static void ccdc_setwin(struct v4l2_rect *image_win, + enum ccdc_frmfmt frm_fmt, int ppc) +{ + int horz_start, horz_nr_pixels; + int vert_start, vert_nr_lines; + int mid_img = 0; + + dev_dbg(dev, "\nStarting ccdc_setwin..."); + + /* + * ppc - per pixel count. indicates how many pixels per cell + * output to SDRAM. example, for ycbcr, it is one y and one c, so 2. 
+ * raw capture this is 1 + */ + horz_start = image_win->left << (ppc - 1); + horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1; + + /* Writing the horizontal info into the registers */ + regw(horz_start, SPH); + regw(horz_nr_pixels, NPH); + vert_start = image_win->top; + + if (frm_fmt == CCDC_FRMFMT_INTERLACED) { + vert_nr_lines = (image_win->height >> 1) - 1; + vert_start >>= 1; + /* Since first line doesn't have any data */ + vert_start += 1; + /* configure VDINT0 and VDINT1 */ + regw(vert_start, VDINT0); + } else { + /* Since first line doesn't have any data */ + vert_start += 1; + vert_nr_lines = image_win->height - 1; + /* configure VDINT0 and VDINT1 */ + mid_img = vert_start + (image_win->height / 2); + regw(vert_start, VDINT0); + regw(mid_img, VDINT1); + } + regw(vert_start & CCDC_START_VER_ONE_MASK, SLV0); + regw(vert_start & CCDC_START_VER_TWO_MASK, SLV1); + regw(vert_nr_lines & CCDC_NUM_LINES_VER, NLV); + dev_dbg(dev, "\nEnd of ccdc_setwin..."); +} + +static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) +{ + if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT || + ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) { + dev_dbg(dev, "Invalid value of data shift\n"); + return -EINVAL; + } + + if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 || + ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) { + dev_dbg(dev, "Invalid value of median filter1\n"); + return -EINVAL; + } + + if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 || + ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) { + dev_dbg(dev, "Invalid value of median filter2\n"); + return -EINVAL; + } + + if ((ccdcparam->med_filt_thres < 0) || + (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) { + dev_dbg(dev, "Invalid value of median filter thresold\n"); + return -EINVAL; + } + + if (ccdcparam->data_sz < CCDC_DATA_16BITS || + ccdcparam->data_sz > CCDC_DATA_8BITS) { + dev_dbg(dev, "Invalid value of data size\n"); + return -EINVAL; + } + + if (ccdcparam->alaw.enable) { + if (ccdcparam->alaw.gama_wd < CCDC_GAMMA_BITS_13_4 || + ccdcparam->alaw.gama_wd > CCDC_GAMMA_BITS_09_0) { + dev_dbg(dev, "Invalid value of ALAW\n"); + return -EINVAL; + } + } + + if (ccdcparam->blk_clamp.b_clamp_enable) { + if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS || + ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) { + dev_dbg(dev, "Invalid value of sample pixel\n"); + return -EINVAL; + } + if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES || + ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) { + dev_dbg(dev, "Invalid value of sample lines\n"); + return -EINVAL; + } + } + return 0; +} + +/* Parameter operations */ +static int ccdc_set_params(void __user *params) +{ + struct ccdc_config_params_raw ccdc_raw_params; + int x; + + /* only raw module parameters can be set through the IOCTL */ + if (ccdc_if_type != VPFE_RAW_BAYER) + return -EINVAL; + + x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); + if (x) { + dev_dbg(dev, "ccdc_set_params: error in copying ccdc" + "params, %d\n", x); + return -EFAULT; + } + + if (!validate_ccdc_param(&ccdc_raw_params)) { + memcpy(&ccdc_hw_params_raw.config_params, + &ccdc_raw_params, + sizeof(ccdc_raw_params)); + return 0; + } + return -EINVAL; +} + +/* This function will configure CCDC for YCbCr video capture */ +static void ccdc_config_ycbcr(void) +{ + struct ccdc_params_ycbcr *params = &ccdc_hw_params_ycbcr; + u32 temp; + + /* first set the CCDC power on defaults values in all registers */ + dev_dbg(dev, "\nStarting ccdc_config_ycbcr..."); + ccdc_restore_defaults(); + + /* 
configure pixel format & video frame format */ + temp = (((params->pix_fmt & CCDC_INPUT_MODE_MASK) << + CCDC_INPUT_MODE_SHIFT) | + ((params->frm_fmt & CCDC_FRM_FMT_MASK) << + CCDC_FRM_FMT_SHIFT)); + + /* setup BT.656 sync mode */ + if (params->bt656_enable) { + regw(CCDC_REC656IF_BT656_EN, REC656IF); + /* + * configure the FID, VD, HD pin polarity fld,hd pol positive, + * vd negative, 8-bit pack mode + */ + temp |= CCDC_VD_POL_NEGATIVE; + } else { /* y/c external sync mode */ + temp |= (((params->fid_pol & CCDC_FID_POL_MASK) << + CCDC_FID_POL_SHIFT) | + ((params->hd_pol & CCDC_HD_POL_MASK) << + CCDC_HD_POL_SHIFT) | + ((params->vd_pol & CCDC_VD_POL_MASK) << + CCDC_VD_POL_SHIFT)); + } + + /* pack the data to 8-bit */ + temp |= CCDC_DATA_PACK_ENABLE; + + regw(temp, MODESET); + + /* configure video window */ + ccdc_setwin(¶ms->win, params->frm_fmt, 2); + + /* configure the order of y cb cr in SD-RAM */ + temp = (params->pix_order << CCDC_Y8POS_SHIFT); + temp |= CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC; + regw(temp, CCDCFG); + + /* + * configure the horizontal line offset. This is done by rounding up + * width to a multiple of 16 pixels and multiply by two to account for + * y:cb:cr 4:2:2 data + */ + regw(((params->win.width * 2 + 31) >> 5), HSIZE); + + /* configure the memory line offset */ + if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) { + /* two fields are interleaved in memory */ + regw(CCDC_SDOFST_FIELD_INTERLEAVED, SDOFST); + } + + dev_dbg(dev, "\nEnd of ccdc_config_ycbcr...\n"); +} + +/* + * ccdc_config_black_clamp() + * configure parameters for Optical Black Clamp + */ +static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp) +{ + u32 val; + + if (!bclamp->b_clamp_enable) { + /* configure DCSub */ + regw(bclamp->dc_sub & CCDC_BLK_DC_SUB_MASK, DCSUB); + regw(0x0000, CLAMP); + return; + } + /* Enable the Black clamping, set sample lines and pixels */ + val = (bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) | + ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) << + CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE; + regw(val, CLAMP); + + /* If Black clamping is enable then make dcsub 0 */ + val = (bclamp->sample_ln & CCDC_NUM_LINE_CALC_MASK) + << CCDC_NUM_LINE_CALC_SHIFT; + regw(val, DCSUB); +} + +/* + * ccdc_config_black_compense() + * configure parameters for Black Compensation + */ +static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp) +{ + u32 val; + + val = (bcomp->b & CCDC_BLK_COMP_MASK) | + ((bcomp->gb & CCDC_BLK_COMP_MASK) << + CCDC_BLK_COMP_GB_COMP_SHIFT); + regw(val, BLKCMP1); + + val = ((bcomp->gr & CCDC_BLK_COMP_MASK) << + CCDC_BLK_COMP_GR_COMP_SHIFT) | + ((bcomp->r & CCDC_BLK_COMP_MASK) << + CCDC_BLK_COMP_R_COMP_SHIFT); + regw(val, BLKCMP0); +} + +/* + * ccdc_write_dfc_entry() + * write an entry in the dfc table. + */ +int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc) +{ +/* TODO This is to be re-visited and adjusted */ +#define DFC_WRITE_WAIT_COUNT 1000 + u32 val, count = DFC_WRITE_WAIT_COUNT; + + regw(dfc->dft_corr_vert[index], DFCMEM0); + regw(dfc->dft_corr_horz[index], DFCMEM1); + regw(dfc->dft_corr_sub1[index], DFCMEM2); + regw(dfc->dft_corr_sub2[index], DFCMEM3); + regw(dfc->dft_corr_sub3[index], DFCMEM4); + /* set WR bit to write */ + val = regr(DFCMEMCTL) | CCDC_DFCMEMCTL_DFCMWR_MASK; + regw(val, DFCMEMCTL); + + /* + * Assume, it is very short. 
If we get an error, we need to + * adjust this value + */ + while (regr(DFCMEMCTL) & CCDC_DFCMEMCTL_DFCMWR_MASK) + count--; + /* + * TODO We expect the count to be non-zero to be successful. Adjust + * the count if write requires more time + */ + + if (count) { + dev_err(dev, "defect table write timeout !!!\n"); + return -1; + } + return 0; +} + +/* + * ccdc_config_vdfc() + * configure parameters for Vertical Defect Correction + */ +static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc) +{ + u32 val; + int i; + + /* Configure General Defect Correction. The table used is from IPIPE */ + val = dfc->gen_dft_en & CCDC_DFCCTL_GDFCEN_MASK; + + /* Configure Vertical Defect Correction if needed */ + if (!dfc->ver_dft_en) { + /* Enable only General Defect Correction */ + regw(val, DFCCTL); + return 0; + } + + if (dfc->table_size > CCDC_DFT_TABLE_SIZE) + return -EINVAL; + + val |= CCDC_DFCCTL_VDFC_DISABLE; + val |= (dfc->dft_corr_ctl.vdfcsl & CCDC_DFCCTL_VDFCSL_MASK) << + CCDC_DFCCTL_VDFCSL_SHIFT; + val |= (dfc->dft_corr_ctl.vdfcuda & CCDC_DFCCTL_VDFCUDA_MASK) << + CCDC_DFCCTL_VDFCUDA_SHIFT; + val |= (dfc->dft_corr_ctl.vdflsft & CCDC_DFCCTL_VDFLSFT_MASK) << + CCDC_DFCCTL_VDFLSFT_SHIFT; + regw(val , DFCCTL); + + /* clear address ptr to offset 0 */ + val = CCDC_DFCMEMCTL_DFCMARST_MASK << CCDC_DFCMEMCTL_DFCMARST_SHIFT; + + /* write defect table entries */ + for (i = 0; i < dfc->table_size; i++) { + /* increment address for non zero index */ + if (i != 0) + val = CCDC_DFCMEMCTL_INC_ADDR; + regw(val, DFCMEMCTL); + if (ccdc_write_dfc_entry(i, dfc) < 0) + return -EFAULT; + } + + /* update saturation level and enable dfc */ + regw(dfc->saturation_ctl & CCDC_VDC_DFCVSAT_MASK, DFCVSAT); + val = regr(DFCCTL) | (CCDC_DFCCTL_VDFCEN_MASK << + CCDC_DFCCTL_VDFCEN_SHIFT); + regw(val, DFCCTL); + return 0; +} + +/* + * ccdc_config_csc() + * configure parameters for color space conversion + * Each register CSCM0-7 has two values in S8Q5 format. + */ +static void ccdc_config_csc(struct ccdc_csc *csc) +{ + u32 val1, val2; + int i; + + if (!csc->enable) + return; + + /* Enable the CSC sub-module */ + regw(CCDC_CSC_ENABLE, CSCCTL); + + /* Converting the co-eff as per the format of the register */ + for (i = 0; i < CCDC_CSC_COEFF_TABLE_SIZE; i++) { + if ((i % 2) == 0) { + /* CSCM - LSB */ + val1 = (csc->coeff[i].integer & + CCDC_CSC_COEF_INTEG_MASK) + << CCDC_CSC_COEF_INTEG_SHIFT; + /* + * convert decimal part to binary. 
Use 2 decimal + * precision, user values range from .00 - 0.99 + */ + val1 |= (((csc->coeff[i].decimal & + CCDC_CSC_COEF_DECIMAL_MASK) * + CCDC_CSC_DEC_MAX) / 100); + } else { + + /* CSCM - MSB */ + val2 = (csc->coeff[i].integer & + CCDC_CSC_COEF_INTEG_MASK) + << CCDC_CSC_COEF_INTEG_SHIFT; + val2 |= (((csc->coeff[i].decimal & + CCDC_CSC_COEF_DECIMAL_MASK) * + CCDC_CSC_DEC_MAX) / 100); + val2 <<= CCDC_CSCM_MSB_SHIFT; + val2 |= val1; + regw(val2, (CSCM0 + ((i - 1) << 1))); + } + } +} + +/* + * ccdc_config_color_patterns() + * configure parameters for color patterns + */ +static void ccdc_config_color_patterns(struct ccdc_col_pat *pat0, + struct ccdc_col_pat *pat1) +{ + u32 val; + + val = (pat0->olop | (pat0->olep << 2) | (pat0->elop << 4) | + (pat0->elep << 6) | (pat1->olop << 8) | (pat1->olep << 10) | + (pat1->elop << 12) | (pat1->elep << 14)); + regw(val, COLPTN); +} + +/* This function will configure CCDC for Raw mode image capture */ +static int ccdc_config_raw(void) +{ + struct ccdc_params_raw *params = &ccdc_hw_params_raw; + struct ccdc_config_params_raw *config_params = + &ccdc_hw_params_raw.config_params; + unsigned int val; + + dev_dbg(dev, "\nStarting ccdc_config_raw..."); + + /* restore power on defaults to register */ + ccdc_restore_defaults(); + + /* CCDCFG register: + * set CCD Not to swap input since input is RAW data + * set FID detection function to Latch at V-Sync + * set WENLOG - ccdc valid area to AND + * set TRGSEL to WENBIT + * set EXTRG to DISABLE + * disable latching function on VSYNC - shadowed registers + */ + regw(CCDC_YCINSWP_RAW | CCDC_CCDCFG_FIDMD_LATCH_VSYNC | + CCDC_CCDCFG_WENLOG_AND | CCDC_CCDCFG_TRGSEL_WEN | + CCDC_CCDCFG_EXTRG_DISABLE | CCDC_LATCH_ON_VSYNC_DISABLE, CCDCFG); + + /* + * Set VDHD direction to input, input type to raw input + * normal data polarity, do not use external WEN + */ + val = (CCDC_VDHDOUT_INPUT | CCDC_RAW_IP_MODE | CCDC_DATAPOL_NORMAL | + CCDC_EXWEN_DISABLE); + + /* + * Configure the vertical sync polarity (MODESET.VDPOL), horizontal + * sync polarity (MODESET.HDPOL), field id polarity (MODESET.FLDPOL), + * frame format(progressive or interlace), & pixel format (Input mode) + */ + val |= (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) | + ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) | + ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) | + ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) | + ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT)); + + /* set pack for alaw compression */ + if ((config_params->data_sz == CCDC_DATA_8BITS) || + config_params->alaw.enable) + val |= CCDC_DATA_PACK_ENABLE; + + /* Configure for LPF */ + if (config_params->lpf_enable) + val |= (config_params->lpf_enable & CCDC_LPF_MASK) << + CCDC_LPF_SHIFT; + + /* Configure the data shift */ + val |= (config_params->datasft & CCDC_DATASFT_MASK) << + CCDC_DATASFT_SHIFT; + regw(val , MODESET); + dev_dbg(dev, "\nWriting 0x%x to MODESET...\n", val); + + /* Configure the Median Filter threshold */ + regw((config_params->med_filt_thres) & CCDC_MED_FILT_THRESH, MEDFILT); + + /* Configure GAMMAWD register. 
defaur 11-2, and Mosaic cfa pattern */ + val = CCDC_GAMMA_BITS_11_2 << CCDC_GAMMAWD_INPUT_SHIFT | + CCDC_CFA_MOSAIC; + + /* Enable and configure aLaw register if needed */ + if (config_params->alaw.enable) { + val |= (CCDC_ALAW_ENABLE | + ((config_params->alaw.gama_wd & + CCDC_ALAW_GAMA_WD_MASK) << + CCDC_GAMMAWD_INPUT_SHIFT)); + } + + /* Configure Median filter1 & filter2 */ + val |= ((config_params->mfilt1 << CCDC_MFILT1_SHIFT) | + (config_params->mfilt2 << CCDC_MFILT2_SHIFT)); + + regw(val, GAMMAWD); + dev_dbg(dev, "\nWriting 0x%x to GAMMAWD...\n", val); + + /* configure video window */ + ccdc_setwin(¶ms->win, params->frm_fmt, 1); + + /* Optical Clamp Averaging */ + ccdc_config_black_clamp(&config_params->blk_clamp); + + /* Black level compensation */ + ccdc_config_black_compense(&config_params->blk_comp); + + /* Vertical Defect Correction if needed */ + if (ccdc_config_vdfc(&config_params->vertical_dft) < 0) + return -EFAULT; + + /* color space conversion */ + ccdc_config_csc(&config_params->csc); + + /* color pattern */ + ccdc_config_color_patterns(&config_params->col_pat_field0, + &config_params->col_pat_field1); + + /* Configure the Gain & offset control */ + ccdc_config_gain_offset(); + + dev_dbg(dev, "\nWriting %x to COLPTN...\n", val); + + /* Configure DATAOFST register */ + val = (config_params->data_offset.horz_offset & CCDC_DATAOFST_MASK) << + CCDC_DATAOFST_H_SHIFT; + val |= (config_params->data_offset.vert_offset & CCDC_DATAOFST_MASK) << + CCDC_DATAOFST_V_SHIFT; + regw(val, DATAOFST); + + /* configuring HSIZE register */ + val = (params->horz_flip_enable & CCDC_HSIZE_FLIP_MASK) << + CCDC_HSIZE_FLIP_SHIFT; + + /* If pack 8 is enable then 1 pixel will take 1 byte */ + if ((config_params->data_sz == CCDC_DATA_8BITS) || + config_params->alaw.enable) { + val |= (((params->win.width) + 31) >> 5) & + CCDC_HSIZE_VAL_MASK; + + /* adjust to multiple of 32 */ + dev_dbg(dev, "\nWriting 0x%x to HSIZE...\n", + (((params->win.width) + 31) >> 5) & + CCDC_HSIZE_VAL_MASK); + } else { + /* else one pixel will take 2 byte */ + val |= (((params->win.width * 2) + 31) >> 5) & + CCDC_HSIZE_VAL_MASK; + + dev_dbg(dev, "\nWriting 0x%x to HSIZE...\n", + (((params->win.width * 2) + 31) >> 5) & + CCDC_HSIZE_VAL_MASK); + } + regw(val, HSIZE); + + /* Configure SDOFST register */ + if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) { + if (params->image_invert_enable) { + /* For interlace inverse mode */ + regw(CCDC_SDOFST_INTERLACE_INVERSE, SDOFST); + dev_dbg(dev, "\nWriting %x to SDOFST...\n", + CCDC_SDOFST_INTERLACE_INVERSE); + } else { + /* For interlace non inverse mode */ + regw(CCDC_SDOFST_INTERLACE_NORMAL, SDOFST); + dev_dbg(dev, "\nWriting %x to SDOFST...\n", + CCDC_SDOFST_INTERLACE_NORMAL); + } + } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { + if (params->image_invert_enable) { + /* For progessive inverse mode */ + regw(CCDC_SDOFST_PROGRESSIVE_INVERSE, SDOFST); + dev_dbg(dev, "\nWriting %x to SDOFST...\n", + CCDC_SDOFST_PROGRESSIVE_INVERSE); + } else { + /* For progessive non inverse mode */ + regw(CCDC_SDOFST_PROGRESSIVE_NORMAL, SDOFST); + dev_dbg(dev, "\nWriting %x to SDOFST...\n", + CCDC_SDOFST_PROGRESSIVE_NORMAL); + } + } + dev_dbg(dev, "\nend of ccdc_config_raw..."); + return 0; +} + +static int ccdc_configure(void) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + return ccdc_config_raw(); + else + ccdc_config_ycbcr(); + return 0; +} + +static int ccdc_set_buftype(enum ccdc_buftype buf_type) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_hw_params_raw.buf_type = buf_type; + else + 
ccdc_hw_params_ycbcr.buf_type = buf_type; + return 0; +} +static enum ccdc_buftype ccdc_get_buftype(void) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + return ccdc_hw_params_raw.buf_type; + return ccdc_hw_params_ycbcr.buf_type; +} + +static int ccdc_enum_pix(u32 *pix, int i) +{ + int ret = -EINVAL; + if (ccdc_if_type == VPFE_RAW_BAYER) { + if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) { + *pix = ccdc_raw_bayer_pix_formats[i]; + ret = 0; + } + } else { + if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) { + *pix = ccdc_raw_yuv_pix_formats[i]; + ret = 0; + } + } + return ret; +} + +static int ccdc_set_pixel_format(u32 pixfmt) +{ + struct ccdc_a_law *alaw = + &ccdc_hw_params_raw.config_params.alaw; + + if (ccdc_if_type == VPFE_RAW_BAYER) { + ccdc_hw_params_raw.pix_fmt = CCDC_PIXFMT_RAW; + if (pixfmt == V4L2_PIX_FMT_SBGGR8) + alaw->enable = 1; + else if (pixfmt != V4L2_PIX_FMT_SBGGR16) + return -EINVAL; + } else { + if (pixfmt == V4L2_PIX_FMT_YUYV) + ccdc_hw_params_ycbcr.pix_order = CCDC_PIXORDER_YCBYCR; + else if (pixfmt == V4L2_PIX_FMT_UYVY) + ccdc_hw_params_ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; + else + return -EINVAL; + } + return 0; +} +static u32 ccdc_get_pixel_format(void) +{ + struct ccdc_a_law *alaw = + &ccdc_hw_params_raw.config_params.alaw; + u32 pixfmt; + + if (ccdc_if_type == VPFE_RAW_BAYER) + if (alaw->enable) + pixfmt = V4L2_PIX_FMT_SBGGR8; + else + pixfmt = V4L2_PIX_FMT_SBGGR16; + else { + if (ccdc_hw_params_ycbcr.pix_order == CCDC_PIXORDER_YCBYCR) + pixfmt = V4L2_PIX_FMT_YUYV; + else + pixfmt = V4L2_PIX_FMT_UYVY; + } + return pixfmt; +} +static int ccdc_set_image_window(struct v4l2_rect *win) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_hw_params_raw.win = *win; + else + ccdc_hw_params_ycbcr.win = *win; + return 0; +} + +static void ccdc_get_image_window(struct v4l2_rect *win) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + *win = ccdc_hw_params_raw.win; + else + *win = ccdc_hw_params_ycbcr.win; +} + +static unsigned int ccdc_get_line_length(void) +{ + struct ccdc_config_params_raw *config_params = + &ccdc_hw_params_raw.config_params; + unsigned int len; + + if (ccdc_if_type == VPFE_RAW_BAYER) { + if ((config_params->alaw.enable) || + (config_params->data_sz == CCDC_DATA_8BITS)) + len = ccdc_hw_params_raw.win.width; + else + len = ccdc_hw_params_raw.win.width * 2; + } else + len = ccdc_hw_params_ycbcr.win.width * 2; + return ALIGN(len, 32); +} + +static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_hw_params_raw.frm_fmt = frm_fmt; + else + ccdc_hw_params_ycbcr.frm_fmt = frm_fmt; + return 0; +} + +static enum ccdc_frmfmt ccdc_get_frame_format(void) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + return ccdc_hw_params_raw.frm_fmt; + else + return ccdc_hw_params_ycbcr.frm_fmt; +} + +static int ccdc_getfid(void) +{ + return (regr(MODESET) >> 15) & 1; +} + +/* misc operations */ +static inline void ccdc_setfbaddr(unsigned long addr) +{ + regw((addr >> 21) & 0x007f, STADRH); + regw((addr >> 5) & 0x0ffff, STADRL); +} + +static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params) +{ + ccdc_if_type = params->if_type; + + switch (params->if_type) { + case VPFE_BT656: + case VPFE_YCBCR_SYNC_16: + case VPFE_YCBCR_SYNC_8: + ccdc_hw_params_ycbcr.vd_pol = params->vdpol; + ccdc_hw_params_ycbcr.hd_pol = params->hdpol; + break; + default: + /* TODO add support for raw bayer here */ + return -EINVAL; + } + return 0; +} + +static struct ccdc_hw_device ccdc_hw_dev = { + .name = "DM355 CCDC", + .owner = THIS_MODULE, + .hw_ops = { + .open = 
ccdc_open, + .close = ccdc_close, + .set_ccdc_base = ccdc_set_ccdc_base, + .enable = ccdc_enable, + .enable_out_to_sdram = ccdc_enable_output_to_sdram, + .set_hw_if_params = ccdc_set_hw_if_params, + .set_params = ccdc_set_params, + .configure = ccdc_configure, + .set_buftype = ccdc_set_buftype, + .get_buftype = ccdc_get_buftype, + .enum_pix = ccdc_enum_pix, + .set_pixel_format = ccdc_set_pixel_format, + .get_pixel_format = ccdc_get_pixel_format, + .set_frame_format = ccdc_set_frame_format, + .get_frame_format = ccdc_get_frame_format, + .set_image_window = ccdc_set_image_window, + .get_image_window = ccdc_get_image_window, + .get_line_length = ccdc_get_line_length, + .setfbaddr = ccdc_setfbaddr, + .getfid = ccdc_getfid, + }, +}; + +static int dm355_ccdc_init(void) +{ + printk(KERN_NOTICE "dm355_ccdc_init\n"); + if (vpfe_register_ccdc_device(&ccdc_hw_dev) < 0) + return -1; + printk(KERN_NOTICE "%s is registered with vpfe.\n", + ccdc_hw_dev.name); + return 0; +} + +static void dm355_ccdc_exit(void) +{ + vpfe_unregister_ccdc_device(&ccdc_hw_dev); +} + +module_init(dm355_ccdc_init); +module_exit(dm355_ccdc_exit); diff --git a/drivers/media/video/davinci/dm355_ccdc_regs.h b/drivers/media/video/davinci/dm355_ccdc_regs.h new file mode 100644 index 00000000000..d6d2ef0533b --- /dev/null +++ b/drivers/media/video/davinci/dm355_ccdc_regs.h @@ -0,0 +1,310 @@ +/* + * Copyright (C) 2005-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _DM355_CCDC_REGS_H +#define _DM355_CCDC_REGS_H + +/**************************************************************************\ +* Register OFFSET Definitions +\**************************************************************************/ +#define SYNCEN 0x00 +#define MODESET 0x04 +#define HDWIDTH 0x08 +#define VDWIDTH 0x0c +#define PPLN 0x10 +#define LPFR 0x14 +#define SPH 0x18 +#define NPH 0x1c +#define SLV0 0x20 +#define SLV1 0x24 +#define NLV 0x28 +#define CULH 0x2c +#define CULV 0x30 +#define HSIZE 0x34 +#define SDOFST 0x38 +#define STADRH 0x3c +#define STADRL 0x40 +#define CLAMP 0x44 +#define DCSUB 0x48 +#define COLPTN 0x4c +#define BLKCMP0 0x50 +#define BLKCMP1 0x54 +#define MEDFILT 0x58 +#define RYEGAIN 0x5c +#define GRCYGAIN 0x60 +#define GBGGAIN 0x64 +#define BMGGAIN 0x68 +#define OFFSET 0x6c +#define OUTCLIP 0x70 +#define VDINT0 0x74 +#define VDINT1 0x78 +#define RSV0 0x7c +#define GAMMAWD 0x80 +#define REC656IF 0x84 +#define CCDCFG 0x88 +#define FMTCFG 0x8c +#define FMTPLEN 0x90 +#define FMTSPH 0x94 +#define FMTLNH 0x98 +#define FMTSLV 0x9c +#define FMTLNV 0xa0 +#define FMTRLEN 0xa4 +#define FMTHCNT 0xa8 +#define FMT_ADDR_PTR_B 0xac +#define FMT_ADDR_PTR(i) (FMT_ADDR_PTR_B + (i * 4)) +#define FMTPGM_VF0 0xcc +#define FMTPGM_VF1 0xd0 +#define FMTPGM_AP0 0xd4 +#define FMTPGM_AP1 0xd8 +#define FMTPGM_AP2 0xdc +#define FMTPGM_AP3 0xe0 +#define FMTPGM_AP4 0xe4 +#define FMTPGM_AP5 0xe8 +#define FMTPGM_AP6 0xec +#define FMTPGM_AP7 0xf0 +#define LSCCFG1 0xf4 +#define LSCCFG2 0xf8 +#define LSCH0 0xfc +#define LSCV0 0x100 +#define LSCKH 0x104 +#define LSCKV 0x108 +#define LSCMEMCTL 0x10c +#define LSCMEMD 0x110 +#define LSCMEMQ 0x114 +#define DFCCTL 0x118 +#define DFCVSAT 0x11c +#define DFCMEMCTL 0x120 +#define DFCMEM0 0x124 +#define DFCMEM1 0x128 +#define DFCMEM2 0x12c +#define DFCMEM3 0x130 +#define DFCMEM4 0x134 +#define CSCCTL 0x138 +#define CSCM0 0x13c +#define CSCM1 0x140 +#define CSCM2 0x144 +#define CSCM3 0x148 +#define CSCM4 0x14c +#define CSCM5 0x150 +#define CSCM6 0x154 +#define CSCM7 0x158 +#define DATAOFST 0x15c +#define CCDC_REG_LAST DATAOFST +/************************************************************** +* Define for various register bit mask and shifts for CCDC +* +**************************************************************/ +#define CCDC_RAW_IP_MODE 0 +#define CCDC_VDHDOUT_INPUT 0 +#define CCDC_YCINSWP_RAW (0 << 4) +#define CCDC_EXWEN_DISABLE 0 +#define CCDC_DATAPOL_NORMAL 0 +#define CCDC_CCDCFG_FIDMD_LATCH_VSYNC 0 +#define CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC (1 << 6) +#define CCDC_CCDCFG_WENLOG_AND 0 +#define CCDC_CCDCFG_TRGSEL_WEN 0 +#define CCDC_CCDCFG_EXTRG_DISABLE 0 +#define CCDC_CFA_MOSAIC 0 +#define CCDC_Y8POS_SHIFT 11 + +#define CCDC_VDC_DFCVSAT_MASK 0x3fff +#define CCDC_DATAOFST_MASK 0x0ff +#define CCDC_DATAOFST_H_SHIFT 0 +#define CCDC_DATAOFST_V_SHIFT 8 +#define CCDC_GAMMAWD_CFA_MASK 1 +#define CCDC_GAMMAWD_CFA_SHIFT 5 +#define CCDC_GAMMAWD_INPUT_SHIFT 2 +#define CCDC_FID_POL_MASK 1 +#define CCDC_FID_POL_SHIFT 4 +#define CCDC_HD_POL_MASK 1 +#define CCDC_HD_POL_SHIFT 3 +#define CCDC_VD_POL_MASK 1 +#define CCDC_VD_POL_SHIFT 2 +#define CCDC_VD_POL_NEGATIVE (1 << 2) +#define CCDC_FRM_FMT_MASK 1 +#define CCDC_FRM_FMT_SHIFT 7 +#define CCDC_DATA_SZ_MASK 7 +#define CCDC_DATA_SZ_SHIFT 8 +#define CCDC_VDHDOUT_MASK 1 +#define CCDC_VDHDOUT_SHIFT 0 
+#define CCDC_EXWEN_MASK 1 +#define CCDC_EXWEN_SHIFT 5 +#define CCDC_INPUT_MODE_MASK 3 +#define CCDC_INPUT_MODE_SHIFT 12 +#define CCDC_PIX_FMT_MASK 3 +#define CCDC_PIX_FMT_SHIFT 12 +#define CCDC_DATAPOL_MASK 1 +#define CCDC_DATAPOL_SHIFT 6 +#define CCDC_WEN_ENABLE (1 << 1) +#define CCDC_VDHDEN_ENABLE (1 << 16) +#define CCDC_LPF_ENABLE (1 << 14) +#define CCDC_ALAW_ENABLE 1 +#define CCDC_ALAW_GAMA_WD_MASK 7 +#define CCDC_REC656IF_BT656_EN 3 + +#define CCDC_FMTCFG_FMTMODE_MASK 3 +#define CCDC_FMTCFG_FMTMODE_SHIFT 1 +#define CCDC_FMTCFG_LNUM_MASK 3 +#define CCDC_FMTCFG_LNUM_SHIFT 4 +#define CCDC_FMTCFG_ADDRINC_MASK 7 +#define CCDC_FMTCFG_ADDRINC_SHIFT 8 + +#define CCDC_CCDCFG_FIDMD_SHIFT 6 +#define CCDC_CCDCFG_WENLOG_SHIFT 8 +#define CCDC_CCDCFG_TRGSEL_SHIFT 9 +#define CCDC_CCDCFG_EXTRG_SHIFT 10 +#define CCDC_CCDCFG_MSBINVI_SHIFT 13 + +#define CCDC_HSIZE_FLIP_SHIFT 12 +#define CCDC_HSIZE_FLIP_MASK 1 +#define CCDC_HSIZE_VAL_MASK 0xFFF +#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249 +#define CCDC_SDOFST_INTERLACE_INVERSE 0x4B6D +#define CCDC_SDOFST_INTERLACE_NORMAL 0x0B6D +#define CCDC_SDOFST_PROGRESSIVE_INVERSE 0x4000 +#define CCDC_SDOFST_PROGRESSIVE_NORMAL 0 +#define CCDC_START_PX_HOR_MASK 0x7FFF +#define CCDC_NUM_PX_HOR_MASK 0x7FFF +#define CCDC_START_VER_ONE_MASK 0x7FFF +#define CCDC_START_VER_TWO_MASK 0x7FFF +#define CCDC_NUM_LINES_VER 0x7FFF + +#define CCDC_BLK_CLAMP_ENABLE (1 << 15) +#define CCDC_BLK_SGAIN_MASK 0x1F +#define CCDC_BLK_ST_PXL_MASK 0x1FFF +#define CCDC_BLK_SAMPLE_LN_MASK 3 +#define CCDC_BLK_SAMPLE_LN_SHIFT 13 + +#define CCDC_NUM_LINE_CALC_MASK 3 +#define CCDC_NUM_LINE_CALC_SHIFT 14 + +#define CCDC_BLK_DC_SUB_MASK 0x3FFF +#define CCDC_BLK_COMP_MASK 0xFF +#define CCDC_BLK_COMP_GB_COMP_SHIFT 8 +#define CCDC_BLK_COMP_GR_COMP_SHIFT 0 +#define CCDC_BLK_COMP_R_COMP_SHIFT 8 +#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15) +#define CCDC_LATCH_ON_VSYNC_ENABLE (0 << 15) +#define CCDC_FPC_ENABLE (1 << 15) +#define CCDC_FPC_FPC_NUM_MASK 0x7FFF +#define CCDC_DATA_PACK_ENABLE (1 << 11) +#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF +#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF +#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16 +#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF +#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF +#define CCDC_FMT_VERT_FMTSLV_SHIFT 16 +#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF +#define CCDC_VP_OUT_VERT_NUM_SHIFT 17 +#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF +#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4 +#define CCDC_VP_OUT_HORZ_ST_MASK 0xF + +#define CCDC_CSC_COEF_INTEG_MASK 7 +#define CCDC_CSC_COEF_DECIMAL_MASK 0x1f +#define CCDC_CSC_COEF_INTEG_SHIFT 5 +#define CCDC_CSCM_MSB_SHIFT 8 +#define CCDC_CSC_ENABLE 1 +#define CCDC_CSC_DEC_MAX 32 + +#define CCDC_MFILT1_SHIFT 10 +#define CCDC_MFILT2_SHIFT 8 +#define CCDC_MED_FILT_THRESH 0x3FFF +#define CCDC_LPF_MASK 1 +#define CCDC_LPF_SHIFT 14 +#define CCDC_OFFSET_MASK 0x3FF +#define CCDC_DATASFT_MASK 7 +#define CCDC_DATASFT_SHIFT 8 + +#define CCDC_DF_ENABLE 1 + +#define CCDC_FMTPLEN_P0_MASK 0xF +#define CCDC_FMTPLEN_P1_MASK 0xF +#define CCDC_FMTPLEN_P2_MASK 7 +#define CCDC_FMTPLEN_P3_MASK 7 +#define CCDC_FMTPLEN_P0_SHIFT 0 +#define CCDC_FMTPLEN_P1_SHIFT 4 +#define CCDC_FMTPLEN_P2_SHIFT 8 +#define CCDC_FMTPLEN_P3_SHIFT 12 + +#define CCDC_FMTSPH_MASK 0x1FFF +#define CCDC_FMTLNH_MASK 0x1FFF +#define CCDC_FMTSLV_MASK 0x1FFF +#define CCDC_FMTLNV_MASK 0x7FFF +#define CCDC_FMTRLEN_MASK 0x1FFF +#define CCDC_FMTHCNT_MASK 0x1FFF + +#define CCDC_ADP_INIT_MASK 0x1FFF +#define CCDC_ADP_LINE_SHIFT 13 +#define CCDC_ADP_LINE_MASK 3 +#define CCDC_FMTPGN_APTR_MASK 7 + +#define 
CCDC_DFCCTL_GDFCEN_MASK 1 +#define CCDC_DFCCTL_VDFCEN_MASK 1 +#define CCDC_DFCCTL_VDFC_DISABLE (0 << 4) +#define CCDC_DFCCTL_VDFCEN_SHIFT 4 +#define CCDC_DFCCTL_VDFCSL_MASK 3 +#define CCDC_DFCCTL_VDFCSL_SHIFT 5 +#define CCDC_DFCCTL_VDFCUDA_MASK 1 +#define CCDC_DFCCTL_VDFCUDA_SHIFT 7 +#define CCDC_DFCCTL_VDFLSFT_MASK 3 +#define CCDC_DFCCTL_VDFLSFT_SHIFT 8 +#define CCDC_DFCMEMCTL_DFCMARST_MASK 1 +#define CCDC_DFCMEMCTL_DFCMARST_SHIFT 2 +#define CCDC_DFCMEMCTL_DFCMWR_MASK 1 +#define CCDC_DFCMEMCTL_DFCMWR_SHIFT 0 +#define CCDC_DFCMEMCTL_INC_ADDR (0 << 2) + +#define CCDC_LSCCFG_GFTSF_MASK 7 +#define CCDC_LSCCFG_GFTSF_SHIFT 1 +#define CCDC_LSCCFG_GFTINV_MASK 0xf +#define CCDC_LSCCFG_GFTINV_SHIFT 4 +#define CCDC_LSC_GFTABLE_SEL_MASK 3 +#define CCDC_LSC_GFTABLE_EPEL_SHIFT 8 +#define CCDC_LSC_GFTABLE_OPEL_SHIFT 10 +#define CCDC_LSC_GFTABLE_EPOL_SHIFT 12 +#define CCDC_LSC_GFTABLE_OPOL_SHIFT 14 +#define CCDC_LSC_GFMODE_MASK 3 +#define CCDC_LSC_GFMODE_SHIFT 4 +#define CCDC_LSC_DISABLE 0 +#define CCDC_LSC_ENABLE 1 +#define CCDC_LSC_TABLE1_SLC 0 +#define CCDC_LSC_TABLE2_SLC 1 +#define CCDC_LSC_TABLE3_SLC 2 +#define CCDC_LSC_MEMADDR_RESET (1 << 2) +#define CCDC_LSC_MEMADDR_INCR (0 << 2) +#define CCDC_LSC_FRAC_MASK_T1 0xFF +#define CCDC_LSC_INT_MASK 3 +#define CCDC_LSC_FRAC_MASK 0x3FFF +#define CCDC_LSC_CENTRE_MASK 0x3FFF +#define CCDC_LSC_COEF_MASK 0xff +#define CCDC_LSC_COEFL_SHIFT 0 +#define CCDC_LSC_COEFU_SHIFT 8 +#define CCDC_GAIN_MASK 0x7FF +#define CCDC_SYNCEN_VDHDEN_MASK (1 << 0) +#define CCDC_SYNCEN_WEN_MASK (1 << 1) +#define CCDC_SYNCEN_WEN_SHIFT 1 + +/* Power on Defaults in hardware */ +#define MODESET_DEFAULT 0x200 +#define CULH_DEFAULT 0xFFFF +#define CULV_DEFAULT 0xFF +#define GAIN_DEFAULT 256 +#define OUTCLIP_DEFAULT 0x3FFF +#define LSCCFG2_DEFAULT 0xE + +#endif diff --git a/include/media/davinci/dm355_ccdc.h b/include/media/davinci/dm355_ccdc.h new file mode 100644 index 00000000000..df8a7b10747 --- /dev/null +++ b/include/media/davinci/dm355_ccdc.h @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2005-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _DM355_CCDC_H +#define _DM355_CCDC_H +#include +#include + +/* enum for No of pixel per line to be avg. 
in Black Clamping */ +enum ccdc_sample_length { + CCDC_SAMPLE_1PIXELS, + CCDC_SAMPLE_2PIXELS, + CCDC_SAMPLE_4PIXELS, + CCDC_SAMPLE_8PIXELS, + CCDC_SAMPLE_16PIXELS +}; + +/* enum for No of lines in Black Clamping */ +enum ccdc_sample_line { + CCDC_SAMPLE_1LINES, + CCDC_SAMPLE_2LINES, + CCDC_SAMPLE_4LINES, + CCDC_SAMPLE_8LINES, + CCDC_SAMPLE_16LINES +}; + +/* enum for Alaw gama width */ +enum ccdc_gamma_width { + CCDC_GAMMA_BITS_13_4, + CCDC_GAMMA_BITS_12_3, + CCDC_GAMMA_BITS_11_2, + CCDC_GAMMA_BITS_10_1, + CCDC_GAMMA_BITS_09_0 +}; + +enum ccdc_colpats { + CCDC_RED, + CCDC_GREEN_RED, + CCDC_GREEN_BLUE, + CCDC_BLUE +}; + +struct ccdc_col_pat { + enum ccdc_colpats olop; + enum ccdc_colpats olep; + enum ccdc_colpats elop; + enum ccdc_colpats elep; +}; + +enum ccdc_datasft { + CCDC_DATA_NO_SHIFT, + CCDC_DATA_SHIFT_1BIT, + CCDC_DATA_SHIFT_2BIT, + CCDC_DATA_SHIFT_3BIT, + CCDC_DATA_SHIFT_4BIT, + CCDC_DATA_SHIFT_5BIT, + CCDC_DATA_SHIFT_6BIT +}; + +enum ccdc_data_size { + CCDC_DATA_16BITS, + CCDC_DATA_15BITS, + CCDC_DATA_14BITS, + CCDC_DATA_13BITS, + CCDC_DATA_12BITS, + CCDC_DATA_11BITS, + CCDC_DATA_10BITS, + CCDC_DATA_8BITS +}; +enum ccdc_mfilt1 { + CCDC_NO_MEDIAN_FILTER1, + CCDC_AVERAGE_FILTER1, + CCDC_MEDIAN_FILTER1 +}; + +enum ccdc_mfilt2 { + CCDC_NO_MEDIAN_FILTER2, + CCDC_AVERAGE_FILTER2, + CCDC_MEDIAN_FILTER2 +}; + +/* structure for ALaw */ +struct ccdc_a_law { + /* Enable/disable A-Law */ + unsigned char enable; + /* Gama Width Input */ + enum ccdc_gamma_width gama_wd; +}; + +/* structure for Black Clamping */ +struct ccdc_black_clamp { + /* only if bClampEnable is TRUE */ + unsigned char b_clamp_enable; + /* only if bClampEnable is TRUE */ + enum ccdc_sample_length sample_pixel; + /* only if bClampEnable is TRUE */ + enum ccdc_sample_line sample_ln; + /* only if bClampEnable is TRUE */ + unsigned short start_pixel; + /* only if bClampEnable is FALSE */ + unsigned short sgain; + unsigned short dc_sub; +}; + +/* structure for Black Level Compensation */ +struct ccdc_black_compensation { + /* Constant value to subtract from Red component */ + unsigned char r; + /* Constant value to subtract from Gr component */ + unsigned char gr; + /* Constant value to subtract from Blue component */ + unsigned char b; + /* Constant value to subtract from Gb component */ + unsigned char gb; +}; + +struct ccdc_float { + int integer; + unsigned int decimal; +}; + +#define CCDC_CSC_COEFF_TABLE_SIZE 16 +/* structure for color space converter */ +struct ccdc_csc { + unsigned char enable; + /* + * S8Q5. Use 2 decimal precision, user values range from -3.00 to 3.99. 
+ * example - to use 1.03, set integer part as 1, and decimal part as 3 + * to use -1.03, set integer part as -1 and decimal part as 3 + */ + struct ccdc_float coeff[CCDC_CSC_COEFF_TABLE_SIZE]; +}; + +/* Structures for Vertical Defect Correction*/ +enum ccdc_vdf_csl { + CCDC_VDF_NORMAL, + CCDC_VDF_HORZ_INTERPOL_SAT, + CCDC_VDF_HORZ_INTERPOL +}; + +enum ccdc_vdf_cuda { + CCDC_VDF_WHOLE_LINE_CORRECT, + CCDC_VDF_UPPER_DISABLE +}; + +enum ccdc_dfc_mwr { + CCDC_DFC_MWR_WRITE_COMPLETE, + CCDC_DFC_WRITE_REG +}; + +enum ccdc_dfc_mrd { + CCDC_DFC_READ_COMPLETE, + CCDC_DFC_READ_REG +}; + +enum ccdc_dfc_ma_rst { + CCDC_DFC_INCR_ADDR, + CCDC_DFC_CLR_ADDR +}; + +enum ccdc_dfc_mclr { + CCDC_DFC_CLEAR_COMPLETE, + CCDC_DFC_CLEAR +}; + +struct ccdc_dft_corr_ctl { + enum ccdc_vdf_csl vdfcsl; + enum ccdc_vdf_cuda vdfcuda; + unsigned int vdflsft; +}; + +struct ccdc_dft_corr_mem_ctl { + enum ccdc_dfc_mwr dfcmwr; + enum ccdc_dfc_mrd dfcmrd; + enum ccdc_dfc_ma_rst dfcmarst; + enum ccdc_dfc_mclr dfcmclr; +}; + +#define CCDC_DFT_TABLE_SIZE 16 +/* + * Main Structure for vertical defect correction. Vertical defect + * correction can correct upto 16 defects if defects less than 16 + * then pad the rest with 0 + */ +struct ccdc_vertical_dft { + unsigned char ver_dft_en; + unsigned char gen_dft_en; + unsigned int saturation_ctl; + struct ccdc_dft_corr_ctl dft_corr_ctl; + struct ccdc_dft_corr_mem_ctl dft_corr_mem_ctl; + int table_size; + unsigned int dft_corr_horz[CCDC_DFT_TABLE_SIZE]; + unsigned int dft_corr_vert[CCDC_DFT_TABLE_SIZE]; + unsigned int dft_corr_sub1[CCDC_DFT_TABLE_SIZE]; + unsigned int dft_corr_sub2[CCDC_DFT_TABLE_SIZE]; + unsigned int dft_corr_sub3[CCDC_DFT_TABLE_SIZE]; +}; + +struct ccdc_data_offset { + unsigned char horz_offset; + unsigned char vert_offset; +}; + +/* + * Structure for CCDC configuration parameters for raw capture mode passed + * by application + */ +struct ccdc_config_params_raw { + /* data shift to be applied before storing */ + enum ccdc_datasft datasft; + /* data size value from 8 to 16 bits */ + enum ccdc_data_size data_sz; + /* median filter for sdram */ + enum ccdc_mfilt1 mfilt1; + enum ccdc_mfilt2 mfilt2; + /* low pass filter enable/disable */ + unsigned char lpf_enable; + /* Threshold of median filter */ + int med_filt_thres; + /* + * horz and vertical data offset. 
Appliable for defect correction + * and lsc + */ + struct ccdc_data_offset data_offset; + /* Structure for Optional A-Law */ + struct ccdc_a_law alaw; + /* Structure for Optical Black Clamp */ + struct ccdc_black_clamp blk_clamp; + /* Structure for Black Compensation */ + struct ccdc_black_compensation blk_comp; + /* struture for vertical Defect Correction Module Configuration */ + struct ccdc_vertical_dft vertical_dft; + /* structure for color space converter Module Configuration */ + struct ccdc_csc csc; + /* color patters for bayer capture */ + struct ccdc_col_pat col_pat_field0; + struct ccdc_col_pat col_pat_field1; +}; + +#ifdef __KERNEL__ +#include + +#define CCDC_WIN_PAL {0, 0, 720, 576} +#define CCDC_WIN_VGA {0, 0, 640, 480} + +struct ccdc_params_ycbcr { + /* pixel format */ + enum ccdc_pixfmt pix_fmt; + /* progressive or interlaced frame */ + enum ccdc_frmfmt frm_fmt; + /* video window */ + struct v4l2_rect win; + /* field id polarity */ + enum vpfe_pin_pol fid_pol; + /* vertical sync polarity */ + enum vpfe_pin_pol vd_pol; + /* horizontal sync polarity */ + enum vpfe_pin_pol hd_pol; + /* enable BT.656 embedded sync mode */ + int bt656_enable; + /* cb:y:cr:y or y:cb:y:cr in memory */ + enum ccdc_pixorder pix_order; + /* interleaved or separated fields */ + enum ccdc_buftype buf_type; +}; + +/* Gain applied to Raw Bayer data */ +struct ccdc_gain { + unsigned short r_ye; + unsigned short gr_cy; + unsigned short gb_g; + unsigned short b_mg; +}; + +/* Structure for CCDC configuration parameters for raw capture mode */ +struct ccdc_params_raw { + /* pixel format */ + enum ccdc_pixfmt pix_fmt; + /* progressive or interlaced frame */ + enum ccdc_frmfmt frm_fmt; + /* video window */ + struct v4l2_rect win; + /* field id polarity */ + enum vpfe_pin_pol fid_pol; + /* vertical sync polarity */ + enum vpfe_pin_pol vd_pol; + /* horizontal sync polarity */ + enum vpfe_pin_pol hd_pol; + /* interleaved or separated fields */ + enum ccdc_buftype buf_type; + /* Gain values */ + struct ccdc_gain gain; + /* offset */ + unsigned int ccdc_offset; + /* horizontal flip enable */ + unsigned char horz_flip_enable; + /* + * enable to store the image in inverse order in memory + * (bottom to top) + */ + unsigned char image_invert_enable; + /* Configurable part of raw data */ + struct ccdc_config_params_raw config_params; +}; + +#endif +#endif /* DM355_CCDC_H */ -- cgit v1.2.3-70-g09d2 From 5f15fbb68fd774780a7fa8fe25a88e4c9e518109 Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Fri, 19 Jun 2009 09:18:14 -0300 Subject: V4L/DVB (12251): v4l: dm644x ccdc module for vpfe capture driver This is the hw module for DM644x CCDC. This registers with the vpfe capture driver and provides a set of hw_ops to configure CCDC for a specific decoder device connected to the VPFE. 
Reviewed by: Hans Verkuil Reviewed by: Laurent Pinchart Signed-off-by: Muralidharan Karicheri Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/davinci/dm644x_ccdc.c | 878 +++++++++++++++++++++++++ drivers/media/video/davinci/dm644x_ccdc_regs.h | 145 ++++ include/media/davinci/dm644x_ccdc.h | 184 ++++++ 3 files changed, 1207 insertions(+) create mode 100644 drivers/media/video/davinci/dm644x_ccdc.c create mode 100644 drivers/media/video/davinci/dm644x_ccdc_regs.h create mode 100644 include/media/davinci/dm644x_ccdc.h (limited to 'include') diff --git a/drivers/media/video/davinci/dm644x_ccdc.c b/drivers/media/video/davinci/dm644x_ccdc.c new file mode 100644 index 00000000000..2f19a919f47 --- /dev/null +++ b/drivers/media/video/davinci/dm644x_ccdc.c @@ -0,0 +1,878 @@ +/* + * Copyright (C) 2006-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * CCDC hardware module for DM6446 + * ------------------------------ + * + * This module is for configuring CCD controller of DM6446 VPFE to capture + * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules + * such as Defect Pixel Correction, Color Space Conversion etc to + * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This + * module also allows application to configure individual + * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. + * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header + * files. The setparams() API is called by vpfe_capture driver + * to configure module parameters. This file is named DM644x so that other + * variants such DM6443 may be supported using the same module. 
+ * + * TODO: Test Raw bayer parameter settings and bayer capture + * Split module parameter structure to module specific ioctl structs + * investigate if enum used for user space type definition + * to be replaced by #defines or integer + */ +#include +#include +#include +#include +#include +#include "dm644x_ccdc_regs.h" +#include "ccdc_hw_device.h" + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CCDC Driver for DM6446"); +MODULE_AUTHOR("Texas Instruments"); + +static struct device *dev; + +/* Object for CCDC raw mode */ +static struct ccdc_params_raw ccdc_hw_params_raw = { + .pix_fmt = CCDC_PIXFMT_RAW, + .frm_fmt = CCDC_FRMFMT_PROGRESSIVE, + .win = CCDC_WIN_VGA, + .fid_pol = VPFE_PINPOL_POSITIVE, + .vd_pol = VPFE_PINPOL_POSITIVE, + .hd_pol = VPFE_PINPOL_POSITIVE, + .config_params = { + .data_sz = CCDC_DATA_10BITS, + }, +}; + +/* Object for CCDC ycbcr mode */ +static struct ccdc_params_ycbcr ccdc_hw_params_ycbcr = { + .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT, + .frm_fmt = CCDC_FRMFMT_INTERLACED, + .win = CCDC_WIN_PAL, + .fid_pol = VPFE_PINPOL_POSITIVE, + .vd_pol = VPFE_PINPOL_POSITIVE, + .hd_pol = VPFE_PINPOL_POSITIVE, + .bt656_enable = 1, + .pix_order = CCDC_PIXORDER_CBYCRY, + .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED +}; + +#define CCDC_MAX_RAW_YUV_FORMATS 2 + +/* Raw Bayer formats */ +static u32 ccdc_raw_bayer_pix_formats[] = + {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16}; + +/* Raw YUV formats */ +static u32 ccdc_raw_yuv_pix_formats[] = + {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV}; + +static void *__iomem ccdc_base_addr; +static int ccdc_addr_size; +static enum vpfe_hw_if_type ccdc_if_type; + +/* register access routines */ +static inline u32 regr(u32 offset) +{ + return __raw_readl(ccdc_base_addr + offset); +} + +static inline void regw(u32 val, u32 offset) +{ + __raw_writel(val, ccdc_base_addr + offset); +} + +static void ccdc_set_ccdc_base(void *addr, int size) +{ + ccdc_base_addr = addr; + ccdc_addr_size = size; +} + +static void ccdc_enable(int flag) +{ + regw(flag, CCDC_PCR); +} + +static void ccdc_enable_vport(int flag) +{ + if (flag) + /* enable video port */ + regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG); + else + regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG); +} + +/* + * ccdc_setwin() + * This function will configure the window size + * to be capture in CCDC reg + */ +void ccdc_setwin(struct v4l2_rect *image_win, + enum ccdc_frmfmt frm_fmt, + int ppc) +{ + int horz_start, horz_nr_pixels; + int vert_start, vert_nr_lines; + int val = 0, mid_img = 0; + + dev_dbg(dev, "\nStarting ccdc_setwin..."); + /* + * ppc - per pixel count. indicates how many pixels per cell + * output to SDRAM. example, for ycbcr, it is one y and one c, so 2. + * raw capture this is 1 + */ + horz_start = image_win->left << (ppc - 1); + horz_nr_pixels = (image_win->width << (ppc - 1)) - 1; + regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels, + CCDC_HORZ_INFO); + + vert_start = image_win->top; + + if (frm_fmt == CCDC_FRMFMT_INTERLACED) { + vert_nr_lines = (image_win->height >> 1) - 1; + vert_start >>= 1; + /* Since first line doesn't have any data */ + vert_start += 1; + /* configure VDINT0 */ + val = (vert_start << CCDC_VDINT_VDINT0_SHIFT); + regw(val, CCDC_VDINT); + + } else { + /* Since first line doesn't have any data */ + vert_start += 1; + vert_nr_lines = image_win->height - 1; + /* + * configure VDINT0 and VDINT1. 
VDINT1 will be at half + * of image height + */ + mid_img = vert_start + (image_win->height / 2); + val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) | + (mid_img & CCDC_VDINT_VDINT1_MASK); + regw(val, CCDC_VDINT); + + } + regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start, + CCDC_VERT_START); + regw(vert_nr_lines, CCDC_VERT_LINES); + dev_dbg(dev, "\nEnd of ccdc_setwin..."); +} + +static void ccdc_readregs(void) +{ + unsigned int val = 0; + + val = regr(CCDC_ALAW); + dev_notice(dev, "\nReading 0x%x to ALAW...\n", val); + val = regr(CCDC_CLAMP); + dev_notice(dev, "\nReading 0x%x to CLAMP...\n", val); + val = regr(CCDC_DCSUB); + dev_notice(dev, "\nReading 0x%x to DCSUB...\n", val); + val = regr(CCDC_BLKCMP); + dev_notice(dev, "\nReading 0x%x to BLKCMP...\n", val); + val = regr(CCDC_FPC_ADDR); + dev_notice(dev, "\nReading 0x%x to FPC_ADDR...\n", val); + val = regr(CCDC_FPC); + dev_notice(dev, "\nReading 0x%x to FPC...\n", val); + val = regr(CCDC_FMTCFG); + dev_notice(dev, "\nReading 0x%x to FMTCFG...\n", val); + val = regr(CCDC_COLPTN); + dev_notice(dev, "\nReading 0x%x to COLPTN...\n", val); + val = regr(CCDC_FMT_HORZ); + dev_notice(dev, "\nReading 0x%x to FMT_HORZ...\n", val); + val = regr(CCDC_FMT_VERT); + dev_notice(dev, "\nReading 0x%x to FMT_VERT...\n", val); + val = regr(CCDC_HSIZE_OFF); + dev_notice(dev, "\nReading 0x%x to HSIZE_OFF...\n", val); + val = regr(CCDC_SDOFST); + dev_notice(dev, "\nReading 0x%x to SDOFST...\n", val); + val = regr(CCDC_VP_OUT); + dev_notice(dev, "\nReading 0x%x to VP_OUT...\n", val); + val = regr(CCDC_SYN_MODE); + dev_notice(dev, "\nReading 0x%x to SYN_MODE...\n", val); + val = regr(CCDC_HORZ_INFO); + dev_notice(dev, "\nReading 0x%x to HORZ_INFO...\n", val); + val = regr(CCDC_VERT_START); + dev_notice(dev, "\nReading 0x%x to VERT_START...\n", val); + val = regr(CCDC_VERT_LINES); + dev_notice(dev, "\nReading 0x%x to VERT_LINES...\n", val); +} + +static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) +{ + if (ccdcparam->alaw.enable) { + if ((ccdcparam->alaw.gama_wd > CCDC_GAMMA_BITS_09_0) || + (ccdcparam->alaw.gama_wd < CCDC_GAMMA_BITS_15_6) || + (ccdcparam->alaw.gama_wd < ccdcparam->data_sz)) { + dev_dbg(dev, "\nInvalid data line select"); + return -1; + } + } + return 0; +} + +static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params) +{ + struct ccdc_config_params_raw *config_params = + &ccdc_hw_params_raw.config_params; + unsigned int *fpc_virtaddr = NULL; + unsigned int *fpc_physaddr = NULL; + + memcpy(config_params, raw_params, sizeof(*raw_params)); + /* + * allocate memory for fault pixel table and copy the user + * values to the table + */ + if (!config_params->fault_pxl.enable) + return 0; + + fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; + fpc_virtaddr = (unsigned int *)phys_to_virt( + (unsigned long)fpc_physaddr); + /* + * Allocate memory for FPC table if current + * FPC table buffer is not big enough to + * accomodate FPC Number requested + */ + if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) { + if (fpc_physaddr != NULL) { + free_pages((unsigned long)fpc_physaddr, + get_order + (config_params->fault_pxl.fp_num * + FP_NUM_BYTES)); + } + + /* Allocate memory for FPC table */ + fpc_virtaddr = + (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA, + get_order(raw_params-> + fault_pxl.fp_num * + FP_NUM_BYTES)); + + if (fpc_virtaddr == NULL) { + dev_dbg(dev, + "\nUnable to allocate memory for FPC"); + return -EFAULT; + } + fpc_physaddr = + (unsigned int 
*)virt_to_phys((void *)fpc_virtaddr); + } + + /* Copy number of fault pixels and FPC table */ + config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num; + if (copy_from_user(fpc_virtaddr, + (void __user *)raw_params->fault_pxl.fpc_table_addr, + config_params->fault_pxl.fp_num * FP_NUM_BYTES)) { + dev_dbg(dev, "\n copy_from_user failed"); + return -EFAULT; + } + config_params->fault_pxl.fpc_table_addr = (unsigned int)fpc_physaddr; + return 0; +} + +static int ccdc_close(struct device *dev) +{ + struct ccdc_config_params_raw *config_params = + &ccdc_hw_params_raw.config_params; + unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL; + + fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; + + if (fpc_physaddr != NULL) { + fpc_virtaddr = (unsigned int *) + phys_to_virt((unsigned long)fpc_physaddr); + free_pages((unsigned long)fpc_virtaddr, + get_order(config_params->fault_pxl.fp_num * + FP_NUM_BYTES)); + } + return 0; +} + +/* + * ccdc_restore_defaults() + * This function will write defaults to all CCDC registers + */ +static void ccdc_restore_defaults(void) +{ + int i; + + /* disable CCDC */ + ccdc_enable(0); + /* set all registers to default value */ + for (i = 4; i <= 0x94; i += 4) + regw(0, i); + regw(CCDC_NO_CULLING, CCDC_CULLING); + regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW); +} + +static int ccdc_open(struct device *device) +{ + dev = device; + ccdc_restore_defaults(); + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_enable_vport(1); + return 0; +} + +static void ccdc_sbl_reset(void) +{ + vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); +} + +/* Parameter operations */ +static int ccdc_set_params(void __user *params) +{ + struct ccdc_config_params_raw ccdc_raw_params; + int x; + + if (ccdc_if_type != VPFE_RAW_BAYER) + return -EINVAL; + + x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); + if (x) { + dev_dbg(dev, "ccdc_set_params: error in copying" + "ccdc params, %d\n", x); + return -EFAULT; + } + + if (!validate_ccdc_param(&ccdc_raw_params)) { + if (!ccdc_update_raw_params(&ccdc_raw_params)) + return 0; + } + return -EINVAL; +} + +/* + * ccdc_config_ycbcr() + * This function will configure CCDC for YCbCr video capture + */ +void ccdc_config_ycbcr(void) +{ + struct ccdc_params_ycbcr *params = &ccdc_hw_params_ycbcr; + u32 syn_mode; + + dev_dbg(dev, "\nStarting ccdc_config_ycbcr..."); + /* + * first restore the CCDC registers to default values + * This is important since we assume default values to be set in + * a lot of registers that we didn't touch + */ + ccdc_restore_defaults(); + + /* + * configure pixel format, frame format, configure video frame + * format, enable output to SDRAM, enable internal timing generator + * and 8bit pack mode + */ + syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) << + CCDC_SYN_MODE_INPMOD_SHIFT) | + ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) << + CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE | + CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE); + + /* setup BT.656 sync mode */ + if (params->bt656_enable) { + regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF); + + /* + * configure the FID, VD, HD pin polarity, + * fld,hd pol positive, vd negative, 8-bit data + */ + syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE | CCDC_SYN_MODE_8BITS; + } else { + /* y/c external sync mode */ + syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) << + CCDC_FID_POL_SHIFT) | + ((params->hd_pol & CCDC_HD_POL_MASK) << + CCDC_HD_POL_SHIFT) | + ((params->vd_pol & CCDC_VD_POL_MASK) << + CCDC_VD_POL_SHIFT)); + } + regw(syn_mode, CCDC_SYN_MODE); + + /* 
configure video window */ + ccdc_setwin(¶ms->win, params->frm_fmt, 2); + + /* + * configure the order of y cb cr in SDRAM, and disable latch + * internal register on vsync + */ + regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) | + CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG); + + /* + * configure the horizontal line offset. This should be a + * on 32 byte bondary. So clear LSB 5 bits + */ + regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF); + + /* configure the memory line offset */ + if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) + /* two fields are interleaved in memory */ + regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST); + + ccdc_sbl_reset(); + dev_dbg(dev, "\nEnd of ccdc_config_ycbcr...\n"); + ccdc_readregs(); +} + +static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp) +{ + u32 val; + + if (!bclamp->enable) { + /* configure DCSub */ + val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK; + regw(val, CCDC_DCSUB); + dev_dbg(dev, "\nWriting 0x%x to DCSUB...\n", val); + regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP); + dev_dbg(dev, "\nWriting 0x0000 to CLAMP...\n"); + return; + } + /* + * Configure gain, Start pixel, No of line to be avg, + * No of pixel/line to be avg, & Enable the Black clamping + */ + val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) | + ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) << + CCDC_BLK_ST_PXL_SHIFT) | + ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) << + CCDC_BLK_SAMPLE_LINE_SHIFT) | + ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) << + CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE); + regw(val, CCDC_CLAMP); + dev_dbg(dev, "\nWriting 0x%x to CLAMP...\n", val); + /* If Black clamping is enable then make dcsub 0 */ + regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB); + dev_dbg(dev, "\nWriting 0x00000000 to DCSUB...\n"); +} + +static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp) +{ + u32 val; + + val = ((bcomp->b & CCDC_BLK_COMP_MASK) | + ((bcomp->gb & CCDC_BLK_COMP_MASK) << + CCDC_BLK_COMP_GB_COMP_SHIFT) | + ((bcomp->gr & CCDC_BLK_COMP_MASK) << + CCDC_BLK_COMP_GR_COMP_SHIFT) | + ((bcomp->r & CCDC_BLK_COMP_MASK) << + CCDC_BLK_COMP_R_COMP_SHIFT)); + regw(val, CCDC_BLKCMP); +} + +static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc) +{ + u32 val; + + /* Initially disable FPC */ + val = CCDC_FPC_DISABLE; + regw(val, CCDC_FPC); + + if (!fpc->enable) + return; + + /* Configure Fault pixel if needed */ + regw(fpc->fpc_table_addr, CCDC_FPC_ADDR); + dev_dbg(dev, "\nWriting 0x%x to FPC_ADDR...\n", + (fpc->fpc_table_addr)); + /* Write the FPC params with FPC disable */ + val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK; + regw(val, CCDC_FPC); + + dev_dbg(dev, "\nWriting 0x%x to FPC...\n", val); + /* read the FPC register */ + val = regr(CCDC_FPC) | CCDC_FPC_ENABLE; + regw(val, CCDC_FPC); + dev_dbg(dev, "\nWriting 0x%x to FPC...\n", val); +} + +/* + * ccdc_config_raw() + * This function will configure CCDC for Raw capture mode + */ +void ccdc_config_raw(void) +{ + struct ccdc_params_raw *params = &ccdc_hw_params_raw; + struct ccdc_config_params_raw *config_params = + &ccdc_hw_params_raw.config_params; + unsigned int syn_mode = 0; + unsigned int val; + + dev_dbg(dev, "\nStarting ccdc_config_raw..."); + + /* Reset CCDC */ + ccdc_restore_defaults(); + + /* Disable latching function registers on VSYNC */ + regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG); + + /* + * Configure the vertical sync polarity(SYN_MODE.VDPOL), + * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity + * (SYN_MODE.FLDPOL), frame format(progressive or 
interlace), + * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output + * SDRAM, enable internal timing generator + */ + syn_mode = + (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) | + ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) | + ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) | + ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) | + ((config_params->data_sz & CCDC_DATA_SZ_MASK) << + CCDC_DATA_SZ_SHIFT) | + ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) | + CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE); + + /* Enable and configure aLaw register if needed */ + if (config_params->alaw.enable) { + val = ((config_params->alaw.gama_wd & + CCDC_ALAW_GAMA_WD_MASK) | CCDC_ALAW_ENABLE); + regw(val, CCDC_ALAW); + dev_dbg(dev, "\nWriting 0x%x to ALAW...\n", val); + } + + /* Configure video window */ + ccdc_setwin(¶ms->win, params->frm_fmt, CCDC_PPC_RAW); + + /* Configure Black Clamp */ + ccdc_config_black_clamp(&config_params->blk_clamp); + + /* Configure Black level compensation */ + ccdc_config_black_compense(&config_params->blk_comp); + + /* Configure Fault Pixel Correction */ + ccdc_config_fpc(&config_params->fault_pxl); + + /* If data size is 8 bit then pack the data */ + if ((config_params->data_sz == CCDC_DATA_8BITS) || + config_params->alaw.enable) + syn_mode |= CCDC_DATA_PACK_ENABLE; + +#ifdef CONFIG_DM644X_VIDEO_PORT_ENABLE + /* enable video port */ + val = CCDC_ENABLE_VIDEO_PORT; +#else + /* disable video port */ + val = CCDC_DISABLE_VIDEO_PORT; +#endif + + if (config_params->data_sz == CCDC_DATA_8BITS) + val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK) + << CCDC_FMTCFG_VPIN_SHIFT; + else + val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK) + << CCDC_FMTCFG_VPIN_SHIFT; + /* Write value in FMTCFG */ + regw(val, CCDC_FMTCFG); + + dev_dbg(dev, "\nWriting 0x%x to FMTCFG...\n", val); + /* Configure the color pattern according to mt9t001 sensor */ + regw(CCDC_COLPTN_VAL, CCDC_COLPTN); + + dev_dbg(dev, "\nWriting 0xBB11BB11 to COLPTN...\n"); + /* + * Configure Data formatter(Video port) pixel selection + * (FMT_HORZ, FMT_VERT) + */ + val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) << + CCDC_FMT_HORZ_FMTSPH_SHIFT) | + (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK); + regw(val, CCDC_FMT_HORZ); + + dev_dbg(dev, "\nWriting 0x%x to FMT_HORZ...\n", val); + val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK) + << CCDC_FMT_VERT_FMTSLV_SHIFT; + if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) + val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK; + else + val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK; + + dev_dbg(dev, "\nparams->win.height 0x%x ...\n", + params->win.height); + regw(val, CCDC_FMT_VERT); + + dev_dbg(dev, "\nWriting 0x%x to FMT_VERT...\n", val); + + dev_dbg(dev, "\nbelow regw(val, FMT_VERT)..."); + + /* + * Configure Horizontal offset register. 
If pack 8 is enabled then + * 1 pixel will take 1 byte + */ + if ((config_params->data_sz == CCDC_DATA_8BITS) || + config_params->alaw.enable) + regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) & + CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF); + else + /* else one pixel will take 2 byte */ + regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) + + CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK, + CCDC_HSIZE_OFF); + + /* Set value for SDOFST */ + if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) { + if (params->image_invert_enable) { + /* For intelace inverse mode */ + regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST); + dev_dbg(dev, "\nWriting 0x4B6D to SDOFST...\n"); + } + + else { + /* For intelace non inverse mode */ + regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST); + dev_dbg(dev, "\nWriting 0x0249 to SDOFST...\n"); + } + } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { + regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST); + dev_dbg(dev, "\nWriting 0x0000 to SDOFST...\n"); + } + + /* + * Configure video port pixel selection (VPOUT) + * Here -1 is to make the height value less than FMT_VERT.FMTLNV + */ + if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) + val = (((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK)) + << CCDC_VP_OUT_VERT_NUM_SHIFT; + else + val = + ((((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) - + 1) & CCDC_VP_OUT_VERT_NUM_MASK)) << + CCDC_VP_OUT_VERT_NUM_SHIFT; + + val |= ((((params->win.width))) & CCDC_VP_OUT_HORZ_NUM_MASK) + << CCDC_VP_OUT_HORZ_NUM_SHIFT; + val |= (params->win.left) & CCDC_VP_OUT_HORZ_ST_MASK; + regw(val, CCDC_VP_OUT); + + dev_dbg(dev, "\nWriting 0x%x to VP_OUT...\n", val); + regw(syn_mode, CCDC_SYN_MODE); + dev_dbg(dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode); + + ccdc_sbl_reset(); + dev_dbg(dev, "\nend of ccdc_config_raw..."); + ccdc_readregs(); +} + +static int ccdc_configure(void) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_config_raw(); + else + ccdc_config_ycbcr(); + return 0; +} + +static int ccdc_set_buftype(enum ccdc_buftype buf_type) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_hw_params_raw.buf_type = buf_type; + else + ccdc_hw_params_ycbcr.buf_type = buf_type; + return 0; +} + +static enum ccdc_buftype ccdc_get_buftype(void) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + return ccdc_hw_params_raw.buf_type; + return ccdc_hw_params_ycbcr.buf_type; +} + +static int ccdc_enum_pix(u32 *pix, int i) +{ + int ret = -EINVAL; + if (ccdc_if_type == VPFE_RAW_BAYER) { + if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) { + *pix = ccdc_raw_bayer_pix_formats[i]; + ret = 0; + } + } else { + if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) { + *pix = ccdc_raw_yuv_pix_formats[i]; + ret = 0; + } + } + return ret; +} + +static int ccdc_set_pixel_format(u32 pixfmt) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) { + ccdc_hw_params_raw.pix_fmt = CCDC_PIXFMT_RAW; + if (pixfmt == V4L2_PIX_FMT_SBGGR8) + ccdc_hw_params_raw.config_params.alaw.enable = 1; + else if (pixfmt != V4L2_PIX_FMT_SBGGR16) + return -EINVAL; + } else { + if (pixfmt == V4L2_PIX_FMT_YUYV) + ccdc_hw_params_ycbcr.pix_order = CCDC_PIXORDER_YCBYCR; + else if (pixfmt == V4L2_PIX_FMT_UYVY) + ccdc_hw_params_ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; + else + return -EINVAL; + } + return 0; +} + +static u32 ccdc_get_pixel_format(void) +{ + struct ccdc_a_law *alaw = + &ccdc_hw_params_raw.config_params.alaw; + u32 pixfmt; + + if (ccdc_if_type == VPFE_RAW_BAYER) + if (alaw->enable) + pixfmt = V4L2_PIX_FMT_SBGGR8; + else + pixfmt = V4L2_PIX_FMT_SBGGR16; + else { + if (ccdc_hw_params_ycbcr.pix_order == 
CCDC_PIXORDER_YCBYCR) + pixfmt = V4L2_PIX_FMT_YUYV; + else + pixfmt = V4L2_PIX_FMT_UYVY; + } + return pixfmt; +} + +static int ccdc_set_image_window(struct v4l2_rect *win) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_hw_params_raw.win = *win; + else + ccdc_hw_params_ycbcr.win = *win; + return 0; +} + +static void ccdc_get_image_window(struct v4l2_rect *win) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + *win = ccdc_hw_params_raw.win; + else + *win = ccdc_hw_params_ycbcr.win; +} + +static unsigned int ccdc_get_line_length(void) +{ + struct ccdc_config_params_raw *config_params = + &ccdc_hw_params_raw.config_params; + unsigned int len; + + if (ccdc_if_type == VPFE_RAW_BAYER) { + if ((config_params->alaw.enable) || + (config_params->data_sz == CCDC_DATA_8BITS)) + len = ccdc_hw_params_raw.win.width; + else + len = ccdc_hw_params_raw.win.width * 2; + } else + len = ccdc_hw_params_ycbcr.win.width * 2; + return ALIGN(len, 32); +} + +static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + ccdc_hw_params_raw.frm_fmt = frm_fmt; + else + ccdc_hw_params_ycbcr.frm_fmt = frm_fmt; + return 0; +} + +static enum ccdc_frmfmt ccdc_get_frame_format(void) +{ + if (ccdc_if_type == VPFE_RAW_BAYER) + return ccdc_hw_params_raw.frm_fmt; + else + return ccdc_hw_params_ycbcr.frm_fmt; +} + +static int ccdc_getfid(void) +{ + return (regr(CCDC_SYN_MODE) >> 15) & 1; +} + +/* misc operations */ +static inline void ccdc_setfbaddr(unsigned long addr) +{ + regw(addr & 0xffffffe0, CCDC_SDR_ADDR); +} + +static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params) +{ + ccdc_if_type = params->if_type; + + switch (params->if_type) { + case VPFE_BT656: + case VPFE_YCBCR_SYNC_16: + case VPFE_YCBCR_SYNC_8: + ccdc_hw_params_ycbcr.vd_pol = params->vdpol; + ccdc_hw_params_ycbcr.hd_pol = params->hdpol; + break; + default: + /* TODO add support for raw bayer here */ + return -EINVAL; + } + return 0; +} + +static struct ccdc_hw_device ccdc_hw_dev = { + .name = "DM6446 CCDC", + .owner = THIS_MODULE, + .hw_ops = { + .open = ccdc_open, + .close = ccdc_close, + .set_ccdc_base = ccdc_set_ccdc_base, + .reset = ccdc_sbl_reset, + .enable = ccdc_enable, + .set_hw_if_params = ccdc_set_hw_if_params, + .set_params = ccdc_set_params, + .configure = ccdc_configure, + .set_buftype = ccdc_set_buftype, + .get_buftype = ccdc_get_buftype, + .enum_pix = ccdc_enum_pix, + .set_pixel_format = ccdc_set_pixel_format, + .get_pixel_format = ccdc_get_pixel_format, + .set_frame_format = ccdc_set_frame_format, + .get_frame_format = ccdc_get_frame_format, + .set_image_window = ccdc_set_image_window, + .get_image_window = ccdc_get_image_window, + .get_line_length = ccdc_get_line_length, + .setfbaddr = ccdc_setfbaddr, + .getfid = ccdc_getfid, + }, +}; + +static int dm644x_ccdc_init(void) +{ + printk(KERN_NOTICE "dm644x_ccdc_init\n"); + if (vpfe_register_ccdc_device(&ccdc_hw_dev) < 0) + return -1; + printk(KERN_NOTICE "%s is registered with vpfe.\n", + ccdc_hw_dev.name); + return 0; +} + +static void dm644x_ccdc_exit(void) +{ + vpfe_unregister_ccdc_device(&ccdc_hw_dev); +} + +module_init(dm644x_ccdc_init); +module_exit(dm644x_ccdc_exit); diff --git a/drivers/media/video/davinci/dm644x_ccdc_regs.h b/drivers/media/video/davinci/dm644x_ccdc_regs.h new file mode 100644 index 00000000000..6e5d0532446 --- /dev/null +++ b/drivers/media/video/davinci/dm644x_ccdc_regs.h @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2006-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under 
the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _DM644X_CCDC_REGS_H +#define _DM644X_CCDC_REGS_H + +/**************************************************************************\ +* Register OFFSET Definitions +\**************************************************************************/ +#define CCDC_PID 0x0 +#define CCDC_PCR 0x4 +#define CCDC_SYN_MODE 0x8 +#define CCDC_HD_VD_WID 0xc +#define CCDC_PIX_LINES 0x10 +#define CCDC_HORZ_INFO 0x14 +#define CCDC_VERT_START 0x18 +#define CCDC_VERT_LINES 0x1c +#define CCDC_CULLING 0x20 +#define CCDC_HSIZE_OFF 0x24 +#define CCDC_SDOFST 0x28 +#define CCDC_SDR_ADDR 0x2c +#define CCDC_CLAMP 0x30 +#define CCDC_DCSUB 0x34 +#define CCDC_COLPTN 0x38 +#define CCDC_BLKCMP 0x3c +#define CCDC_FPC 0x40 +#define CCDC_FPC_ADDR 0x44 +#define CCDC_VDINT 0x48 +#define CCDC_ALAW 0x4c +#define CCDC_REC656IF 0x50 +#define CCDC_CCDCFG 0x54 +#define CCDC_FMTCFG 0x58 +#define CCDC_FMT_HORZ 0x5c +#define CCDC_FMT_VERT 0x60 +#define CCDC_FMT_ADDR0 0x64 +#define CCDC_FMT_ADDR1 0x68 +#define CCDC_FMT_ADDR2 0x6c +#define CCDC_FMT_ADDR3 0x70 +#define CCDC_FMT_ADDR4 0x74 +#define CCDC_FMT_ADDR5 0x78 +#define CCDC_FMT_ADDR6 0x7c +#define CCDC_FMT_ADDR7 0x80 +#define CCDC_PRGEVEN_0 0x84 +#define CCDC_PRGEVEN_1 0x88 +#define CCDC_PRGODD_0 0x8c +#define CCDC_PRGODD_1 0x90 +#define CCDC_VP_OUT 0x94 + + +/*************************************************************** +* Define for various register bit mask and shifts for CCDC +****************************************************************/ +#define CCDC_FID_POL_MASK 1 +#define CCDC_FID_POL_SHIFT 4 +#define CCDC_HD_POL_MASK 1 +#define CCDC_HD_POL_SHIFT 3 +#define CCDC_VD_POL_MASK 1 +#define CCDC_VD_POL_SHIFT 2 +#define CCDC_HSIZE_OFF_MASK 0xffffffe0 +#define CCDC_32BYTE_ALIGN_VAL 31 +#define CCDC_FRM_FMT_MASK 0x1 +#define CCDC_FRM_FMT_SHIFT 7 +#define CCDC_DATA_SZ_MASK 7 +#define CCDC_DATA_SZ_SHIFT 8 +#define CCDC_PIX_FMT_MASK 3 +#define CCDC_PIX_FMT_SHIFT 12 +#define CCDC_VP2SDR_DISABLE 0xFFFBFFFF +#define CCDC_WEN_ENABLE (1 << 17) +#define CCDC_SDR2RSZ_DISABLE 0xFFF7FFFF +#define CCDC_VDHDEN_ENABLE (1 << 16) +#define CCDC_LPF_ENABLE (1 << 14) +#define CCDC_ALAW_ENABLE (1 << 3) +#define CCDC_ALAW_GAMA_WD_MASK 7 +#define CCDC_BLK_CLAMP_ENABLE (1 << 31) +#define CCDC_BLK_SGAIN_MASK 0x1F +#define CCDC_BLK_ST_PXL_MASK 0x7FFF +#define CCDC_BLK_ST_PXL_SHIFT 10 +#define CCDC_BLK_SAMPLE_LN_MASK 7 +#define CCDC_BLK_SAMPLE_LN_SHIFT 28 +#define CCDC_BLK_SAMPLE_LINE_MASK 7 +#define CCDC_BLK_SAMPLE_LINE_SHIFT 25 +#define CCDC_BLK_DC_SUB_MASK 0x03FFF +#define CCDC_BLK_COMP_MASK 0xFF +#define CCDC_BLK_COMP_GB_COMP_SHIFT 8 +#define CCDC_BLK_COMP_GR_COMP_SHIFT 16 +#define CCDC_BLK_COMP_R_COMP_SHIFT 24 +#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15) +#define CCDC_FPC_ENABLE (1 << 15) +#define CCDC_FPC_DISABLE 0 +#define CCDC_FPC_FPC_NUM_MASK 0x7FFF +#define CCDC_DATA_PACK_ENABLE (1 << 11) +#define CCDC_FMTCFG_VPIN_MASK 7 +#define CCDC_FMTCFG_VPIN_SHIFT 12 
+#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF +#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF +#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16 +#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF +#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF +#define CCDC_FMT_VERT_FMTSLV_SHIFT 16 +#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF +#define CCDC_VP_OUT_VERT_NUM_SHIFT 17 +#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF +#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4 +#define CCDC_VP_OUT_HORZ_ST_MASK 0xF +#define CCDC_HORZ_INFO_SPH_SHIFT 16 +#define CCDC_VERT_START_SLV0_SHIFT 16 +#define CCDC_VDINT_VDINT0_SHIFT 16 +#define CCDC_VDINT_VDINT1_MASK 0xFFFF +#define CCDC_PPC_RAW 1 +#define CCDC_DCSUB_DEFAULT_VAL 0 +#define CCDC_CLAMP_DEFAULT_VAL 0 +#define CCDC_ENABLE_VIDEO_PORT 0x8000 +#define CCDC_DISABLE_VIDEO_PORT 0 +#define CCDC_COLPTN_VAL 0xBB11BB11 +#define CCDC_TWO_BYTES_PER_PIXEL 2 +#define CCDC_INTERLACED_IMAGE_INVERT 0x4B6D +#define CCDC_INTERLACED_NO_IMAGE_INVERT 0x0249 +#define CCDC_PROGRESSIVE_IMAGE_INVERT 0x4000 +#define CCDC_PROGRESSIVE_NO_IMAGE_INVERT 0 +#define CCDC_INTERLACED_HEIGHT_SHIFT 1 +#define CCDC_SYN_MODE_INPMOD_SHIFT 12 +#define CCDC_SYN_MODE_INPMOD_MASK 3 +#define CCDC_SYN_MODE_8BITS (7 << 8) +#define CCDC_SYN_FLDMODE_MASK 1 +#define CCDC_SYN_FLDMODE_SHIFT 7 +#define CCDC_REC656IF_BT656_EN 3 +#define CCDC_SYN_MODE_VD_POL_NEGATIVE (1 << 2) +#define CCDC_CCDCFG_Y8POS_SHIFT 11 +#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249 +#define CCDC_NO_CULLING 0xffff00ff +#endif diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h new file mode 100644 index 00000000000..3e178eb52fb --- /dev/null +++ b/include/media/davinci/dm644x_ccdc.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2006-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _DM644X_CCDC_H +#define _DM644X_CCDC_H +#include +#include + +/* enum for No of pixel per line to be avg. 
in Black Clamping*/ +enum ccdc_sample_length { + CCDC_SAMPLE_1PIXELS, + CCDC_SAMPLE_2PIXELS, + CCDC_SAMPLE_4PIXELS, + CCDC_SAMPLE_8PIXELS, + CCDC_SAMPLE_16PIXELS +}; + +/* enum for No of lines in Black Clamping */ +enum ccdc_sample_line { + CCDC_SAMPLE_1LINES, + CCDC_SAMPLE_2LINES, + CCDC_SAMPLE_4LINES, + CCDC_SAMPLE_8LINES, + CCDC_SAMPLE_16LINES +}; + +/* enum for Alaw gama width */ +enum ccdc_gama_width { + CCDC_GAMMA_BITS_15_6, + CCDC_GAMMA_BITS_14_5, + CCDC_GAMMA_BITS_13_4, + CCDC_GAMMA_BITS_12_3, + CCDC_GAMMA_BITS_11_2, + CCDC_GAMMA_BITS_10_1, + CCDC_GAMMA_BITS_09_0 +}; + +enum ccdc_data_size { + CCDC_DATA_16BITS, + CCDC_DATA_15BITS, + CCDC_DATA_14BITS, + CCDC_DATA_13BITS, + CCDC_DATA_12BITS, + CCDC_DATA_11BITS, + CCDC_DATA_10BITS, + CCDC_DATA_8BITS +}; + +/* structure for ALaw */ +struct ccdc_a_law { + /* Enable/disable A-Law */ + unsigned char enable; + /* Gama Width Input */ + enum ccdc_gama_width gama_wd; +}; + +/* structure for Black Clamping */ +struct ccdc_black_clamp { + unsigned char enable; + /* only if bClampEnable is TRUE */ + enum ccdc_sample_length sample_pixel; + /* only if bClampEnable is TRUE */ + enum ccdc_sample_line sample_ln; + /* only if bClampEnable is TRUE */ + unsigned short start_pixel; + /* only if bClampEnable is TRUE */ + unsigned short sgain; + /* only if bClampEnable is FALSE */ + unsigned short dc_sub; +}; + +/* structure for Black Level Compensation */ +struct ccdc_black_compensation { + /* Constant value to subtract from Red component */ + char r; + /* Constant value to subtract from Gr component */ + char gr; + /* Constant value to subtract from Blue component */ + char b; + /* Constant value to subtract from Gb component */ + char gb; +}; + +/* structure for fault pixel correction */ +struct ccdc_fault_pixel { + /* Enable or Disable fault pixel correction */ + unsigned char enable; + /* Number of fault pixel */ + unsigned short fp_num; + /* Address of fault pixel table */ + unsigned int fpc_table_addr; +}; + +/* Structure for CCDC configuration parameters for raw capture mode passed + * by application + */ +struct ccdc_config_params_raw { + /* data size value from 8 to 16 bits */ + enum ccdc_data_size data_sz; + /* Structure for Optional A-Law */ + struct ccdc_a_law alaw; + /* Structure for Optical Black Clamp */ + struct ccdc_black_clamp blk_clamp; + /* Structure for Black Compensation */ + struct ccdc_black_compensation blk_comp; + /* Structure for Fault Pixel Module Configuration */ + struct ccdc_fault_pixel fault_pxl; +}; + + +#ifdef __KERNEL__ +#include +/* Define to enable/disable video port */ +#define FP_NUM_BYTES 4 +/* Define for extra pixel/line and extra lines/frame */ +#define NUM_EXTRAPIXELS 8 +#define NUM_EXTRALINES 8 + +/* settings for commonly used video formats */ +#define CCDC_WIN_PAL {0, 0, 720, 576} +/* ntsc square pixel */ +#define CCDC_WIN_VGA {0, 0, (640 + NUM_EXTRAPIXELS), (480 + NUM_EXTRALINES)} + +/* Structure for CCDC configuration parameters for raw capture mode */ +struct ccdc_params_raw { + /* pixel format */ + enum ccdc_pixfmt pix_fmt; + /* progressive or interlaced frame */ + enum ccdc_frmfmt frm_fmt; + /* video window */ + struct v4l2_rect win; + /* field id polarity */ + enum vpfe_pin_pol fid_pol; + /* vertical sync polarity */ + enum vpfe_pin_pol vd_pol; + /* horizontal sync polarity */ + enum vpfe_pin_pol hd_pol; + /* interleaved or separated fields */ + enum ccdc_buftype buf_type; + /* + * enable to store the image in inverse + * order in memory(bottom to top) + */ + unsigned char image_invert_enable; + /* 
configurable paramaters */ + struct ccdc_config_params_raw config_params; +}; + +struct ccdc_params_ycbcr { + /* pixel format */ + enum ccdc_pixfmt pix_fmt; + /* progressive or interlaced frame */ + enum ccdc_frmfmt frm_fmt; + /* video window */ + struct v4l2_rect win; + /* field id polarity */ + enum vpfe_pin_pol fid_pol; + /* vertical sync polarity */ + enum vpfe_pin_pol vd_pol; + /* horizontal sync polarity */ + enum vpfe_pin_pol hd_pol; + /* enable BT.656 embedded sync mode */ + int bt656_enable; + /* cb:y:cr:y or y:cb:y:cr in memory */ + enum ccdc_pixorder pix_order; + /* interleaved or separated fields */ + enum ccdc_buftype buf_type; +}; +#endif +#endif /* _DM644X_CCDC_H */ -- cgit v1.2.3-70-g09d2 From 92ee438b8e27f1b96ce5a7e8d73cb11b71a02584 Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Fri, 19 Jun 2009 09:19:17 -0300 Subject: V4L/DVB (12252): v4l: ccdc types used across ccdc modules for vpfe capture driver common types used across CCDC modules Reviewed by: Hans Verkuil Reviewed by: Laurent Pinchart Signed-off-by: Muralidharan Karicheri Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/davinci/ccdc_types.h | 43 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 include/media/davinci/ccdc_types.h (limited to 'include') diff --git a/include/media/davinci/ccdc_types.h b/include/media/davinci/ccdc_types.h new file mode 100644 index 00000000000..5773874bf26 --- /dev/null +++ b/include/media/davinci/ccdc_types.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2008-2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + **************************************************************************/ +#ifndef _CCDC_TYPES_H +#define _CCDC_TYPES_H +enum ccdc_pixfmt { + CCDC_PIXFMT_RAW, + CCDC_PIXFMT_YCBCR_16BIT, + CCDC_PIXFMT_YCBCR_8BIT +}; + +enum ccdc_frmfmt { + CCDC_FRMFMT_PROGRESSIVE, + CCDC_FRMFMT_INTERLACED +}; + +/* PIXEL ORDER IN MEMORY from LSB to MSB */ +/* only applicable for 8-bit input mode */ +enum ccdc_pixorder { + CCDC_PIXORDER_YCBYCR, + CCDC_PIXORDER_CBYCRY, +}; + +enum ccdc_buftype { + CCDC_BUFTYPE_FLD_INTERLEAVED, + CCDC_BUFTYPE_FLD_SEPARATED +}; +#endif -- cgit v1.2.3-70-g09d2 From 7b140b89307a59527df644100ce5ab3bc1be7d1b Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Fri, 19 Jun 2009 09:20:16 -0300 Subject: V4L/DVB (12253): v4l: common vpss module for video drivers This is a new module added for vpss library functions that are used for configuring vpss system module. All video drivers will include vpss.h header file and call functions defined in this module to configure vpss system module. 
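Before the diff, a condensed view of how a video driver is meant to use the library: include the new header and call the exported helpers, which return 0 on success and -1 when the requested function is not available on the current platform. The wrapper functions below are hypothetical callers, not part of the patch.

#include <media/davinci/vpss.h>

/* hypothetical DM355 capture-path setup using the exported helpers */
static int my_vpfe_setup_vpss(void)
{
	/* route the sensor input to the CCDC and gate its clock on */
	if (vpss_select_ccdc_source(VPSS_CCDCIN))
		return -1;
	if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1))
		return -1;
	return 0;
}

/* hypothetical DM644x error handler: clear a CCDC write buffer overflow */
static void my_vpfe_clear_overflow(void)
{
	vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
}

The DM644x CCDC module earlier in this series makes exactly the last call in its ccdc_sbl_reset() helper.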
Reviewed by: Hans Verkuil Reviewed by: Laurent Pinchart Reviewed by: Alexey Klimov Signed-off-by: Muralidharan Karicheri Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/davinci/vpss.c | 301 +++++++++++++++++++++++++++++++++++++ include/media/davinci/vpss.h | 69 +++++++++ 2 files changed, 370 insertions(+) create mode 100644 drivers/media/video/davinci/vpss.c create mode 100644 include/media/davinci/vpss.h (limited to 'include') diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c new file mode 100644 index 00000000000..6d709ca8cfb --- /dev/null +++ b/drivers/media/video/davinci/vpss.c @@ -0,0 +1,301 @@ +/* + * Copyright (C) 2009 Texas Instruments. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * common vpss driver for all video drivers. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("VPSS Driver"); +MODULE_AUTHOR("Texas Instruments"); + +/* DM644x defines */ +#define DM644X_SBL_PCR_VPSS (4) + +/* vpss BL register offsets */ +#define DM355_VPSSBL_CCDCMUX 0x1c +/* vpss CLK register offsets */ +#define DM355_VPSSCLK_CLKCTRL 0x04 +/* masks and shifts */ +#define VPSS_HSSISEL_SHIFT 4 + +/* + * vpss operations. Depends on platform. Not all functions are available + * on all platforms. The api, first check if a functio is available before + * invoking it. In the probe, the function ptrs are intialized based on + * vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc. 
+ */ +struct vpss_hw_ops { + /* enable clock */ + int (*enable_clock)(enum vpss_clock_sel clock_sel, int en); + /* select input to ccdc */ + void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel); + /* clear wbl overlflow bit */ + int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel); +}; + +/* vpss configuration */ +struct vpss_oper_config { + __iomem void *vpss_bl_regs_base; + __iomem void *vpss_regs_base; + struct resource *r1; + resource_size_t len1; + struct resource *r2; + resource_size_t len2; + char vpss_name[32]; + spinlock_t vpss_lock; + struct vpss_hw_ops hw_ops; +}; + +static struct vpss_oper_config oper_cfg; + +/* register access routines */ +static inline u32 bl_regr(u32 offset) +{ + return __raw_readl(oper_cfg.vpss_bl_regs_base + offset); +} + +static inline void bl_regw(u32 val, u32 offset) +{ + __raw_writel(val, oper_cfg.vpss_bl_regs_base + offset); +} + +static inline u32 vpss_regr(u32 offset) +{ + return __raw_readl(oper_cfg.vpss_regs_base + offset); +} + +static inline void vpss_regw(u32 val, u32 offset) +{ + __raw_writel(val, oper_cfg.vpss_regs_base + offset); +} + +static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel) +{ + bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX); +} + +int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel) +{ + if (!oper_cfg.hw_ops.select_ccdc_source) + return -1; + + dm355_select_ccdc_source(src_sel); + return 0; +} +EXPORT_SYMBOL(vpss_select_ccdc_source); + +static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel) +{ + u32 mask = 1, val; + + if (wbl_sel < VPSS_PCR_AEW_WBL_0 || + wbl_sel > VPSS_PCR_CCDC_WBL_O) + return -1; + + /* writing a 0 clear the overflow */ + mask = ~(mask << wbl_sel); + val = bl_regr(DM644X_SBL_PCR_VPSS) & mask; + bl_regw(val, DM644X_SBL_PCR_VPSS); + return 0; +} + +int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel) +{ + if (!oper_cfg.hw_ops.clear_wbl_overflow) + return -1; + + return oper_cfg.hw_ops.clear_wbl_overflow(wbl_sel); +} +EXPORT_SYMBOL(vpss_clear_wbl_overflow); + +/* + * dm355_enable_clock - Enable VPSS Clock + * @clock_sel: CLock to be enabled/disabled + * @en: enable/disable flag + * + * This is called to enable or disable a vpss clock + */ +static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en) +{ + unsigned long flags; + u32 utemp, mask = 0x1, shift = 0; + + switch (clock_sel) { + case VPSS_VPBE_CLOCK: + /* nothing since lsb */ + break; + case VPSS_VENC_CLOCK_SEL: + shift = 2; + break; + case VPSS_CFALD_CLOCK: + shift = 3; + break; + case VPSS_H3A_CLOCK: + shift = 4; + break; + case VPSS_IPIPE_CLOCK: + shift = 5; + break; + case VPSS_CCDC_CLOCK: + shift = 6; + break; + default: + printk(KERN_ERR "dm355_enable_clock:" + " Invalid selector: %d\n", clock_sel); + return -1; + } + + spin_lock_irqsave(&oper_cfg.vpss_lock, flags); + utemp = vpss_regr(DM355_VPSSCLK_CLKCTRL); + if (!en) + utemp &= ~(mask << shift); + else + utemp |= (mask << shift); + + vpss_regw(utemp, DM355_VPSSCLK_CLKCTRL); + spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags); + return 0; +} + +int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en) +{ + if (!oper_cfg.hw_ops.enable_clock) + return -1; + + return oper_cfg.hw_ops.enable_clock(clock_sel, en); +} +EXPORT_SYMBOL(vpss_enable_clock); + +static int __init vpss_probe(struct platform_device *pdev) +{ + int status, dm355 = 0; + + if (!pdev->dev.platform_data) { + dev_err(&pdev->dev, "no platform data\n"); + return -ENOENT; + } + strcpy(oper_cfg.vpss_name, pdev->dev.platform_data); + + if 
(!strcmp(oper_cfg.vpss_name, "dm355_vpss")) + dm355 = 1; + else if (strcmp(oper_cfg.vpss_name, "dm644x_vpss")) { + dev_err(&pdev->dev, "vpss driver not supported on" + " this platform\n"); + return -ENODEV; + } + + dev_info(&pdev->dev, "%s vpss probed\n", oper_cfg.vpss_name); + oper_cfg.r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!oper_cfg.r1) + return -ENOENT; + + oper_cfg.len1 = oper_cfg.r1->end - oper_cfg.r1->start + 1; + + oper_cfg.r1 = request_mem_region(oper_cfg.r1->start, oper_cfg.len1, + oper_cfg.r1->name); + if (!oper_cfg.r1) + return -EBUSY; + + oper_cfg.vpss_bl_regs_base = ioremap(oper_cfg.r1->start, oper_cfg.len1); + if (!oper_cfg.vpss_bl_regs_base) { + status = -EBUSY; + goto fail1; + } + + if (dm355) { + oper_cfg.r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!oper_cfg.r2) { + status = -ENOENT; + goto fail2; + } + oper_cfg.len2 = oper_cfg.r2->end - oper_cfg.r2->start + 1; + oper_cfg.r2 = request_mem_region(oper_cfg.r2->start, + oper_cfg.len2, + oper_cfg.r2->name); + if (!oper_cfg.r2) { + status = -EBUSY; + goto fail2; + } + + oper_cfg.vpss_regs_base = ioremap(oper_cfg.r2->start, + oper_cfg.len2); + if (!oper_cfg.vpss_regs_base) { + status = -EBUSY; + goto fail3; + } + } + + if (dm355) { + oper_cfg.hw_ops.enable_clock = dm355_enable_clock; + oper_cfg.hw_ops.select_ccdc_source = dm355_select_ccdc_source; + } else + oper_cfg.hw_ops.clear_wbl_overflow = dm644x_clear_wbl_overflow; + + spin_lock_init(&oper_cfg.vpss_lock); + dev_info(&pdev->dev, "%s vpss probe success\n", oper_cfg.vpss_name); + return 0; + +fail3: + release_mem_region(oper_cfg.r2->start, oper_cfg.len2); +fail2: + iounmap(oper_cfg.vpss_bl_regs_base); +fail1: + release_mem_region(oper_cfg.r1->start, oper_cfg.len1); + return status; +} + +static int vpss_remove(struct platform_device *pdev) +{ + iounmap(oper_cfg.vpss_bl_regs_base); + release_mem_region(oper_cfg.r1->start, oper_cfg.len1); + if (!strcmp(oper_cfg.vpss_name, "dm355_vpss")) { + iounmap(oper_cfg.vpss_regs_base); + release_mem_region(oper_cfg.r2->start, oper_cfg.len2); + } + return 0; +} + +static struct platform_driver vpss_driver = { + .driver = { + .name = "vpss", + .owner = THIS_MODULE, + }, + .remove = __devexit_p(vpss_remove), + .probe = vpss_probe, +}; + +static void vpss_exit(void) +{ + platform_driver_unregister(&vpss_driver); +} + +static int __init vpss_init(void) +{ + return platform_driver_register(&vpss_driver); +} +subsys_initcall(vpss_init); +module_exit(vpss_exit); diff --git a/include/media/davinci/vpss.h b/include/media/davinci/vpss.h new file mode 100644 index 00000000000..fcdff745fae --- /dev/null +++ b/include/media/davinci/vpss.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2009 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * vpss - video processing subsystem module header file. 
+ * + * Include this header file if a driver needs to configure vpss system + * module. It exports a set of library functions for video drivers to + * configure vpss system module functions such as clock enable/disable, + * vpss interrupt mux to arm, and other common vpss system module + * functions. + */ +#ifndef _VPSS_H +#define _VPSS_H + +/* selector for ccdc input selection on DM355 */ +enum vpss_ccdc_source_sel { + VPSS_CCDCIN, + VPSS_HSSIIN +}; + +/* Used for enable/diable VPSS Clock */ +enum vpss_clock_sel { + /* DM355/DM365 */ + VPSS_CCDC_CLOCK, + VPSS_IPIPE_CLOCK, + VPSS_H3A_CLOCK, + VPSS_CFALD_CLOCK, + /* + * When using VPSS_VENC_CLOCK_SEL in vpss_enable_clock() api + * following applies:- + * en = 0 selects ENC_CLK + * en = 1 selects ENC_CLK/2 + */ + VPSS_VENC_CLOCK_SEL, + VPSS_VPBE_CLOCK, +}; + +/* select input to ccdc on dm355 */ +int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel); +/* enable/disable a vpss clock, 0 - success, -1 - failure */ +int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en); + +/* wbl reset for dm644x */ +enum vpss_wbl_sel { + VPSS_PCR_AEW_WBL_0 = 16, + VPSS_PCR_AF_WBL_0, + VPSS_PCR_RSZ4_WBL_0, + VPSS_PCR_RSZ3_WBL_0, + VPSS_PCR_RSZ2_WBL_0, + VPSS_PCR_RSZ1_WBL_0, + VPSS_PCR_PREV_WBL_0, + VPSS_PCR_CCDC_WBL_O, +}; +int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel); +#endif -- cgit v1.2.3-70-g09d2 From c41debafc6e396a8e15f1f017aec7c0cf67e1b54 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:06:21 -0300 Subject: V4L/DVB (12504): soc-camera: prepare soc_camera_platform.c and its users for conversion soc_camera_platform.c is only used by y SuperH ap325rxa board. This patch converts soc_camera_platform.c and its users for the soc-camera platform- device conversion and also extends soc-camera core to handle non-I2C cameras. 
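The mechanism the conversion relies on, stripped of the board specifics shown below: a non-I2C camera is described by a soc_camera_link that leaves board_info NULL and supplies add_device()/del_device() hooks instead, and that link is handed to the "soc-camera-pdrv" platform driver as platform data. A minimal sketch with placeholder "my_" names; the real board code is in the ap325rxa hunks that follow.

#include <linux/platform_device.h>
#include <media/soc_camera.h>

static int my_camera_add(struct soc_camera_link *icl, struct device *dev)
{
	/* probe the sensor and register its device here; 0 on success */
	return 0;
}

static void my_camera_del(struct soc_camera_link *icl)
{
	/* undo whatever my_camera_add() set up */
}

static struct soc_camera_link my_camera_link = {
	/* board_info left NULL marks this as a non-I2C device */
	.add_device = my_camera_add,
	.del_device = my_camera_del,
};

static struct platform_device my_camera_pdev = {
	.name	= "soc-camera-pdrv",
	.id	= 0,
	.dev	= {
		.platform_data = &my_camera_link,
	},
};

With this in place, soc_camera_pdrv_probe() calls add_device() for links without board_info and keeps the existing I2C path for everything else, as the soc_camera.c hunk below shows.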
Cc: Paul Mundt Signed-off-by: Guennadi Liakhovetski Acked-by: Paul Mundt Signed-off-by: Mauro Carvalho Chehab --- arch/sh/boards/board-ap325rxa.c | 43 +++++++++++++++++++------- drivers/media/video/soc_camera.c | 61 ++++++++++++++++++++++++++++--------- include/media/soc_camera.h | 6 ++++ include/media/soc_camera_platform.h | 2 ++ 4 files changed, 86 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 327d47c25a5..7e2d2de0384 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -310,6 +310,9 @@ static int camera_set_capture(struct soc_camera_platform_info *info, return ret; } +static int ap325rxa_camera_add(struct soc_camera_link *icl, struct device *dev); +static void ap325rxa_camera_del(struct soc_camera_link *icl); + static struct soc_camera_platform_info camera_info = { .iface = 0, .format_name = "UYVY", @@ -323,6 +326,10 @@ static struct soc_camera_platform_info camera_info = { .bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8, .set_capture = camera_set_capture, + .link = { + .add_device = ap325rxa_camera_add, + .del_device = ap325rxa_camera_del, + }, }; static struct platform_device camera_device = { @@ -332,15 +339,20 @@ static struct platform_device camera_device = { }, }; -static int __init camera_setup(void) +static int ap325rxa_camera_add(struct soc_camera_link *icl, + struct device *dev) { - if (camera_probe() > 0) - platform_device_register(&camera_device); + if (icl != &camera_info.link || camera_probe() <= 0) + return -ENODEV; - return 0; + return platform_device_register(&camera_device); } -late_initcall(camera_setup); +static void ap325rxa_camera_del(struct soc_camera_link *icl) +{ + if (icl == &camera_info.link) + platform_device_unregister(&camera_device); +} #endif /* CONFIG_I2C */ static int ov7725_power(struct device *dev, int mode) @@ -423,11 +435,19 @@ static struct ov772x_camera_info ov7725_info = { }, }; -static struct platform_device ap325rxa_camera = { - .name = "soc-camera-pdrv", - .id = 0, - .dev = { - .platform_data = &ov7725_info.link, +static struct platform_device ap325rxa_camera[] = { + { + .name = "soc-camera-pdrv", + .id = 0, + .dev = { + .platform_data = &ov7725_info.link, + }, + }, { + .name = "soc-camera-pdrv", + .id = 1, + .dev = { + .platform_data = &camera_info.link, + }, }, }; @@ -438,7 +458,8 @@ static struct platform_device *ap325rxa_devices[] __initdata = { &ceu_device, &nand_flash_device, &sdcard_cn3_device, - &ap325rxa_camera, + &ap325rxa_camera[0], + &ap325rxa_camera[1], }; static struct spi_board_info ap325rxa_spi_devices[] = { diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 9f5ae816785..0340754e540 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -1165,45 +1165,76 @@ void soc_camera_video_stop(struct soc_camera_device *icd) } EXPORT_SYMBOL(soc_camera_video_stop); -static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev) +#ifdef CONFIG_I2C_BOARDINFO +static int soc_camera_init_i2c(struct platform_device *pdev, + struct soc_camera_link *icl) { - struct soc_camera_link *icl = pdev->dev.platform_data; - struct i2c_adapter *adap; struct i2c_client *client; + struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id); + int ret; - if (!icl) - return -EINVAL; - - adap = i2c_get_adapter(icl->i2c_adapter_id); if (!adap) { - dev_warn(&pdev->dev, "Cannot get 
adapter #%d. No driver?\n", - icl->i2c_adapter_id); - /* -ENODEV and -ENXIO do not produce an error on probe()... */ - return -ENOENT; + ret = -ENODEV; + dev_err(&pdev->dev, "Cannot get adapter #%d. No driver?\n", + icl->i2c_adapter_id); + goto ei2cga; } icl->board_info->platform_data = icl; client = i2c_new_device(adap, icl->board_info); if (!client) { - i2c_put_adapter(adap); - return -ENOMEM; + ret = -ENOMEM; + goto ei2cnd; } platform_set_drvdata(pdev, client); return 0; +ei2cnd: + i2c_put_adapter(adap); +ei2cga: + return ret; } -static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev) +static void soc_camera_free_i2c(struct platform_device *pdev) { struct i2c_client *client = platform_get_drvdata(pdev); if (!client) - return -ENODEV; + return; i2c_unregister_device(client); i2c_put_adapter(client->adapter); +} +#else +#define soc_camera_init_i2c(d, icl) (-ENODEV) +#define soc_camera_free_i2c(d) do {} while (0) +#endif +static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev) +{ + struct soc_camera_link *icl = pdev->dev.platform_data; + + if (!icl) + return -EINVAL; + + if (icl->board_info) + return soc_camera_init_i2c(pdev, icl); + else if (!icl->add_device || !icl->del_device) + return -EINVAL; + + /* &pdev->dev will become &icd->dev */ + return icl->add_device(icl, &pdev->dev); +} + +static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev) +{ + struct soc_camera_link *icl = pdev->dev.platform_data; + + if (icl->board_info) + soc_camera_free_i2c(pdev); + else + icl->del_device(icl); return 0; } diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 23ecead35e7..813e12061da 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -102,6 +102,12 @@ struct soc_camera_link { int i2c_adapter_id; struct i2c_board_info *board_info; const char *module_name; + /* + * For non-I2C devices platform platform has to provide methods to + * add a device to the system and to remove + */ + int (*add_device)(struct soc_camera_link *, struct device *); + void (*del_device)(struct soc_camera_link *); /* Optional callbacks to power on or off and reset the sensor */ int (*power)(struct device *, int); int (*reset)(struct device *); diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h index 1d092b4678a..af224deadb4 100644 --- a/include/media/soc_camera_platform.h +++ b/include/media/soc_camera_platform.h @@ -12,6 +12,7 @@ #define __SOC_CAMERA_H__ #include +#include struct soc_camera_platform_info { int iface; @@ -21,6 +22,7 @@ struct soc_camera_platform_info { unsigned long bus_param; void (*power)(int); int (*set_capture)(struct soc_camera_platform_info *info, int enable); + struct soc_camera_link link; }; #endif /* __SOC_CAMERA_H__ */ -- cgit v1.2.3-70-g09d2 From bc1937b41d8253e2b554da385023a92189d38917 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:06:22 -0300 Subject: V4L/DVB (12505): soc_camera_platform: pass device pointer from soc-camera core on .add_device() Add a struct device pointer to struct soc_camera_platform_info and let the user (ap325rxa) pass it down to soc_camera_platform.c in its .add_device() method. 
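The change is small enough to show as a pattern: the board's .add_device() hook now receives the device pointer handed in by the soc-camera core and stores it in the new dev member of its soc_camera_platform_info before registering the camera platform device. In this sketch, my_camera_info and my_camera_device are placeholders standing in for ap325rxa's camera_info and camera_device.

static int my_camera_add(struct soc_camera_link *icl, struct device *dev)
{
	if (icl != &my_camera_info.link)
		return -ENODEV;

	my_camera_info.dev = dev;	/* new member added by this patch */

	return platform_device_register(&my_camera_device);
}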
Signed-off-by: Guennadi Liakhovetski Acked-by: Paul Mundt Signed-off-by: Mauro Carvalho Chehab --- arch/sh/boards/board-ap325rxa.c | 2 ++ include/media/soc_camera_platform.h | 3 +++ 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 7e2d2de0384..97a2fc97e10 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -345,6 +345,8 @@ static int ap325rxa_camera_add(struct soc_camera_link *icl, if (icl != &camera_info.link || camera_probe() <= 0) return -ENODEV; + camera_info.dev = dev; + return platform_device_register(&camera_device); } diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h index af224deadb4..3e8f020abf4 100644 --- a/include/media/soc_camera_platform.h +++ b/include/media/soc_camera_platform.h @@ -14,6 +14,8 @@ #include #include +struct device; + struct soc_camera_platform_info { int iface; char *format_name; @@ -21,6 +23,7 @@ struct soc_camera_platform_info { struct v4l2_pix_format format; unsigned long bus_param; void (*power)(int); + struct device *dev; int (*set_capture)(struct soc_camera_platform_info *info, int enable); struct soc_camera_link link; }; -- cgit v1.2.3-70-g09d2 From 40e2e0927003424c25807b575dd40da2b8685857 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:28:22 -0300 Subject: V4L/DVB (12506): soc-camera: convert to platform device Convert soc-camera core and all drivers to platform device API. We already converted platforms to register a platform device for each soc-camera client, now we remove the compatibility code and switch completely to the new scheme. This is a preparatory step for the v4l2-subdev conversion. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 114 ++++---- drivers/media/video/mt9m111.c | 154 ++++++----- drivers/media/video/mt9t031.c | 116 ++++---- drivers/media/video/mt9v022.c | 119 ++++---- drivers/media/video/mx3_camera.c | 27 +- drivers/media/video/ov772x.c | 157 ++++++----- drivers/media/video/pxa_camera.c | 29 +- drivers/media/video/sh_mobile_ceu_camera.c | 13 +- drivers/media/video/soc_camera.c | 431 ++++++++++++++--------------- drivers/media/video/soc_camera_platform.c | 76 ++--- drivers/media/video/tw9910.c | 110 ++++---- include/media/soc_camera.h | 27 +- include/media/soc_camera_platform.h | 3 +- 13 files changed, 699 insertions(+), 677 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 4d794b42d6c..1e4f269fc08 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -69,8 +69,6 @@ static const struct soc_camera_data_format mt9m001_monochrome_formats[] = { }; struct mt9m001 { - struct i2c_client *client; - struct soc_camera_device icd; int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ unsigned char autoexposure; }; @@ -111,11 +109,11 @@ static int reg_clear(struct i2c_client *client, const u8 reg, static int mt9m001_init(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret; - dev_dbg(icd->vdev->parent, "%s\n", __func__); + dev_dbg(&icd->dev, "%s\n", __func__); if (icl->power) { ret = icl->power(&client->dev, 1); @@ -147,8 +145,8 @@ static int 
mt9m001_init(struct soc_camera_device *icd) static int mt9m001_release(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); /* Disable the chip */ reg_write(client, MT9M001_OUTPUT_CONTROL, 0); @@ -161,7 +159,7 @@ static int mt9m001_release(struct soc_camera_device *icd) static int mt9m001_start_capture(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); /* Switch to master "normal" mode */ if (reg_write(client, MT9M001_OUTPUT_CONTROL, 2) < 0) @@ -171,7 +169,7 @@ static int mt9m001_start_capture(struct soc_camera_device *icd) static int mt9m001_stop_capture(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); /* Stop sensor readout */ if (reg_write(client, MT9M001_OUTPUT_CONTROL, 0) < 0) @@ -182,8 +180,7 @@ static int mt9m001_stop_capture(struct soc_camera_device *icd) static int mt9m001_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); - struct soc_camera_link *icl = mt9m001->client->dev.platform_data; + struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned long width_flag = flags & SOCAM_DATAWIDTH_MASK; /* Only one width bit may be set */ @@ -205,8 +202,7 @@ static int mt9m001_set_bus_param(struct soc_camera_device *icd, static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd) { - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); - struct soc_camera_link *icl = mt9m001->client->dev.platform_data; + struct soc_camera_link *icl = to_soc_camera_link(icd); /* MT9M001 has all capture_format parameters fixed */ unsigned long flags = SOCAM_PCLK_SAMPLE_FALLING | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | @@ -223,8 +219,8 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd) static int mt9m001_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m001 *mt9m001 = i2c_get_clientdata(client); int ret; const u16 hblank = 9, vblank = 25; @@ -290,12 +286,13 @@ static int mt9m001_try_fmt(struct soc_camera_device *icd, static int mt9m001_get_chip_id(struct soc_camera_device *icd, struct v4l2_dbg_chip_ident *id) { - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m001 *mt9m001 = i2c_get_clientdata(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; - if (id->match.addr != mt9m001->client->addr) + if (id->match.addr != client->addr) return -ENODEV; id->ident = mt9m001->model; @@ -308,7 +305,7 @@ static int mt9m001_get_chip_id(struct soc_camera_device *icd, static int mt9m001_get_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return 
-EINVAL; @@ -328,7 +325,7 @@ static int mt9m001_get_register(struct soc_camera_device *icd, static int mt9m001_set_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -381,15 +378,11 @@ static const struct v4l2_queryctrl mt9m001_controls[] = { } }; -static int mt9m001_video_probe(struct soc_camera_device *); -static void mt9m001_video_remove(struct soc_camera_device *); static int mt9m001_get_control(struct soc_camera_device *, struct v4l2_control *); static int mt9m001_set_control(struct soc_camera_device *, struct v4l2_control *); static struct soc_camera_ops mt9m001_ops = { .owner = THIS_MODULE, - .probe = mt9m001_video_probe, - .remove = mt9m001_video_remove, .init = mt9m001_init, .release = mt9m001_release, .start_capture = mt9m001_start_capture, @@ -412,8 +405,8 @@ static struct soc_camera_ops mt9m001_ops = { static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m001 *mt9m001 = i2c_get_clientdata(client); int data; switch (ctrl->id) { @@ -432,8 +425,8 @@ static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_contro static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m001 *mt9m001 = i2c_get_clientdata(client); const struct v4l2_queryctrl *qctrl; int data; @@ -525,11 +518,11 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro /* Interface active, can use i2c. 
If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ -static int mt9m001_video_probe(struct soc_camera_device *icd) +static int mt9m001_video_probe(struct soc_camera_device *icd, + struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); - struct soc_camera_link *icl = client->dev.platform_data; + struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct soc_camera_link *icl = to_soc_camera_link(icd); s32 data; int ret; unsigned long flags; @@ -540,6 +533,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd) to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; + /* Switch master clock on */ + ret = soc_camera_video_start(icd, &client->dev); + if (ret) + return ret; + /* Enable the chip */ data = reg_write(client, MT9M001_CHIP_ENABLE, 1); dev_dbg(&icd->dev, "write: %d\n", data); @@ -547,6 +545,8 @@ static int mt9m001_video_probe(struct soc_camera_device *icd) /* Read out the chip version register */ data = reg_read(client, MT9M001_CHIP_VERSION); + soc_camera_video_stop(icd); + /* must be 0x8411 or 0x8421 for colour sensor and 8431 for bw */ switch (data) { case 0x8411: @@ -559,10 +559,9 @@ static int mt9m001_video_probe(struct soc_camera_device *icd) icd->formats = mt9m001_monochrome_formats; break; default: - ret = -ENODEV; dev_err(&icd->dev, "No MT9M001 chip detected, register read %x\n", data); - goto ei2c; + return -ENODEV; } icd->num_formats = 0; @@ -588,26 +587,16 @@ static int mt9m001_video_probe(struct soc_camera_device *icd) dev_info(&icd->dev, "Detected a MT9M001 chip ID %x (%s)\n", data, data == 0x8431 ? "C12STM" : "C12ST"); - /* Now that we know the model, we can start video */ - ret = soc_camera_video_start(icd); - if (ret) - goto eisis; - return 0; - -eisis: -ei2c: - return ret; } static void mt9m001_video_remove(struct soc_camera_device *icd) { - struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); - struct soc_camera_link *icl = mt9m001->client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); - dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9m001->client->addr, + dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", client->addr, icd->dev.parent, icd->vdev); - soc_camera_video_stop(icd); if (icl->free_bus) icl->free_bus(icl); } @@ -616,11 +605,17 @@ static int mt9m001_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9m001 *mt9m001; - struct soc_camera_device *icd; + struct soc_camera_device *icd = client->dev.platform_data; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); - struct soc_camera_link *icl = client->dev.platform_data; + struct soc_camera_link *icl; int ret; + if (!icd) { + dev_err(&client->dev, "MT9M001: missing soc-camera data!\n"); + return -EINVAL; + } + + icl = to_soc_camera_link(icd); if (!icl) { dev_err(&client->dev, "MT9M001 driver needs platform data\n"); return -EINVAL; @@ -636,13 +631,10 @@ static int mt9m001_probe(struct i2c_client *client, if (!mt9m001) return -ENOMEM; - mt9m001->client = client; i2c_set_clientdata(client, mt9m001); /* Second stage probe - when a capture adapter is there */ - icd = &mt9m001->icd; icd->ops = &mt9m001_ops; - icd->control = &client->dev; icd->x_min = 20; icd->y_min = 12; icd->x_current = 20; @@ -652,27 +644,29 @@ static int mt9m001_probe(struct i2c_client *client, 
icd->height_min = 32; icd->height_max = 1024; icd->y_skip_top = 1; - icd->iface = icl->bus_id; /* Simulated autoexposure. If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9m001->autoexposure = 1; - ret = soc_camera_device_register(icd); - if (ret) - goto eisdr; - - return 0; + ret = mt9m001_video_probe(icd, client); + if (ret) { + icd->ops = NULL; + i2c_set_clientdata(client, NULL); + kfree(mt9m001); + } -eisdr: - kfree(mt9m001); return ret; } static int mt9m001_remove(struct i2c_client *client) { struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct soc_camera_device *icd = client->dev.platform_data; - soc_camera_device_unregister(&mt9m001->icd); + icd->ops = NULL; + mt9m001_video_remove(icd); + i2c_set_clientdata(client, NULL); + client->driver = NULL; kfree(mt9m001); return 0; diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index fc5e2de0376..95c2f089605 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -148,8 +148,6 @@ enum mt9m111_context { }; struct mt9m111 { - struct i2c_client *client; - struct soc_camera_device icd; int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */ enum mt9m111_context context; struct v4l2_rect rect; @@ -203,7 +201,7 @@ static int mt9m111_reg_write(struct i2c_client *client, const u16 reg, ret = reg_page_map_set(client, reg); if (!ret) - ret = i2c_smbus_write_word_data(client, (reg & 0xff), + ret = i2c_smbus_write_word_data(client, reg & 0xff, swab16(data)); dev_dbg(&client->dev, "write reg.%03x = %04x -> %d\n", reg, data, ret); return ret; @@ -232,7 +230,7 @@ static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg, static int mt9m111_set_context(struct soc_camera_device *icd, enum mt9m111_context ctxt) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B @@ -249,8 +247,8 @@ static int mt9m111_set_context(struct soc_camera_device *icd, static int mt9m111_setup_rect(struct soc_camera_device *icd, struct v4l2_rect *rect) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret, is_raw_format; int width = rect->width; int height = rect->height; @@ -294,7 +292,7 @@ static int mt9m111_setup_rect(struct soc_camera_device *icd, static int mt9m111_setup_pixfmt(struct soc_camera_device *icd, u16 outfmt) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int ret; ret = reg_write(OUTPUT_FORMAT_CTRL2_A, outfmt); @@ -315,7 +313,8 @@ static int mt9m111_setfmt_bayer10(struct soc_camera_device *icd) static int mt9m111_setfmt_rgb565(struct soc_camera_device *icd) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int val = 0; if (mt9m111->swap_rgb_red_blue) @@ -329,7 +328,8 @@ static int mt9m111_setfmt_rgb565(struct soc_camera_device *icd) static int mt9m111_setfmt_rgb555(struct soc_camera_device *icd) { - struct mt9m111 
*mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int val = 0; if (mt9m111->swap_rgb_red_blue) @@ -343,7 +343,8 @@ static int mt9m111_setfmt_rgb555(struct soc_camera_device *icd) static int mt9m111_setfmt_yuv(struct soc_camera_device *icd) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int val = 0; if (mt9m111->swap_yuv_cb_cr) @@ -356,9 +357,9 @@ static int mt9m111_setfmt_yuv(struct soc_camera_device *icd) static int mt9m111_enable(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; if (icl->power) { @@ -378,9 +379,9 @@ static int mt9m111_enable(struct soc_camera_device *icd) static int mt9m111_disable(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE); @@ -395,8 +396,8 @@ static int mt9m111_disable(struct soc_camera_device *icd) static int mt9m111_reset(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret; ret = reg_set(RESET, MT9M111_RESET_RESET_MODE); @@ -424,8 +425,7 @@ static int mt9m111_stop_capture(struct soc_camera_device *icd) static unsigned long mt9m111_query_bus_param(struct soc_camera_device *icd) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); - struct soc_camera_link *icl = mt9m111->client->dev.platform_data; + struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned long flags = SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8; @@ -441,7 +441,8 @@ static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f) static int mt9m111_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; dev_dbg(&icd->dev, "%s left=%d, top=%d, width=%d, height=%d\n", @@ -456,7 +457,8 @@ static int mt9m111_set_crop(struct soc_camera_device *icd, static int mt9m111_set_pixfmt(struct soc_camera_device *icd, u32 pixfmt) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; switch (pixfmt) { @@ -506,7 +508,8 @@ static int 
mt9m111_set_pixfmt(struct soc_camera_device *icd, u32 pixfmt) static int mt9m111_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { .left = mt9m111->rect.left, @@ -544,12 +547,13 @@ static int mt9m111_try_fmt(struct soc_camera_device *icd, static int mt9m111_get_chip_id(struct soc_camera_device *icd, struct v4l2_dbg_chip_ident *id) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; - if (id->match.addr != mt9m111->client->addr) + if (id->match.addr != client->addr) return -ENODEV; id->ident = mt9m111->model; @@ -562,8 +566,8 @@ static int mt9m111_get_chip_id(struct soc_camera_device *icd, static int mt9m111_get_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int val; - struct i2c_client *client = to_i2c_client(icd->control); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) return -EINVAL; @@ -583,7 +587,7 @@ static int mt9m111_get_register(struct soc_camera_device *icd, static int mt9m111_set_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) return -EINVAL; @@ -635,8 +639,6 @@ static const struct v4l2_queryctrl mt9m111_controls[] = { } }; -static int mt9m111_video_probe(struct soc_camera_device *); -static void mt9m111_video_remove(struct soc_camera_device *); static int mt9m111_get_control(struct soc_camera_device *, struct v4l2_control *); static int mt9m111_set_control(struct soc_camera_device *, @@ -647,8 +649,6 @@ static int mt9m111_release(struct soc_camera_device *icd); static struct soc_camera_ops mt9m111_ops = { .owner = THIS_MODULE, - .probe = mt9m111_video_probe, - .remove = mt9m111_video_remove, .init = mt9m111_init, .resume = mt9m111_resume, .release = mt9m111_release, @@ -672,8 +672,8 @@ static struct soc_camera_ops mt9m111_ops = { static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; if (mt9m111->context == HIGHPOWER) { @@ -693,7 +693,7 @@ static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask) static int mt9m111_get_global_gain(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int data; data = reg_read(GLOBAL_GAIN); @@ -705,7 +705,7 @@ static int mt9m111_get_global_gain(struct soc_camera_device *icd) static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); u16 val; if (gain > 63 * 2 * 
2) @@ -724,8 +724,8 @@ static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain) static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; if (on) @@ -741,8 +741,8 @@ static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on) static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; if (on) @@ -759,8 +759,8 @@ static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on) static int mt9m111_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int data; switch (ctrl->id) { @@ -803,7 +803,8 @@ static int mt9m111_get_control(struct soc_camera_device *icd, static int mt9m111_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); const struct v4l2_queryctrl *qctrl; int ret; @@ -841,7 +842,8 @@ static int mt9m111_set_control(struct soc_camera_device *icd, static int mt9m111_restore_state(struct soc_camera_device *icd) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); mt9m111_set_context(icd, mt9m111->context); mt9m111_set_pixfmt(icd, mt9m111->pixfmt); @@ -856,7 +858,8 @@ static int mt9m111_restore_state(struct soc_camera_device *icd) static int mt9m111_resume(struct soc_camera_device *icd) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret = 0; if (mt9m111->powered) { @@ -871,7 +874,8 @@ static int mt9m111_resume(struct soc_camera_device *icd) static int mt9m111_init(struct soc_camera_device *icd) { - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); int ret; mt9m111->context = HIGHPOWER; @@ -902,10 +906,10 @@ static int mt9m111_release(struct soc_camera_device *icd) * Interface active, can use i2c. 
If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ -static int mt9m111_video_probe(struct soc_camera_device *icd) +static int mt9m111_video_probe(struct soc_camera_device *icd, + struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); + struct mt9m111 *mt9m111 = i2c_get_clientdata(client); s32 data; int ret; @@ -917,6 +921,11 @@ static int mt9m111_video_probe(struct soc_camera_device *icd) to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; + /* Switch master clock on */ + ret = soc_camera_video_start(icd, &client->dev); + if (ret) + goto evstart; + ret = mt9m111_enable(icd); if (ret) goto ei2c; @@ -945,40 +954,33 @@ static int mt9m111_video_probe(struct soc_camera_device *icd) dev_info(&icd->dev, "Detected a MT9M11x chip ID %x\n", data); - ret = soc_camera_video_start(icd); - if (ret) - goto eisis; - mt9m111->autoexposure = 1; mt9m111->autowhitebalance = 1; mt9m111->swap_rgb_even_odd = 1; mt9m111->swap_rgb_red_blue = 1; - return 0; -eisis: ei2c: + soc_camera_video_stop(icd); +evstart: return ret; } -static void mt9m111_video_remove(struct soc_camera_device *icd) -{ - struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); - - dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9m111->client->addr, - mt9m111->icd.dev.parent, mt9m111->icd.vdev); - soc_camera_video_stop(&mt9m111->icd); -} - static int mt9m111_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9m111 *mt9m111; - struct soc_camera_device *icd; + struct soc_camera_device *icd = client->dev.platform_data; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); - struct soc_camera_link *icl = client->dev.platform_data; + struct soc_camera_link *icl; int ret; + if (!icd) { + dev_err(&client->dev, "MT9M11x: missing soc-camera data!\n"); + return -EINVAL; + } + + icl = to_soc_camera_link(icd); if (!icl) { dev_err(&client->dev, "MT9M11x driver needs platform data\n"); return -EINVAL; @@ -994,13 +996,10 @@ static int mt9m111_probe(struct i2c_client *client, if (!mt9m111) return -ENOMEM; - mt9m111->client = client; i2c_set_clientdata(client, mt9m111); /* Second stage probe - when a capture adapter is there */ - icd = &mt9m111->icd; icd->ops = &mt9m111_ops; - icd->control = &client->dev; icd->x_min = MT9M111_MIN_DARK_COLS; icd->y_min = MT9M111_MIN_DARK_ROWS; icd->x_current = icd->x_min; @@ -1010,22 +1009,25 @@ static int mt9m111_probe(struct i2c_client *client, icd->height_min = MT9M111_MIN_DARK_COLS; icd->height_max = MT9M111_MAX_HEIGHT; icd->y_skip_top = 0; - icd->iface = icl->bus_id; - ret = soc_camera_device_register(icd); - if (ret) - goto eisdr; - return 0; + ret = mt9m111_video_probe(icd, client); + if (ret) { + icd->ops = NULL; + i2c_set_clientdata(client, NULL); + kfree(mt9m111); + } -eisdr: - kfree(mt9m111); return ret; } static int mt9m111_remove(struct i2c_client *client) { struct mt9m111 *mt9m111 = i2c_get_clientdata(client); - soc_camera_device_unregister(&mt9m111->icd); + struct soc_camera_device *icd = client->dev.platform_data; + + icd->ops = NULL; + i2c_set_clientdata(client, NULL); + client->driver = NULL; kfree(mt9m111); return 0; diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index 4207fb34267..d9c7c2fd698 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -68,8 +68,6 @@ static const struct soc_camera_data_format mt9t031_colour_formats[] = { 
}; struct mt9t031 { - struct i2c_client *client; - struct soc_camera_device icd; int model; /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */ unsigned char autoexposure; u16 xskip; @@ -138,8 +136,8 @@ static int get_shutter(struct i2c_client *client, u32 *data) static int mt9t031_init(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret; if (icl->power) { @@ -166,8 +164,8 @@ static int mt9t031_init(struct soc_camera_device *icd) static int mt9t031_release(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); /* Disable the chip */ reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); @@ -180,7 +178,7 @@ static int mt9t031_release(struct soc_camera_device *icd) static int mt9t031_start_capture(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); /* Switch to master "normal" mode */ if (reg_set(client, MT9T031_OUTPUT_CONTROL, 2) < 0) @@ -190,7 +188,7 @@ static int mt9t031_start_capture(struct soc_camera_device *icd) static int mt9t031_stop_capture(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); /* Stop sensor readout */ if (reg_clear(client, MT9T031_OUTPUT_CONTROL, 2) < 0) @@ -201,7 +199,7 @@ static int mt9t031_stop_capture(struct soc_camera_device *icd) static int mt9t031_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); /* The caller should have queried our parameters, check anyway */ if (flags & ~MT9T031_BUS_PARAM) @@ -217,8 +215,7 @@ static int mt9t031_set_bus_param(struct soc_camera_device *icd, static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd) { - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); - struct soc_camera_link *icl = mt9t031->client->dev.platform_data; + struct soc_camera_link *icl = to_soc_camera_link(icd); return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM); } @@ -238,8 +235,8 @@ static void recalculate_limits(struct soc_camera_device *icd, static int mt9t031_set_params(struct soc_camera_device *icd, struct v4l2_rect *rect, u16 xskip, u16 yskip) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9t031 *mt9t031 = i2c_get_clientdata(client); int ret; u16 xbin, ybin, width, height, left, top; const u16 hblank = MT9T031_HORIZONTAL_BLANK, @@ -336,7 +333,8 @@ static int mt9t031_set_params(struct soc_camera_device *icd, static int mt9t031_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9t031 *mt9t031 = i2c_get_clientdata(client); /* CROP - no change in scaling, or in limits */ 
return mt9t031_set_params(icd, rect, mt9t031->xskip, mt9t031->yskip); @@ -345,7 +343,8 @@ static int mt9t031_set_crop(struct soc_camera_device *icd, static int mt9t031_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9t031 *mt9t031 = i2c_get_clientdata(client); int ret; u16 xskip, yskip; struct v4l2_rect rect = { @@ -395,12 +394,13 @@ static int mt9t031_try_fmt(struct soc_camera_device *icd, static int mt9t031_get_chip_id(struct soc_camera_device *icd, struct v4l2_dbg_chip_ident *id) { - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9t031 *mt9t031 = i2c_get_clientdata(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; - if (id->match.addr != mt9t031->client->addr) + if (id->match.addr != client->addr) return -ENODEV; id->ident = mt9t031->model; @@ -413,7 +413,7 @@ static int mt9t031_get_chip_id(struct soc_camera_device *icd, static int mt9t031_get_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -432,7 +432,7 @@ static int mt9t031_get_register(struct soc_camera_device *icd, static int mt9t031_set_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -493,15 +493,11 @@ static const struct v4l2_queryctrl mt9t031_controls[] = { } }; -static int mt9t031_video_probe(struct soc_camera_device *); -static void mt9t031_video_remove(struct soc_camera_device *); static int mt9t031_get_control(struct soc_camera_device *, struct v4l2_control *); static int mt9t031_set_control(struct soc_camera_device *, struct v4l2_control *); static struct soc_camera_ops mt9t031_ops = { .owner = THIS_MODULE, - .probe = mt9t031_video_probe, - .remove = mt9t031_video_remove, .init = mt9t031_init, .release = mt9t031_release, .start_capture = mt9t031_start_capture, @@ -524,8 +520,8 @@ static struct soc_camera_ops mt9t031_ops = { static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9t031 *mt9t031 = i2c_get_clientdata(client); int data; switch (ctrl->id) { @@ -550,8 +546,8 @@ static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_contro static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9t031 *mt9t031 = i2c_get_clientdata(client); const struct v4l2_queryctrl *qctrl; int data; @@ -657,10 +653,10 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro /* Interface active, can use i2c. 
If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ -static int mt9t031_video_probe(struct soc_camera_device *icd) +static int mt9t031_video_probe(struct soc_camera_device *icd, + struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); + struct mt9t031 *mt9t031 = i2c_get_clientdata(client); s32 data; int ret; @@ -670,6 +666,11 @@ static int mt9t031_video_probe(struct soc_camera_device *icd) to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; + /* Switch master clock on */ + ret = soc_camera_video_start(icd, &client->dev); + if (ret) + return ret; + /* Enable the chip */ data = reg_write(client, MT9T031_CHIP_ENABLE, 1); dev_dbg(&icd->dev, "write: %d\n", data); @@ -677,6 +678,8 @@ static int mt9t031_video_probe(struct soc_camera_device *icd) /* Read out the chip version register */ data = reg_read(client, MT9T031_CHIP_VERSION); + soc_camera_video_stop(icd); + switch (data) { case 0x1621: mt9t031->model = V4L2_IDENT_MT9T031; @@ -684,44 +687,31 @@ static int mt9t031_video_probe(struct soc_camera_device *icd) icd->num_formats = ARRAY_SIZE(mt9t031_colour_formats); break; default: - ret = -ENODEV; dev_err(&icd->dev, "No MT9T031 chip detected, register read %x\n", data); - goto ei2c; + return -ENODEV; } dev_info(&icd->dev, "Detected a MT9T031 chip ID %x\n", data); - /* Now that we know the model, we can start video */ - ret = soc_camera_video_start(icd); - if (ret) - goto evstart; - return 0; - -evstart: -ei2c: - return ret; -} - -static void mt9t031_video_remove(struct soc_camera_device *icd) -{ - struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); - - dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9t031->client->addr, - icd->dev.parent, icd->vdev); - soc_camera_video_stop(icd); } static int mt9t031_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9t031 *mt9t031; - struct soc_camera_device *icd; + struct soc_camera_device *icd = client->dev.platform_data; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); - struct soc_camera_link *icl = client->dev.platform_data; + struct soc_camera_link *icl; int ret; + if (!icd) { + dev_err(&client->dev, "MT9T031: missing soc-camera data!\n"); + return -EINVAL; + } + + icl = to_soc_camera_link(icd); if (!icl) { dev_err(&client->dev, "MT9T031 driver needs platform data\n"); return -EINVAL; @@ -737,13 +727,10 @@ static int mt9t031_probe(struct i2c_client *client, if (!mt9t031) return -ENOMEM; - mt9t031->client = client; i2c_set_clientdata(client, mt9t031); /* Second stage probe - when a capture adapter is there */ - icd = &mt9t031->icd; icd->ops = &mt9t031_ops; - icd->control = &client->dev; icd->x_min = MT9T031_COLUMN_SKIP; icd->y_min = MT9T031_ROW_SKIP; icd->x_current = icd->x_min; @@ -753,7 +740,6 @@ static int mt9t031_probe(struct i2c_client *client, icd->height_min = MT9T031_MIN_HEIGHT; icd->height_max = MT9T031_MAX_HEIGHT; icd->y_skip_top = 0; - icd->iface = icl->bus_id; /* Simulated autoexposure. 
If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9t031->autoexposure = 1; @@ -761,24 +747,24 @@ static int mt9t031_probe(struct i2c_client *client, mt9t031->xskip = 1; mt9t031->yskip = 1; - ret = soc_camera_device_register(icd); - if (ret) - goto eisdr; - - return 0; + ret = mt9t031_video_probe(icd, client); + if (ret) { + icd->ops = NULL; + i2c_set_clientdata(client, NULL); + kfree(mt9t031); + } -eisdr: - i2c_set_clientdata(client, NULL); - kfree(mt9t031); return ret; } static int mt9t031_remove(struct i2c_client *client) { struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct soc_camera_device *icd = client->dev.platform_data; - soc_camera_device_unregister(&mt9t031->icd); + icd->ops = NULL; i2c_set_clientdata(client, NULL); + client->driver = NULL; kfree(mt9t031); return 0; diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index dbdcc86ae50..959cc299f1a 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -85,8 +85,6 @@ static const struct soc_camera_data_format mt9v022_monochrome_formats[] = { }; struct mt9v022 { - struct i2c_client *client; - struct soc_camera_device icd; int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */ u16 chip_control; }; @@ -127,9 +125,9 @@ static int reg_clear(struct i2c_client *client, const u8 reg, static int mt9v022_init(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); + struct mt9v022 *mt9v022 = i2c_get_clientdata(client); int ret; if (icl->power) { @@ -173,19 +171,19 @@ static int mt9v022_init(struct soc_camera_device *icd) static int mt9v022_release(struct soc_camera_device *icd) { - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); - struct soc_camera_link *icl = mt9v022->client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); if (icl->power) - icl->power(&mt9v022->client->dev, 0); + icl->power(&client->dev, 0); return 0; } static int mt9v022_start_capture(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9v022 *mt9v022 = i2c_get_clientdata(client); /* Switch to master "normal" mode */ mt9v022->chip_control &= ~0x10; if (reg_write(client, MT9V022_CHIP_CONTROL, @@ -196,8 +194,8 @@ static int mt9v022_start_capture(struct soc_camera_device *icd) static int mt9v022_stop_capture(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9v022 *mt9v022 = i2c_get_clientdata(client); /* Switch to snapshot mode */ mt9v022->chip_control |= 0x10; if (reg_write(client, MT9V022_CHIP_CONTROL, @@ -209,9 +207,9 @@ static int mt9v022_stop_capture(struct soc_camera_device *icd) static int mt9v022_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9v022 *mt9v022 = 
container_of(icd, struct mt9v022, icd); - struct soc_camera_link *icl = client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK; int ret; u16 pixclk = 0; @@ -263,8 +261,7 @@ static int mt9v022_set_bus_param(struct soc_camera_device *icd, static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd) { - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); - struct soc_camera_link *icl = mt9v022->client->dev.platform_data; + struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned int width_flag; if (icl->query_bus_param) @@ -283,7 +280,7 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd) static int mt9v022_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int ret; /* Like in example app. Contradicts the datasheet though */ @@ -326,7 +323,8 @@ static int mt9v022_set_crop(struct soc_camera_device *icd, static int mt9v022_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9v022 *mt9v022 = i2c_get_clientdata(client); struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { .left = icd->x_current, @@ -374,12 +372,13 @@ static int mt9v022_try_fmt(struct soc_camera_device *icd, static int mt9v022_get_chip_id(struct soc_camera_device *icd, struct v4l2_dbg_chip_ident *id) { - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9v022 *mt9v022 = i2c_get_clientdata(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; - if (id->match.addr != mt9v022->client->addr) + if (id->match.addr != client->addr) return -ENODEV; id->ident = mt9v022->model; @@ -392,7 +391,7 @@ static int mt9v022_get_chip_id(struct soc_camera_device *icd, static int mt9v022_get_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -412,7 +411,7 @@ static int mt9v022_get_register(struct soc_camera_device *icd, static int mt9v022_set_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -481,15 +480,11 @@ static const struct v4l2_queryctrl mt9v022_controls[] = { } }; -static int mt9v022_video_probe(struct soc_camera_device *); -static void mt9v022_video_remove(struct soc_camera_device *); static int mt9v022_get_control(struct soc_camera_device *, struct v4l2_control *); static int mt9v022_set_control(struct soc_camera_device *, struct v4l2_control *); static struct soc_camera_ops mt9v022_ops = { .owner = THIS_MODULE, - .probe = mt9v022_video_probe, - .remove = mt9v022_video_remove, .init = mt9v022_init, .release = mt9v022_release, .start_capture = 
mt9v022_start_capture, @@ -513,7 +508,7 @@ static struct soc_camera_ops mt9v022_ops = { static int mt9v022_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int data; switch (ctrl->id) { @@ -549,7 +544,7 @@ static int mt9v022_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { int data; - struct i2c_client *client = to_i2c_client(icd->control); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); const struct v4l2_queryctrl *qctrl; qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id); @@ -646,11 +641,11 @@ static int mt9v022_set_control(struct soc_camera_device *icd, /* Interface active, can use i2c. If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ -static int mt9v022_video_probe(struct soc_camera_device *icd) +static int mt9v022_video_probe(struct soc_camera_device *icd, + struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(icd->control); - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); - struct soc_camera_link *icl = client->dev.platform_data; + struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct soc_camera_link *icl = to_soc_camera_link(icd); s32 data; int ret; unsigned long flags; @@ -659,6 +654,11 @@ static int mt9v022_video_probe(struct soc_camera_device *icd) to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; + /* Switch master clock on */ + ret = soc_camera_video_start(icd, &client->dev); + if (ret) + return ret; + /* Read out the chip version register */ data = reg_read(client, MT9V022_CHIP_VERSION); @@ -678,6 +678,8 @@ static int mt9v022_video_probe(struct soc_camera_device *icd) udelay(200); if (reg_read(client, MT9V022_RESET)) { dev_err(&icd->dev, "Resetting MT9V022 failed!\n"); + if (ret > 0) + ret = -EIO; goto ei2c; } @@ -694,7 +696,7 @@ static int mt9v022_video_probe(struct soc_camera_device *icd) } if (ret < 0) - goto eisis; + goto ei2c; icd->num_formats = 0; @@ -716,29 +718,23 @@ static int mt9v022_video_probe(struct soc_camera_device *icd) if (flags & SOCAM_DATAWIDTH_8) icd->num_formats++; - ret = soc_camera_video_start(icd); - if (ret < 0) - goto eisis; - dev_info(&icd->dev, "Detected a MT9V022 chip ID %x, %s sensor\n", data, mt9v022->model == V4L2_IDENT_MT9V022IX7ATM ? 
"monochrome" : "colour"); - return 0; - -eisis: ei2c: + soc_camera_video_stop(icd); + return ret; } static void mt9v022_video_remove(struct soc_camera_device *icd) { - struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); - struct soc_camera_link *icl = mt9v022->client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); - dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9v022->client->addr, + dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", client->addr, icd->dev.parent, icd->vdev); - soc_camera_video_stop(icd); if (icl->free_bus) icl->free_bus(icl); } @@ -747,11 +743,17 @@ static int mt9v022_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9v022 *mt9v022; - struct soc_camera_device *icd; + struct soc_camera_device *icd = client->dev.platform_data; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); - struct soc_camera_link *icl = client->dev.platform_data; + struct soc_camera_link *icl; int ret; + if (!icd) { + dev_err(&client->dev, "MT9V022: missing soc-camera data!\n"); + return -EINVAL; + } + + icl = to_soc_camera_link(icd); if (!icl) { dev_err(&client->dev, "MT9V022 driver needs platform data\n"); return -EINVAL; @@ -768,12 +770,9 @@ static int mt9v022_probe(struct i2c_client *client, return -ENOMEM; mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT; - mt9v022->client = client; i2c_set_clientdata(client, mt9v022); - icd = &mt9v022->icd; icd->ops = &mt9v022_ops; - icd->control = &client->dev; icd->x_min = 1; icd->y_min = 4; icd->x_current = 1; @@ -783,24 +782,26 @@ static int mt9v022_probe(struct i2c_client *client, icd->height_min = 32; icd->height_max = 480; icd->y_skip_top = 1; - icd->iface = icl->bus_id; - - ret = soc_camera_device_register(icd); - if (ret) - goto eisdr; - return 0; + ret = mt9v022_video_probe(icd, client); + if (ret) { + icd->ops = NULL; + i2c_set_clientdata(client, NULL); + kfree(mt9v022); + } -eisdr: - kfree(mt9v022); return ret; } static int mt9v022_remove(struct i2c_client *client) { struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct soc_camera_device *icd = client->dev.platform_data; - soc_camera_device_unregister(&mt9v022->icd); + icd->ops = NULL; + mt9v022_video_remove(icd); + i2c_set_clientdata(client, NULL); + client->driver = NULL; kfree(mt9v022); return 0; diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 9770cb7932c..2edf77a6256 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -503,18 +503,19 @@ static int mx3_camera_add_device(struct soc_camera_device *icd) mx3_camera_activate(mx3_cam, icd); ret = icd->ops->init(icd); - if (ret < 0) { - clk_disable(mx3_cam->clk); + if (ret < 0) goto einit; - } mx3_cam->icd = icd; + dev_info(&icd->dev, "MX3 Camera driver attached to camera %d\n", + icd->devnum); + + return 0; + einit: + clk_disable(mx3_cam->clk); ebusy: - if (!ret) - dev_info(&icd->dev, "MX3 Camera driver attached to camera %d\n", - icd->devnum); return ret; } @@ -947,9 +948,10 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) camera_flags = icd->ops->query_bus_param(icd); common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags); + dev_dbg(ici->dev, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n", + camera_flags, bus_flags, common_flags); if (!common_flags) { - dev_dbg(ici->dev, "no common flags: camera %lx, host %lx\n", - camera_flags, bus_flags); + 
dev_dbg(ici->dev, "no common flags"); return -EINVAL; } @@ -1002,8 +1004,11 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) SOCAM_DATAWIDTH_4; ret = icd->ops->set_bus_param(icd, common_flags); - if (ret < 0) + if (ret < 0) { + dev_dbg(ici->dev, "camera set_bus_param(%lx) returned %d\n", + common_flags, ret); return ret; + } /* * So far only gated clock mode is supported. Add a line @@ -1127,8 +1132,9 @@ static int __devinit mx3_camera_probe(struct platform_device *pdev) INIT_LIST_HEAD(&mx3_cam->capture); spin_lock_init(&mx3_cam->lock); - base = ioremap(res->start, res->end - res->start + 1); + base = ioremap(res->start, resource_size(res)); if (!base) { + pr_err("Couldn't map %x@%x\n", resource_size(res), res->start); err = -ENOMEM; goto eioremap; } @@ -1215,3 +1221,4 @@ module_exit(mx3_camera_exit); MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver"); MODULE_AUTHOR("Guennadi Liakhovetski "); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" MX3_CAM_DRV_NAME); diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 0bce255168b..3ea650d55b1 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -399,8 +399,6 @@ struct ov772x_win_size { struct ov772x_priv { struct ov772x_camera_info *info; - struct i2c_client *client; - struct soc_camera_device icd; const struct ov772x_color_format *fmt; const struct ov772x_win_size *win; int model; @@ -619,53 +617,56 @@ static int ov772x_reset(struct i2c_client *client) static int ov772x_init(struct soc_camera_device *icd) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret = 0; - if (priv->info->link.power) { - ret = priv->info->link.power(&priv->client->dev, 1); + if (icl->power) { + ret = icl->power(&client->dev, 1); if (ret < 0) return ret; } - if (priv->info->link.reset) - ret = priv->info->link.reset(&priv->client->dev); + if (icl->reset) + ret = icl->reset(&client->dev); return ret; } static int ov772x_release(struct soc_camera_device *icd) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret = 0; - if (priv->info->link.power) - ret = priv->info->link.power(&priv->client->dev, 0); + if (icl->power) + ret = icl->power(&client->dev, 0); return ret; } static int ov772x_start_capture(struct soc_camera_device *icd) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct ov772x_priv *priv = i2c_get_clientdata(client); if (!priv->win || !priv->fmt) { dev_err(&icd->dev, "norm or win select error\n"); return -EPERM; } - ov772x_mask_set(priv->client, COM2, SOFT_SLEEP_MODE, 0); + ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0); dev_dbg(&icd->dev, - "format %s, win %s\n", priv->fmt->name, priv->win->name); + "format %s, win %s\n", priv->fmt->name, priv->win->name); return 0; } static int ov772x_stop_capture(struct soc_camera_device *icd) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); - ov772x_mask_set(priv->client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE); return 0; } @@ -677,8 +678,9 @@ static int 
ov772x_set_bus_param(struct soc_camera_device *icd, static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); - struct soc_camera_link *icl = &priv->info->link; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct ov772x_priv *priv = i2c_get_clientdata(client); + struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | priv->info->buswidth; @@ -689,7 +691,8 @@ static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd) static int ov772x_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct ov772x_priv *priv = i2c_get_clientdata(client); switch (ctrl->id) { case V4L2_CID_VFLIP: @@ -705,7 +708,8 @@ static int ov772x_get_control(struct soc_camera_device *icd, static int ov772x_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct ov772x_priv *priv = i2c_get_clientdata(client); int ret = 0; u8 val; @@ -715,14 +719,14 @@ static int ov772x_set_control(struct soc_camera_device *icd, priv->flag_vflip = ctrl->value; if (priv->info->flags & OV772X_FLAG_VFLIP) val ^= VFLIP_IMG; - ret = ov772x_mask_set(priv->client, COM3, VFLIP_IMG, val); + ret = ov772x_mask_set(client, COM3, VFLIP_IMG, val); break; case V4L2_CID_HFLIP: val = ctrl->value ? HFLIP_IMG : 0x00; priv->flag_hflip = ctrl->value; if (priv->info->flags & OV772X_FLAG_HFLIP) val ^= HFLIP_IMG; - ret = ov772x_mask_set(priv->client, COM3, HFLIP_IMG, val); + ret = ov772x_mask_set(client, COM3, HFLIP_IMG, val); break; } @@ -730,9 +734,10 @@ static int ov772x_set_control(struct soc_camera_device *icd, } static int ov772x_get_chip_id(struct soc_camera_device *icd, - struct v4l2_dbg_chip_ident *id) + struct v4l2_dbg_chip_ident *id) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct ov772x_priv *priv = i2c_get_clientdata(client); id->ident = priv->model; id->revision = 0; @@ -744,14 +749,14 @@ static int ov772x_get_chip_id(struct soc_camera_device *icd, static int ov772x_get_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); - int ret; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + int ret; reg->size = 1; if (reg->reg > 0xff) return -EINVAL; - ret = i2c_smbus_read_byte_data(priv->client, reg->reg); + ret = i2c_smbus_read_byte_data(client, reg->reg); if (ret < 0) return ret; @@ -763,13 +768,13 @@ static int ov772x_get_register(struct soc_camera_device *icd, static int ov772x_set_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->reg > 0xff || reg->val > 0xff) return -EINVAL; - return i2c_smbus_write_byte_data(priv->client, reg->reg, reg->val); + return i2c_smbus_write_byte_data(client, reg->reg, reg->val); } #endif @@ -793,9 +798,11 @@ 
ov772x_select_win(u32 width, u32 height) return win; } -static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, - u32 pixfmt) +static int ov772x_set_params(struct soc_camera_device *icd, + u32 width, u32 height, u32 pixfmt) { + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct ov772x_priv *priv = i2c_get_clientdata(client); int ret = -EINVAL; u8 val; int i; @@ -810,6 +817,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, break; } } + dev_dbg(&icd->dev, "Using fmt %x #%d\n", pixfmt, i); if (!priv->fmt) goto ov772x_set_fmt_error; @@ -821,7 +829,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, /* * reset hardware */ - ov772x_reset(priv->client); + ov772x_reset(client); /* * Edge Ctrl @@ -835,17 +843,17 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, * Remove it when manual mode. */ - ret = ov772x_mask_set(priv->client, DSPAUTO, EDGE_ACTRL, 0x00); + ret = ov772x_mask_set(client, DSPAUTO, EDGE_ACTRL, 0x00); if (ret < 0) goto ov772x_set_fmt_error; - ret = ov772x_mask_set(priv->client, + ret = ov772x_mask_set(client, EDGE_TRSHLD, EDGE_THRESHOLD_MASK, priv->info->edgectrl.threshold); if (ret < 0) goto ov772x_set_fmt_error; - ret = ov772x_mask_set(priv->client, + ret = ov772x_mask_set(client, EDGE_STRNGT, EDGE_STRENGTH_MASK, priv->info->edgectrl.strength); if (ret < 0) @@ -857,13 +865,13 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, * * set upper and lower limit */ - ret = ov772x_mask_set(priv->client, + ret = ov772x_mask_set(client, EDGE_UPPER, EDGE_UPPER_MASK, priv->info->edgectrl.upper); if (ret < 0) goto ov772x_set_fmt_error; - ret = ov772x_mask_set(priv->client, + ret = ov772x_mask_set(client, EDGE_LOWER, EDGE_LOWER_MASK, priv->info->edgectrl.lower); if (ret < 0) @@ -873,7 +881,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, /* * set size format */ - ret = ov772x_write_array(priv->client, priv->win->regs); + ret = ov772x_write_array(client, priv->win->regs); if (ret < 0) goto ov772x_set_fmt_error; @@ -882,7 +890,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, */ val = priv->fmt->dsp3; if (val) { - ret = ov772x_mask_set(priv->client, + ret = ov772x_mask_set(client, DSP_CTRL3, UV_MASK, val); if (ret < 0) goto ov772x_set_fmt_error; @@ -901,7 +909,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, if (priv->flag_hflip) val ^= HFLIP_IMG; - ret = ov772x_mask_set(priv->client, + ret = ov772x_mask_set(client, COM3, SWAP_MASK | IMG_MASK, val); if (ret < 0) goto ov772x_set_fmt_error; @@ -910,7 +918,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, * set COM7 */ val = priv->win->com7_bit | priv->fmt->com7; - ret = ov772x_mask_set(priv->client, + ret = ov772x_mask_set(client, COM7, (SLCT_MASK | FMT_MASK | OFMT_MASK), val); if (ret < 0) @@ -920,7 +928,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, u32 width, u32 height, ov772x_set_fmt_error: - ov772x_reset(priv->client); + ov772x_reset(client); priv->win = NULL; priv->fmt = NULL; @@ -930,22 +938,22 @@ ov772x_set_fmt_error: static int ov772x_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct ov772x_priv *priv = i2c_get_clientdata(client); if (!priv->fmt) return 
-EINVAL; - return ov772x_set_params(priv, rect->width, rect->height, + return ov772x_set_params(icd, rect->width, rect->height, priv->fmt->fourcc); } static int ov772x_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); struct v4l2_pix_format *pix = &f->fmt.pix; - return ov772x_set_params(priv, pix->width, pix->height, + return ov772x_set_params(icd, pix->width, pix->height, pix->pixelformat); } @@ -967,11 +975,13 @@ static int ov772x_try_fmt(struct soc_camera_device *icd, return 0; } -static int ov772x_video_probe(struct soc_camera_device *icd) +static int ov772x_video_probe(struct soc_camera_device *icd, + struct i2c_client *client) { - struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd); + struct ov772x_priv *priv = i2c_get_clientdata(client); u8 pid, ver; const char *devname; + int ret; /* * We must have a parent by now. And it cannot be a wrong one. @@ -993,11 +1003,16 @@ static int ov772x_video_probe(struct soc_camera_device *icd) icd->formats = ov772x_fmt_lists; icd->num_formats = ARRAY_SIZE(ov772x_fmt_lists); + /* Switch master clock on */ + ret = soc_camera_video_start(icd, &client->dev); + if (ret) + return ret; + /* * check and show product ID and manufacturer ID */ - pid = i2c_smbus_read_byte_data(priv->client, PID); - ver = i2c_smbus_read_byte_data(priv->client, VER); + pid = i2c_smbus_read_byte_data(client, PID); + ver = i2c_smbus_read_byte_data(client, VER); switch (VERSION(pid, ver)) { case OV7720: @@ -1011,7 +1026,8 @@ static int ov772x_video_probe(struct soc_camera_device *icd) default: dev_err(&icd->dev, "Product ID error %x:%x\n", pid, ver); - return -ENODEV; + ret = -ENODEV; + goto ever; } dev_info(&icd->dev, @@ -1019,21 +1035,17 @@ static int ov772x_video_probe(struct soc_camera_device *icd) devname, pid, ver, - i2c_smbus_read_byte_data(priv->client, MIDH), - i2c_smbus_read_byte_data(priv->client, MIDL)); - - return soc_camera_video_start(icd); -} + i2c_smbus_read_byte_data(client, MIDH), + i2c_smbus_read_byte_data(client, MIDL)); -static void ov772x_video_remove(struct soc_camera_device *icd) -{ soc_camera_video_stop(icd); + +ever: + return ret; } static struct soc_camera_ops ov772x_ops = { .owner = THIS_MODULE, - .probe = ov772x_video_probe, - .remove = ov772x_video_remove, .init = ov772x_init, .release = ov772x_release, .start_capture = ov772x_start_capture, @@ -1059,19 +1071,25 @@ static struct soc_camera_ops ov772x_ops = { */ static int ov772x_probe(struct i2c_client *client, - const struct i2c_device_id *did) + const struct i2c_device_id *did) { struct ov772x_priv *priv; struct ov772x_camera_info *info; - struct soc_camera_device *icd; + struct soc_camera_device *icd = client->dev.platform_data; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct soc_camera_link *icl; int ret; - if (!client->dev.platform_data) + if (!icd) { + dev_err(&client->dev, "MT9M001: missing soc-camera data!\n"); return -EINVAL; + } - info = container_of(client->dev.platform_data, - struct ov772x_camera_info, link); + icl = to_soc_camera_link(icd); + if (!icl) + return -EINVAL; + + info = container_of(icl, struct ov772x_camera_info, link); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&adapter->dev, @@ -1085,19 +1103,15 @@ static int ov772x_probe(struct i2c_client *client, return -ENOMEM; priv->info = info; - priv->client = client; i2c_set_clientdata(client, priv); - icd = &priv->icd; icd->ops = &ov772x_ops; - icd->control = 
&client->dev; icd->width_max = MAX_WIDTH; icd->height_max = MAX_HEIGHT; - icd->iface = priv->info->link.bus_id; - - ret = soc_camera_device_register(icd); + ret = ov772x_video_probe(icd, client); if (ret) { + icd->ops = NULL; i2c_set_clientdata(client, NULL); kfree(priv); } @@ -1108,8 +1122,9 @@ static int ov772x_probe(struct i2c_client *client, static int ov772x_remove(struct i2c_client *client) { struct ov772x_priv *priv = i2c_get_clientdata(client); + struct soc_camera_device *icd = client->dev.platform_data; - soc_camera_device_unregister(&priv->icd); + icd->ops = NULL; i2c_set_clientdata(client, NULL); kfree(priv); return 0; diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 016bb45ba0c..8b9b44d8683 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -830,7 +830,8 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q, sizeof(struct pxa_buffer), icd); } -static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev) +static u32 mclk_get_divisor(struct platform_device *pdev, + struct pxa_camera_dev *pcdev) { unsigned long mclk = pcdev->mclk; u32 div; @@ -842,7 +843,7 @@ static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev) /* mclk <= ciclk / 4 (27.4.2) */ if (mclk > lcdclk / 4) { mclk = lcdclk / 4; - dev_warn(pcdev->soc_host.dev, "Limiting master clock to %lu\n", mclk); + dev_warn(&pdev->dev, "Limiting master clock to %lu\n", mclk); } /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */ @@ -852,8 +853,8 @@ static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev) if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) pcdev->mclk = lcdclk / (2 * (div + 1)); - dev_dbg(pcdev->soc_host.dev, "LCD clock %luHz, target freq %luHz, " - "divisor %u\n", lcdclk, mclk, div); + dev_dbg(&pdev->dev, "LCD clock %luHz, target freq %luHz, divisor %u\n", + lcdclk, mclk, div); return div; } @@ -958,15 +959,20 @@ static int pxa_camera_add_device(struct soc_camera_device *icd) goto ebusy; } - dev_info(&icd->dev, "PXA Camera driver attached to camera %d\n", - icd->devnum); - pxa_camera_activate(pcdev); ret = icd->ops->init(icd); + if (ret < 0) + goto einit; + + pcdev->icd = icd; - if (!ret) - pcdev->icd = icd; + dev_info(&icd->dev, "PXA Camera driver attached to camera %d\n", + icd->devnum); + return 0; + +einit: + pxa_camera_deactivate(pcdev); ebusy: return ret; } @@ -1575,8 +1581,7 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) pcdev->mclk = 20000000; } - pcdev->soc_host.dev = &pdev->dev; - pcdev->mclk_divisor = mclk_get_divisor(pcdev); + pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev); INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); @@ -1641,6 +1646,7 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME; pcdev->soc_host.ops = &pxa_soc_camera_host_ops; pcdev->soc_host.priv = pcdev; + pcdev->soc_host.dev = &pdev->dev; pcdev->soc_host.nr = pdev->id; err = soc_camera_host_register(&pcdev->soc_host); @@ -1722,3 +1728,4 @@ module_exit(pxa_camera_exit); MODULE_DESCRIPTION("PXA27x SoC Camera Host driver"); MODULE_AUTHOR("Guennadi Liakhovetski "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME); diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 61c47b82408..e7ac84daf67 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -356,11 +356,13 @@ static int sh_mobile_ceu_add_device(struct 
soc_camera_device *icd) "SuperH Mobile CEU driver attached to camera %d\n", icd->devnum); + clk_enable(pcdev->clk); + ret = icd->ops->init(icd); - if (ret) + if (ret) { + clk_disable(pcdev->clk); goto err; - - pm_runtime_get_sync(ici->dev); + } ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ while (ceu_read(pcdev, CSTSR) & 1) @@ -394,10 +396,10 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) } spin_unlock_irqrestore(&pcdev->lock, flags); - pm_runtime_put_sync(ici->dev); - icd->ops->release(icd); + clk_disable(pcdev->clk); + dev_info(&icd->dev, "SuperH Mobile CEU driver detached from camera %d\n", icd->devnum); @@ -946,3 +948,4 @@ module_exit(sh_mobile_ceu_exit); MODULE_DESCRIPTION("SuperH Mobile CEU driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:sh_mobile_ceu"); diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 0340754e540..20ef5c773fa 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -21,15 +21,15 @@ #include #include #include -#include #include +#include #include #include #include #include -#include #include +#include #include /* Default to VGA resolution */ @@ -38,7 +38,7 @@ static LIST_HEAD(hosts); static LIST_HEAD(devices); -static DEFINE_MUTEX(list_lock); +static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */ const struct soc_camera_data_format *soc_camera_format_by_fourcc( struct soc_camera_device *icd, unsigned int fourcc) @@ -209,6 +209,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv, return videobuf_dqbuf(&icf->vb_vidq, p, file->f_flags & O_NONBLOCK); } +/* Always entered with .video_lock held */ static int soc_camera_init_user_formats(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); @@ -257,9 +258,12 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd) return 0; } +/* Always entered with .video_lock held */ static void soc_camera_free_user_formats(struct soc_camera_device *icd) { + icd->current_fmt = NULL; vfree(icd->user_formats); + icd->user_formats = NULL; } /* Called with .vb_lock held */ @@ -310,10 +314,6 @@ static int soc_camera_open(struct file *file) struct soc_camera_file *icf; int ret; - icf = vmalloc(sizeof(*icf)); - if (!icf) - return -ENOMEM; - /* * It is safe to dereference these pointers now as long as a user has * the video device open - we are protected by the held cdev reference. @@ -321,8 +321,17 @@ static int soc_camera_open(struct file *file) vdev = video_devdata(file); icd = container_of(vdev->parent, struct soc_camera_device, dev); + + if (!icd->ops) + /* No device driver attached */ + return -ENODEV; + ici = to_soc_camera_host(icd->dev.parent); + icf = vmalloc(sizeof(*icf)); + if (!icf) + return -ENOMEM; + if (!try_module_get(icd->ops->owner)) { dev_err(&icd->dev, "Couldn't lock sensor driver.\n"); ret = -EINVAL; @@ -335,7 +344,7 @@ static int soc_camera_open(struct file *file) goto emgi; } - /* Protect against icd->remove() until we module_get() both drivers. */ + /* Protect against icd->ops->remove() until we module_get() both drivers. 
*/ mutex_lock(&icd->video_lock); icf->icd = icd; @@ -350,11 +359,18 @@ static int soc_camera_open(struct file *file) .width = icd->width, .height = icd->height, .field = icd->field, - .pixelformat = icd->current_fmt->fourcc, - .colorspace = icd->current_fmt->colorspace, }, }; + ret = soc_camera_init_user_formats(icd); + if (ret < 0) + goto eiufmt; + + dev_dbg(&icd->dev, "Using fmt %x\n", icd->current_fmt->fourcc); + + f.fmt.pix.pixelformat = icd->current_fmt->fourcc; + f.fmt.pix.colorspace = icd->current_fmt->colorspace; + ret = ici->ops->add(icd); if (ret < 0) { dev_err(&icd->dev, "Couldn't activate the camera: %d\n", ret); @@ -383,6 +399,8 @@ static int soc_camera_open(struct file *file) esfmt: ici->ops->remove(icd); eiciadd: + soc_camera_free_user_formats(icd); +eiufmt: icd->use_count--; mutex_unlock(&icd->video_lock); module_put(ici->ops->owner); @@ -402,8 +420,10 @@ static int soc_camera_close(struct file *file) mutex_lock(&icd->video_lock); icd->use_count--; - if (!icd->use_count) + if (!icd->use_count) { ici->ops->remove(icd); + soc_camera_free_user_formats(icd); + } mutex_unlock(&icd->video_lock); @@ -764,29 +784,6 @@ static int soc_camera_s_register(struct file *file, void *fh, } #endif -static int device_register_link(struct soc_camera_device *icd) -{ - int ret = dev_set_name(&icd->dev, "%u-%u", icd->iface, icd->devnum); - - if (!ret) - ret = device_register(&icd->dev); - - if (ret < 0) { - /* Prevent calling device_unregister() */ - icd->dev.parent = NULL; - dev_err(&icd->dev, "Cannot register device: %d\n", ret); - /* Even if probe() was unsuccessful for all registered drivers, - * device_register() returns 0, and we add the link, just to - * document this camera's control device */ - } else if (icd->control) - /* Have to sysfs_remove_link() before device_unregister()? */ - if (sysfs_create_link(&icd->dev.kobj, &icd->control->kobj, - "control")) - dev_warn(&icd->dev, - "Failed creating the control symlink\n"); - return ret; -} - /* So far this function cannot fail */ static void scan_add_host(struct soc_camera_host *ici) { @@ -796,106 +793,124 @@ static void scan_add_host(struct soc_camera_host *ici) list_for_each_entry(icd, &devices, list) { if (icd->iface == ici->nr) { + int ret; icd->dev.parent = ici->dev; - device_register_link(icd); + dev_set_name(&icd->dev, "%u-%u", icd->iface, + icd->devnum); + ret = device_register(&icd->dev); + if (ret < 0) { + icd->dev.parent = NULL; + dev_err(&icd->dev, + "Cannot register device: %d\n", ret); + } } } mutex_unlock(&list_lock); } -/* return: 0 if no match found or a match found and - * device_register() successful, error code otherwise */ -static int scan_add_device(struct soc_camera_device *icd) +#ifdef CONFIG_I2C_BOARDINFO +static int soc_camera_init_i2c(struct soc_camera_device *icd, + struct soc_camera_link *icl) { - struct soc_camera_host *ici; - int ret = 0; + struct i2c_client *client; + struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id); + int ret; - mutex_lock(&list_lock); + if (!adap) { + ret = -ENODEV; + dev_err(&icd->dev, "Cannot get I2C adapter #%d. 
No driver?\n", + icl->i2c_adapter_id); + goto ei2cga; + } - list_add_tail(&icd->list, &devices); + icl->board_info->platform_data = icd; - /* Watch out for class_for_each_device / class_find_device API by - * Dave Young */ - list_for_each_entry(ici, &hosts, list) { - if (icd->iface == ici->nr) { - ret = 1; - icd->dev.parent = ici->dev; - break; - } + client = i2c_new_device(adap, icl->board_info); + if (!client) { + ret = -ENOMEM; + goto ei2cnd; } - mutex_unlock(&list_lock); - - if (ret) - ret = device_register_link(icd); + /* + * We set icd drvdata at two locations - here and in + * soc_camera_video_start(). Depending on the module loading / + * initialisation order one of these locations will be entered first + */ + /* Use to_i2c_client(dev) to recover the i2c client */ + dev_set_drvdata(&icd->dev, &client->dev); + return 0; +ei2cnd: + i2c_put_adapter(adap); +ei2cga: return ret; } +static void soc_camera_free_i2c(struct soc_camera_device *icd) +{ + struct i2c_client *client = + to_i2c_client(to_soc_camera_control(icd)); + dev_set_drvdata(&icd->dev, NULL); + i2c_unregister_device(client); + i2c_put_adapter(client->adapter); +} +#else +#define soc_camera_init_i2c(icd, icl) (-ENODEV) +#define soc_camera_free_i2c(icd) do {} while (0) +#endif + +static int video_dev_create(struct soc_camera_device *icd); +/* Called during host-driver probe */ static int soc_camera_probe(struct device *dev) { struct soc_camera_device *icd = to_soc_camera_dev(dev); - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret; - /* - * Possible race scenario: - * modprobe triggers __func__ - * at this moment respective gets rmmod'ed - * to protect take module references. - */ + dev_info(dev, "Probing %s\n", dev_name(dev)); - if (!try_module_get(icd->ops->owner)) { - dev_err(&icd->dev, "Couldn't lock sensor driver.\n"); - ret = -EINVAL; - goto emgd; - } + ret = video_dev_create(icd); + if (ret < 0) + goto evdc; - if (!try_module_get(ici->ops->owner)) { - dev_err(&icd->dev, "Couldn't lock capture bus driver.\n"); + /* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */ + if (icl->board_info) { + ret = soc_camera_init_i2c(icd, icl); + if (ret < 0) + goto eadddev; + } else if (!icl->add_device || !icl->del_device) { ret = -EINVAL; - goto emgi; + goto eadddev; + } else { + ret = icl->add_device(icl, &icd->dev); + if (ret < 0) + goto eadddev; } - mutex_lock(&icd->video_lock); - - /* We only call ->add() here to activate and probe the camera. - * We shall ->remove() and deactivate it immediately afterwards. */ - ret = ici->ops->add(icd); - if (ret < 0) - goto eiadd; - - ret = icd->ops->probe(icd); - if (ret >= 0) { - const struct v4l2_queryctrl *qctrl; + ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, icd->vdev->minor); + if (ret < 0) { + dev_err(&icd->dev, "video_register_device failed: %d\n", ret); + goto evidregd; + } - qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_GAIN); - icd->gain = qctrl ? qctrl->default_value : (unsigned short)~0; - qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = qctrl ? qctrl->default_value : - (unsigned short)~0; + /* Do we have to sysfs_remove_link() before device_unregister()? 
*/ + if (to_soc_camera_control(icd) && + sysfs_create_link(&icd->dev.kobj, &to_soc_camera_control(icd)->kobj, + "control")) + dev_warn(&icd->dev, "Failed creating the control symlink\n"); - ret = soc_camera_init_user_formats(icd); - if (ret < 0) { - if (icd->ops->remove) - icd->ops->remove(icd); - goto eiufmt; - } - icd->height = DEFAULT_HEIGHT; - icd->width = DEFAULT_WIDTH; - icd->field = V4L2_FIELD_ANY; - } + return 0; -eiufmt: - ici->ops->remove(icd); -eiadd: - mutex_unlock(&icd->video_lock); - module_put(ici->ops->owner); -emgi: - module_put(icd->ops->owner); -emgd: +evidregd: + if (icl->board_info) + soc_camera_free_i2c(icd); + else + icl->del_device(icl); +eadddev: + video_device_release(icd->vdev); +evdc: return ret; } @@ -904,13 +919,22 @@ emgd: static int soc_camera_remove(struct device *dev) { struct soc_camera_device *icd = to_soc_camera_dev(dev); + struct soc_camera_link *icl = to_soc_camera_link(icd); + struct video_device *vdev = icd->vdev; - mutex_lock(&icd->video_lock); - if (icd->ops->remove) - icd->ops->remove(icd); - mutex_unlock(&icd->video_lock); + BUG_ON(!dev->parent); - soc_camera_free_user_formats(icd); + if (vdev) { + mutex_lock(&icd->video_lock); + video_unregister_device(vdev); + icd->vdev = NULL; + mutex_unlock(&icd->video_lock); + } + + if (icl->board_info) + soc_camera_free_i2c(icd); + else + icl->del_device(icl); return 0; } @@ -1005,10 +1029,14 @@ void soc_camera_host_unregister(struct soc_camera_host *ici) list_for_each_entry(icd, &devices, list) { if (icd->dev.parent == ici->dev) { + /* The bus->remove will be called */ device_unregister(&icd->dev); /* Not before device_unregister(), .remove * needs parent to call ici->ops->remove() */ icd->dev.parent = NULL; + + /* If the host module is loaded again, device_register() + * would complain "already initialised" */ memset(&icd->dev.kobj, 0, sizeof(icd->dev.kobj)); } } @@ -1020,26 +1048,14 @@ void soc_camera_host_unregister(struct soc_camera_host *ici) EXPORT_SYMBOL(soc_camera_host_unregister); /* Image capture device */ -int soc_camera_device_register(struct soc_camera_device *icd) +static int soc_camera_device_register(struct soc_camera_device *icd) { struct soc_camera_device *ix; int num = -1, i; - if (!icd || !icd->ops || - !icd->ops->probe || - !icd->ops->init || - !icd->ops->release || - !icd->ops->start_capture || - !icd->ops->stop_capture || - !icd->ops->set_crop || - !icd->ops->set_fmt || - !icd->ops->try_fmt || - !icd->ops->query_bus_param || - !icd->ops->set_bus_param) - return -EINVAL; - for (i = 0; i < 256 && num < 0; i++) { num = i; + /* Check if this index is available on this interface */ list_for_each_entry(ix, &devices, list) { if (ix->iface == icd->iface && ix->devnum == i) { num = -1; @@ -1061,21 +1077,15 @@ int soc_camera_device_register(struct soc_camera_device *icd) icd->host_priv = NULL; mutex_init(&icd->video_lock); - return scan_add_device(icd); + list_add_tail(&icd->list, &devices); + + return 0; } -EXPORT_SYMBOL(soc_camera_device_register); -void soc_camera_device_unregister(struct soc_camera_device *icd) +static void soc_camera_device_unregister(struct soc_camera_device *icd) { - mutex_lock(&list_lock); list_del(&icd->list); - - /* The bus->remove will be eventually called */ - if (icd->dev.parent) - device_unregister(&icd->dev); - mutex_unlock(&list_lock); } -EXPORT_SYMBOL(soc_camera_device_unregister); static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = { .vidioc_querycap = soc_camera_querycap, @@ -1106,22 +1116,13 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops 
= { #endif }; -/* - * Usually called from the struct soc_camera_ops .probe() method, i.e., from - * soc_camera_probe() above with .video_lock held - */ -int soc_camera_video_start(struct soc_camera_device *icd) +static int video_dev_create(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - int err = -ENOMEM; - struct video_device *vdev; + struct video_device *vdev = video_device_alloc(); - if (!icd->dev.parent) - return -ENODEV; - - vdev = video_device_alloc(); if (!vdev) - goto evidallocd; + return -ENOMEM; dev_dbg(ici->dev, "Allocated video_device %p\n", vdev); strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name)); @@ -1132,118 +1133,110 @@ int soc_camera_video_start(struct soc_camera_device *icd) vdev->ioctl_ops = &soc_camera_ioctl_ops; vdev->release = video_device_release; vdev->minor = -1; - vdev->tvnorms = V4L2_STD_UNKNOWN, + vdev->tvnorms = V4L2_STD_UNKNOWN; - err = video_register_device(vdev, VFL_TYPE_GRABBER, vdev->minor); - if (err < 0) { - dev_err(vdev->parent, "video_register_device failed\n"); - goto evidregd; - } icd->vdev = vdev; return 0; +} -evidregd: - video_device_release(vdev); -evidallocd: - return err; +/* + * Usually called from the struct soc_camera_ops .probe() method, i.e., from + * soc_camera_probe() above with .video_lock held + */ +int soc_camera_video_start(struct soc_camera_device *icd, struct device *dev) +{ + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + const struct v4l2_queryctrl *qctrl; + + if (!icd->dev.parent) + return -ENODEV; + + if (!icd->ops || + !icd->ops->init || + !icd->ops->release || + !icd->ops->start_capture || + !icd->ops->stop_capture || + !icd->ops->set_fmt || + !icd->ops->try_fmt || + !icd->ops->query_bus_param || + !icd->ops->set_bus_param) + return -EINVAL; + + /* See comment in soc_camera_probe() */ + dev_set_drvdata(&icd->dev, dev); + + qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_GAIN); + icd->gain = qctrl ? qctrl->default_value : (unsigned short)~0; + qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); + icd->exposure = qctrl ? qctrl->default_value : (unsigned short)~0; + + return ici->ops->add(icd); } EXPORT_SYMBOL(soc_camera_video_start); /* Called from client .remove() methods with .video_lock held */ void soc_camera_video_stop(struct soc_camera_device *icd) { - struct video_device *vdev = icd->vdev; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); dev_dbg(&icd->dev, "%s\n", __func__); - if (!icd->dev.parent || !vdev) - return; - - video_unregister_device(vdev); - icd->vdev = NULL; + ici->ops->remove(icd); } EXPORT_SYMBOL(soc_camera_video_stop); -#ifdef CONFIG_I2C_BOARDINFO -static int soc_camera_init_i2c(struct platform_device *pdev, - struct soc_camera_link *icl) +static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev) { - struct i2c_client *client; - struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id); + struct soc_camera_link *icl = pdev->dev.platform_data; + struct soc_camera_device *icd; int ret; - if (!adap) { - ret = -ENODEV; - dev_err(&pdev->dev, "Cannot get adapter #%d. 
No driver?\n", - icl->i2c_adapter_id); - goto ei2cga; - } + if (!icl) + return -EINVAL; - icl->board_info->platform_data = icl; - client = i2c_new_device(adap, icl->board_info); - if (!client) { - ret = -ENOMEM; - goto ei2cnd; - } + icd = kzalloc(sizeof(*icd), GFP_KERNEL); + if (!icd) + return -ENOMEM; - platform_set_drvdata(pdev, client); + icd->iface = icl->bus_id; + platform_set_drvdata(pdev, icd); + icd->dev.platform_data = icl; - return 0; -ei2cnd: - i2c_put_adapter(adap); -ei2cga: - return ret; -} + ret = soc_camera_device_register(icd); + if (ret < 0) + goto escdevreg; -static void soc_camera_free_i2c(struct platform_device *pdev) -{ - struct i2c_client *client = platform_get_drvdata(pdev); + return 0; - if (!client) - return; +escdevreg: + kfree(icd); - i2c_unregister_device(client); - i2c_put_adapter(client->adapter); + return ret; } -#else -#define soc_camera_init_i2c(d, icl) (-ENODEV) -#define soc_camera_free_i2c(d) do {} while (0) -#endif -static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev) +/* Only called on rmmod for each platform device, since they are not + * hot-pluggable. Now we know, that all our users - hosts and devices have + * been unloaded already */ +static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev) { - struct soc_camera_link *icl = pdev->dev.platform_data; + struct soc_camera_device *icd = platform_get_drvdata(pdev); - if (!icl) + if (!icd) return -EINVAL; - if (icl->board_info) - return soc_camera_init_i2c(pdev, icl); - else if (!icl->add_device || !icl->del_device) - return -EINVAL; + soc_camera_device_unregister(icd); - /* &pdev->dev will become &icd->dev */ - return icl->add_device(icl, &pdev->dev); -} + kfree(icd); -static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev) -{ - struct soc_camera_link *icl = pdev->dev.platform_data; - - if (icl->board_info) - soc_camera_free_i2c(pdev); - else - icl->del_device(icl); return 0; } static struct platform_driver __refdata soc_camera_pdrv = { - .probe = soc_camera_pdrv_probe, - .remove = __devexit_p(soc_camera_pdrv_remove), - .driver = { - .name = "soc-camera-pdrv", - .owner = THIS_MODULE, + .remove = __devexit_p(soc_camera_pdrv_remove), + .driver = { + .name = "soc-camera-pdrv", + .owner = THIS_MODULE, }, }; @@ -1256,7 +1249,7 @@ static int __init soc_camera_init(void) if (ret) goto edrvr; - ret = platform_driver_register(&soc_camera_pdrv); + ret = platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe); if (ret) goto epdr; diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index c48676356ab..d84c134f8d5 100644 --- a/drivers/media/video/soc_camera_platform.c +++ b/drivers/media/video/soc_camera_platform.c @@ -21,35 +21,32 @@ #include struct soc_camera_platform_priv { - struct soc_camera_platform_info *info; - struct soc_camera_device icd; struct soc_camera_data_format format; }; static struct soc_camera_platform_info * soc_camera_platform_get_info(struct soc_camera_device *icd) { - struct soc_camera_platform_priv *priv; - priv = container_of(icd, struct soc_camera_platform_priv, icd); - return priv->info; + struct platform_device *pdev = to_platform_device(dev_get_drvdata(&icd->dev)); + return pdev->dev.platform_data; } static int soc_camera_platform_init(struct soc_camera_device *icd) { - struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd); + struct soc_camera_link *icl = to_soc_camera_link(icd); - if (p->power) - p->power(1); + if (icl->power) + 
icl->power(dev_get_drvdata(&icd->dev), 1); return 0; } static int soc_camera_platform_release(struct soc_camera_device *icd) { - struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd); + struct soc_camera_link *icl = to_soc_camera_link(icd); - if (p->power) - p->power(0); + if (icl->power) + icl->power(dev_get_drvdata(&icd->dev), 0); return 0; } @@ -102,31 +99,29 @@ static int soc_camera_platform_try_fmt(struct soc_camera_device *icd, return 0; } -static int soc_camera_platform_video_probe(struct soc_camera_device *icd) +static int soc_camera_platform_video_probe(struct soc_camera_device *icd, + struct platform_device *pdev) { - struct soc_camera_platform_priv *priv; - priv = container_of(icd, struct soc_camera_platform_priv, icd); + struct soc_camera_platform_priv *priv = platform_get_drvdata(pdev); + struct soc_camera_platform_info *p = pdev->dev.platform_data; + int ret; - priv->format.name = priv->info->format_name; - priv->format.depth = priv->info->format_depth; - priv->format.fourcc = priv->info->format.pixelformat; - priv->format.colorspace = priv->info->format.colorspace; + priv->format.name = p->format_name; + priv->format.depth = p->format_depth; + priv->format.fourcc = p->format.pixelformat; + priv->format.colorspace = p->format.colorspace; icd->formats = &priv->format; icd->num_formats = 1; - return soc_camera_video_start(icd); -} - -static void soc_camera_platform_video_remove(struct soc_camera_device *icd) -{ + /* ..._video_start() does dev_set_drvdata(&icd->dev, &pdev->dev) */ + ret = soc_camera_video_start(icd, &pdev->dev); soc_camera_video_stop(icd); + return ret; } static struct soc_camera_ops soc_camera_platform_ops = { .owner = THIS_MODULE, - .probe = soc_camera_platform_video_probe, - .remove = soc_camera_platform_video_remove, .init = soc_camera_platform_init, .release = soc_camera_platform_release, .start_capture = soc_camera_platform_start_capture, @@ -141,11 +136,10 @@ static struct soc_camera_ops soc_camera_platform_ops = { static int soc_camera_platform_probe(struct platform_device *pdev) { struct soc_camera_platform_priv *priv; - struct soc_camera_platform_info *p; + struct soc_camera_platform_info *p = pdev->dev.platform_data; struct soc_camera_device *icd; int ret; - p = pdev->dev.platform_data; if (!p) return -EINVAL; @@ -153,31 +147,40 @@ static int soc_camera_platform_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->info = p; platform_set_drvdata(pdev, priv); - icd = &priv->icd; + icd = to_soc_camera_dev(p->dev); + if (!icd) + goto enoicd; + icd->ops = &soc_camera_platform_ops; - icd->control = &pdev->dev; + dev_set_drvdata(&icd->dev, &pdev->dev); icd->width_min = 0; - icd->width_max = priv->info->format.width; + icd->width_max = p->format.width; icd->height_min = 0; - icd->height_max = priv->info->format.height; + icd->height_max = p->format.height; icd->y_skip_top = 0; - icd->iface = priv->info->iface; - ret = soc_camera_device_register(icd); - if (ret) + ret = soc_camera_platform_video_probe(icd, pdev); + if (ret) { + icd->ops = NULL; kfree(priv); + } return ret; + +enoicd: + kfree(priv); + return -EINVAL; } static int soc_camera_platform_remove(struct platform_device *pdev) { struct soc_camera_platform_priv *priv = platform_get_drvdata(pdev); + struct soc_camera_platform_info *p = pdev->dev.platform_data; + struct soc_camera_device *icd = to_soc_camera_dev(p->dev); - soc_camera_device_unregister(&priv->icd); + icd->ops = NULL; kfree(priv); return 0; } @@ -206,3 +209,4 @@ 
module_exit(soc_camera_platform_module_exit); MODULE_DESCRIPTION("SoC Camera Platform driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:soc_camera_platform"); diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index aa5065ea09e..d780a509faa 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -224,8 +224,6 @@ struct tw9910_hsync_ctrl { struct tw9910_priv { struct tw9910_video_info *info; - struct i2c_client *client; - struct soc_camera_device icd; const struct tw9910_scale_ctrl *scale; }; @@ -511,35 +509,38 @@ tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height) */ static int tw9910_init(struct soc_camera_device *icd) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret = 0; - if (priv->info->link.power) { - ret = priv->info->link.power(&priv->client->dev, 1); + if (icl->power) { + ret = icl->power(&client->dev, 1); if (ret < 0) return ret; } - if (priv->info->link.reset) - ret = priv->info->link.reset(&priv->client->dev); + if (icl->reset) + ret = icl->reset(&client->dev); return ret; } static int tw9910_release(struct soc_camera_device *icd) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_link *icl = to_soc_camera_link(icd); int ret = 0; - if (priv->info->link.power) - ret = priv->info->link.power(&priv->client->dev, 0); + if (icl->power) + ret = icl->power(&client->dev, 0); return ret; } static int tw9910_start_capture(struct soc_camera_device *icd) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct tw9910_priv *priv = i2c_get_clientdata(client); if (!priv->scale) { dev_err(&icd->dev, "norm select error\n"); @@ -567,8 +568,9 @@ static int tw9910_set_bus_param(struct soc_camera_device *icd, static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); - struct soc_camera_link *icl = priv->client->dev.platform_data; + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct tw9910_priv *priv = i2c_get_clientdata(client); + struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | priv->info->buswidth; @@ -610,13 +612,13 @@ static int tw9910_enum_input(struct soc_camera_device *icd, static int tw9910_get_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int ret; if (reg->reg > 0xff) return -EINVAL; - ret = i2c_smbus_read_byte_data(priv->client, reg->reg); + ret = i2c_smbus_read_byte_data(client, reg->reg); if (ret < 0) return ret; @@ -631,20 +633,21 @@ static int tw9910_get_register(struct soc_camera_device *icd, static int tw9910_set_register(struct soc_camera_device *icd, struct v4l2_dbg_register *reg) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if (reg->reg > 0xff || reg->val > 0xff) 
return -EINVAL; - return i2c_smbus_write_byte_data(priv->client, reg->reg, reg->val); + return i2c_smbus_write_byte_data(client, reg->reg, reg->val); } #endif static int tw9910_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct tw9910_priv *priv = i2c_get_clientdata(client); int ret = -EINVAL; u8 val; @@ -658,8 +661,8 @@ static int tw9910_set_crop(struct soc_camera_device *icd, /* * reset hardware */ - tw9910_reset(priv->client); - ret = tw9910_write_array(priv->client, tw9910_default_regs); + tw9910_reset(client); + ret = tw9910_write_array(client, tw9910_default_regs); if (ret < 0) goto tw9910_set_fmt_error; @@ -670,7 +673,7 @@ static int tw9910_set_crop(struct soc_camera_device *icd, if (SOCAM_DATAWIDTH_16 == priv->info->buswidth) val = LEN; - ret = tw9910_mask_set(priv->client, OPFORM, LEN, val); + ret = tw9910_mask_set(client, OPFORM, LEN, val); if (ret < 0) goto tw9910_set_fmt_error; @@ -698,28 +701,28 @@ static int tw9910_set_crop(struct soc_camera_device *icd, val = 0; } - ret = tw9910_mask_set(priv->client, VBICNTL, RTSEL_MASK, val); + ret = tw9910_mask_set(client, VBICNTL, RTSEL_MASK, val); if (ret < 0) goto tw9910_set_fmt_error; /* * set scale */ - ret = tw9910_set_scale(priv->client, priv->scale); + ret = tw9910_set_scale(client, priv->scale); if (ret < 0) goto tw9910_set_fmt_error; /* * set cropping */ - ret = tw9910_set_cropping(priv->client, &tw9910_cropping_ctrl); + ret = tw9910_set_cropping(client, &tw9910_cropping_ctrl); if (ret < 0) goto tw9910_set_fmt_error; /* * set hsync */ - ret = tw9910_set_hsync(priv->client, &tw9910_hsync_ctrl); + ret = tw9910_set_hsync(client, &tw9910_hsync_ctrl); if (ret < 0) goto tw9910_set_fmt_error; @@ -727,7 +730,7 @@ static int tw9910_set_crop(struct soc_camera_device *icd, tw9910_set_fmt_error: - tw9910_reset(priv->client); + tw9910_reset(client); priv->scale = NULL; return ret; @@ -784,9 +787,10 @@ static int tw9910_try_fmt(struct soc_camera_device *icd, return 0; } -static int tw9910_video_probe(struct soc_camera_device *icd) +static int tw9910_video_probe(struct soc_camera_device *icd, + struct i2c_client *client) { - struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd); + struct tw9910_priv *priv = i2c_get_clientdata(client); s32 val; int ret; @@ -810,10 +814,18 @@ static int tw9910_video_probe(struct soc_camera_device *icd) icd->formats = tw9910_color_fmt; icd->num_formats = ARRAY_SIZE(tw9910_color_fmt); + /* Switch master clock on */ + ret = soc_camera_video_start(icd, &client->dev); + if (ret) + return ret; + /* * check and show Product ID */ - val = i2c_smbus_read_byte_data(priv->client, ID); + val = i2c_smbus_read_byte_data(client, ID); + + soc_camera_video_stop(icd); + if (0x0B != GET_ID(val) || 0x00 != GET_ReV(val)) { dev_err(&icd->dev, @@ -824,25 +836,14 @@ static int tw9910_video_probe(struct soc_camera_device *icd) dev_info(&icd->dev, "tw9910 Product ID %0x:%0x\n", GET_ID(val), GET_ReV(val)); - ret = soc_camera_video_start(icd); - if (ret < 0) - return ret; - icd->vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL; icd->vdev->current_norm = V4L2_STD_NTSC; return ret; } -static void tw9910_video_remove(struct soc_camera_device *icd) -{ - soc_camera_video_stop(icd); -} - static struct soc_camera_ops tw9910_ops = { .owner = THIS_MODULE, - .probe = tw9910_video_probe, - .remove = tw9910_video_remove, .init = tw9910_init, .release = tw9910_release, 
.start_capture = tw9910_start_capture, @@ -871,18 +872,25 @@ static int tw9910_probe(struct i2c_client *client, { struct tw9910_priv *priv; struct tw9910_video_info *info; - struct soc_camera_device *icd; + struct soc_camera_device *icd = client->dev.platform_data; + struct i2c_adapter *adapter = + to_i2c_adapter(client->dev.parent); + struct soc_camera_link *icl; const struct tw9910_scale_ctrl *scale; int i, ret; - if (!client->dev.platform_data) + if (!icd) { + dev_err(&client->dev, "TW9910: missing soc-camera data!\n"); return -EINVAL; + } - info = container_of(client->dev.platform_data, - struct tw9910_video_info, link); + icl = to_soc_camera_link(icd); + if (!icl) + return -EINVAL; - if (!i2c_check_functionality(to_i2c_adapter(client->dev.parent), - I2C_FUNC_SMBUS_BYTE_DATA)) { + info = container_of(icl, struct tw9910_video_info, link); + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "I2C-Adapter doesn't support " "I2C_FUNC_SMBUS_BYTE_DATA\n"); @@ -894,12 +902,9 @@ static int tw9910_probe(struct i2c_client *client, return -ENOMEM; priv->info = info; - priv->client = client; i2c_set_clientdata(client, priv); - icd = &priv->icd; icd->ops = &tw9910_ops; - icd->control = &client->dev; icd->iface = info->link.bus_id; /* @@ -925,9 +930,9 @@ static int tw9910_probe(struct i2c_client *client, icd->height_min = min(scale[i].height, icd->height_min); } - ret = soc_camera_device_register(icd); - + ret = tw9910_video_probe(icd, client); if (ret) { + icd->ops = NULL; i2c_set_clientdata(client, NULL); kfree(priv); } @@ -938,8 +943,9 @@ static int tw9910_probe(struct i2c_client *client, static int tw9910_remove(struct i2c_client *client) { struct tw9910_priv *priv = i2c_get_clientdata(client); + struct soc_camera_device *icd = client->dev.platform_data; - soc_camera_device_unregister(&priv->icd); + icd->ops = NULL; i2c_set_clientdata(client, NULL); kfree(priv); return 0; diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 813e12061da..d8b4256126a 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -20,7 +20,6 @@ struct soc_camera_device { struct list_head list; struct device dev; - struct device *control; unsigned short width; /* Current window */ unsigned short height; /* sizes */ unsigned short x_min; /* Camera capabilities */ @@ -131,17 +130,25 @@ static inline struct soc_camera_host *to_soc_camera_host(struct device *dev) return dev_get_drvdata(dev); } -extern int soc_camera_host_register(struct soc_camera_host *ici); -extern void soc_camera_host_unregister(struct soc_camera_host *ici); -extern int soc_camera_device_register(struct soc_camera_device *icd); -extern void soc_camera_device_unregister(struct soc_camera_device *icd); +static inline struct soc_camera_link *to_soc_camera_link(struct soc_camera_device *icd) +{ + return icd->dev.platform_data; +} -extern int soc_camera_video_start(struct soc_camera_device *icd); -extern void soc_camera_video_stop(struct soc_camera_device *icd); +static inline struct device *to_soc_camera_control(struct soc_camera_device *icd) +{ + return dev_get_drvdata(&icd->dev); +} -extern const struct soc_camera_data_format *soc_camera_format_by_fourcc( +int soc_camera_host_register(struct soc_camera_host *ici); +void soc_camera_host_unregister(struct soc_camera_host *ici); + +int soc_camera_video_start(struct soc_camera_device *icd, struct device *dev); +void soc_camera_video_stop(struct soc_camera_device *icd); + +const struct soc_camera_data_format 
*soc_camera_format_by_fourcc( struct soc_camera_device *icd, unsigned int fourcc); -extern const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( +const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( struct soc_camera_device *icd, unsigned int fourcc); struct soc_camera_data_format { @@ -170,8 +177,6 @@ struct soc_camera_format_xlate { struct soc_camera_ops { struct module *owner; - int (*probe)(struct soc_camera_device *); - void (*remove)(struct soc_camera_device *); int (*suspend)(struct soc_camera_device *, pm_message_t state); int (*resume)(struct soc_camera_device *); int (*init)(struct soc_camera_device *); diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h index 3e8f020abf4..b144f947f1c 100644 --- a/include/media/soc_camera_platform.h +++ b/include/media/soc_camera_platform.h @@ -18,11 +18,10 @@ struct device; struct soc_camera_platform_info { int iface; - char *format_name; + const char *format_name; unsigned long format_depth; struct v4l2_pix_format format; unsigned long bus_param; - void (*power)(int); struct device *dev; int (*set_capture)(struct soc_camera_platform_info *info, int enable); struct soc_camera_link link; -- cgit v1.2.3-70-g09d2 From dd4f0ad4b027078b0642d99a2d30c9c93a5e38ac Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:34:17 -0300 Subject: V4L/DVB (12508): soc-camera: remove unused .iface from struct soc_camera_platform_info Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- include/media/soc_camera_platform.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h index b144f947f1c..bb70401b814 100644 --- a/include/media/soc_camera_platform.h +++ b/include/media/soc_camera_platform.h @@ -17,7 +17,6 @@ struct device; struct soc_camera_platform_info { - int iface; const char *format_name; unsigned long format_depth; struct v4l2_pix_format format; -- cgit v1.2.3-70-g09d2 From 979ea1ddf80ac7383acdea03471355ca62702539 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:43:33 -0300 Subject: V4L/DVB (12510): soc-camera: (partially) convert to v4l2-(sub)dev API Convert the soc-camera framework to use the v4l2-(sub)dev API. Start using v4l2-subdev operations. Only a part of the interface between the soc_camera core, soc_camera host drivers on one side and soc_camera device drivers on the other side is replaced so far. The rest of the interface will be replaced in incremental steps, and will require extensions and, possibly, modifications to the v4l2-subdev code. 
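To illustrate the conversion pattern described above (this sketch is not part of the patch itself): client drivers now embed a struct v4l2_subdev in their private data, recover it with container_of(), and register it with v4l2_i2c_subdev_init() instead of registering a struct soc_camera_device. The "mysensor" names below are purely illustrative; the field layout and the sd->priv usage follow the mt9m001 hunks in this patch.

	/* Minimal sketch of the new subdev-based sensor driver skeleton */
	#include <media/v4l2-subdev.h>
	#include <media/v4l2-common.h>

	struct mysensor {
		struct v4l2_subdev subdev;	/* embedded; no struct soc_camera_device any more */
		int model;
	};

	/* Recover the driver private data from the i2c client via the embedded subdev */
	static struct mysensor *to_mysensor(const struct i2c_client *client)
	{
		return container_of(i2c_get_clientdata(client), struct mysensor, subdev);
	}

	static int mysensor_s_stream(struct v4l2_subdev *sd, int enable)
	{
		struct i2c_client *client = sd->priv;	/* sd->priv holds the i2c client here */

		/* start or stop sensor readout with i2c register writes */
		return 0;
	}

	static struct v4l2_subdev_video_ops mysensor_video_ops = {
		.s_stream	= mysensor_s_stream,
	};

	static struct v4l2_subdev_ops mysensor_subdev_ops = {
		.video		= &mysensor_video_ops,
	};

	/* In the i2c probe(): register the subdev instead of a soc_camera_device */
	static void mysensor_register(struct i2c_client *client, struct mysensor *priv)
	{
		v4l2_i2c_subdev_init(&priv->subdev, client, &mysensor_subdev_ops);
	}

Correspondingly, struct soc_camera_ops loses its .probe/.remove and per-format methods, which migrate into the v4l2_subdev core/video ops tables, as can be seen in the mt9m001 and mt9m111 hunks below.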
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 157 +++++++-------- drivers/media/video/mt9m111.c | 309 ++++++++++++----------------- drivers/media/video/mt9t031.c | 173 ++++++++-------- drivers/media/video/mt9v022.c | 167 +++++++--------- drivers/media/video/mx1_camera.c | 31 ++- drivers/media/video/mx3_camera.c | 54 ++--- drivers/media/video/ov772x.c | 169 +++++++--------- drivers/media/video/pxa_camera.c | 107 +++++----- drivers/media/video/sh_mobile_ceu_camera.c | 45 ++--- drivers/media/video/soc_camera.c | 250 +++++++++++++---------- drivers/media/video/soc_camera_platform.c | 115 +++++------ drivers/media/video/tw9910.c | 151 ++++++-------- include/media/soc_camera.h | 23 +-- 13 files changed, 789 insertions(+), 962 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 1e4f269fc08..2a73dac11d3 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -13,13 +13,13 @@ #include #include -#include +#include #include #include /* mt9m001 i2c address 0x5d - * The platform has to define i2c_board_info - * and call i2c_register_board_info() */ + * The platform has to define ctruct i2c_board_info objects and link to them + * from struct soc_camera_link */ /* mt9m001 selected register addresses */ #define MT9M001_CHIP_VERSION 0x00 @@ -69,10 +69,16 @@ static const struct soc_camera_data_format mt9m001_monochrome_formats[] = { }; struct mt9m001 { + struct v4l2_subdev subdev; int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ unsigned char autoexposure; }; +static struct mt9m001 *to_mt9m001(const struct i2c_client *client) +{ + return container_of(i2c_get_clientdata(client), struct mt9m001, subdev); +} + static int reg_read(struct i2c_client *client, const u8 reg) { s32 data = i2c_smbus_read_word_data(client, reg); @@ -110,32 +116,18 @@ static int reg_clear(struct i2c_client *client, const u8 reg, static int mt9m001_init(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); int ret; dev_dbg(&icd->dev, "%s\n", __func__); - if (icl->power) { - ret = icl->power(&client->dev, 1); - if (ret < 0) { - dev_err(icd->vdev->parent, - "Platform failed to power-on the camera.\n"); - return ret; - } - } - - /* The camera could have been already on, we reset it additionally */ - if (icl->reset) - ret = icl->reset(&client->dev); - else - ret = -ENODEV; + /* + * We don't know, whether platform provides reset, + * issue a soft reset too + */ + ret = reg_write(client, MT9M001_RESET, 1); + if (!ret) + ret = reg_write(client, MT9M001_RESET, 0); - if (ret < 0) { - /* Either no platform reset, or platform reset failed */ - ret = reg_write(client, MT9M001_RESET, 1); - if (!ret) - ret = reg_write(client, MT9M001_RESET, 0); - } /* Disable chip, synchronous option update */ if (!ret) ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 0); @@ -146,33 +138,19 @@ static int mt9m001_init(struct soc_camera_device *icd) static int mt9m001_release(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); /* Disable the chip */ reg_write(client, MT9M001_OUTPUT_CONTROL, 0); - if (icl->power) - icl->power(&client->dev, 0); - return 0; } -static int mt9m001_start_capture(struct soc_camera_device *icd) +static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable) { - 
struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; - /* Switch to master "normal" mode */ - if (reg_write(client, MT9M001_OUTPUT_CONTROL, 2) < 0) - return -EIO; - return 0; -} - -static int mt9m001_stop_capture(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - - /* Stop sensor readout */ - if (reg_write(client, MT9M001_OUTPUT_CONTROL, 0) < 0) + /* Switch to master "normal" mode or stop sensor readout */ + if (reg_write(client, MT9M001_OUTPUT_CONTROL, enable ? 2 : 0) < 0) return -EIO; return 0; } @@ -220,7 +198,7 @@ static int mt9m001_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct mt9m001 *mt9m001 = to_mt9m001(client); int ret; const u16 hblank = 9, vblank = 25; @@ -257,9 +235,10 @@ static int mt9m001_set_crop(struct soc_camera_device *icd, return ret; } -static int mt9m001_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { + struct i2c_client *client = sd->priv; + struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_rect rect = { .left = icd->x_current, .top = icd->y_current, @@ -271,9 +250,10 @@ static int mt9m001_set_fmt(struct soc_camera_device *icd, return mt9m001_set_crop(icd, &rect); } -static int mt9m001_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { + struct i2c_client *client = sd->priv; + struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; v4l_bound_align_image(&pix->width, 48, 1280, 1, @@ -283,11 +263,11 @@ static int mt9m001_try_fmt(struct soc_camera_device *icd, return 0; } -static int mt9m001_get_chip_id(struct soc_camera_device *icd, - struct v4l2_dbg_chip_ident *id) +static int mt9m001_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *id) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9m001 *mt9m001 = to_mt9m001(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; @@ -302,10 +282,10 @@ static int mt9m001_get_chip_id(struct soc_camera_device *icd, } #ifdef CONFIG_VIDEO_ADV_DEBUG -static int mt9m001_get_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9m001_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -322,10 +302,10 @@ static int mt9m001_get_register(struct soc_camera_device *icd, return 0; } -static int mt9m001_set_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9m001_s_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -378,35 +358,20 @@ static const struct v4l2_queryctrl mt9m001_controls[] = { } }; -static int mt9m001_get_control(struct soc_camera_device *, struct v4l2_control *); 
-static int mt9m001_set_control(struct soc_camera_device *, struct v4l2_control *); - static struct soc_camera_ops mt9m001_ops = { - .owner = THIS_MODULE, .init = mt9m001_init, .release = mt9m001_release, - .start_capture = mt9m001_start_capture, - .stop_capture = mt9m001_stop_capture, .set_crop = mt9m001_set_crop, - .set_fmt = mt9m001_set_fmt, - .try_fmt = mt9m001_try_fmt, .set_bus_param = mt9m001_set_bus_param, .query_bus_param = mt9m001_query_bus_param, .controls = mt9m001_controls, .num_controls = ARRAY_SIZE(mt9m001_controls), - .get_control = mt9m001_get_control, - .set_control = mt9m001_set_control, - .get_chip_id = mt9m001_get_chip_id, -#ifdef CONFIG_VIDEO_ADV_DEBUG - .get_register = mt9m001_get_register, - .set_register = mt9m001_set_register, -#endif }; -static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) +static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9m001 *mt9m001 = to_mt9m001(client); int data; switch (ctrl->id) { @@ -423,10 +388,11 @@ static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_contro return 0; } -static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) +static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9m001 *mt9m001 = to_mt9m001(client); + struct soc_camera_device *icd = client->dev.platform_data; const struct v4l2_queryctrl *qctrl; int data; @@ -521,10 +487,9 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro static int mt9m001_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { - struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct mt9m001 *mt9m001 = to_mt9m001(client); struct soc_camera_link *icl = to_soc_camera_link(icd); s32 data; - int ret; unsigned long flags; /* We must have a parent by now. And it cannot be a wrong one. 
@@ -533,11 +498,6 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; - /* Switch master clock on */ - ret = soc_camera_video_start(icd, &client->dev); - if (ret) - return ret; - /* Enable the chip */ data = reg_write(client, MT9M001_CHIP_ENABLE, 1); dev_dbg(&icd->dev, "write: %d\n", data); @@ -545,8 +505,6 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, /* Read out the chip version register */ data = reg_read(client, MT9M001_CHIP_VERSION); - soc_camera_video_stop(icd); - /* must be 0x8411 or 0x8421 for colour sensor and 8431 for bw */ switch (data) { case 0x8411: @@ -601,6 +559,27 @@ static void mt9m001_video_remove(struct soc_camera_device *icd) icl->free_bus(icl); } +static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { + .g_ctrl = mt9m001_g_ctrl, + .s_ctrl = mt9m001_s_ctrl, + .g_chip_ident = mt9m001_g_chip_ident, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .g_register = mt9m001_g_register, + .s_register = mt9m001_s_register, +#endif +}; + +static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { + .s_stream = mt9m001_s_stream, + .s_fmt = mt9m001_s_fmt, + .try_fmt = mt9m001_try_fmt, +}; + +static struct v4l2_subdev_ops mt9m001_subdev_ops = { + .core = &mt9m001_subdev_core_ops, + .video = &mt9m001_subdev_video_ops, +}; + static int mt9m001_probe(struct i2c_client *client, const struct i2c_device_id *did) { @@ -631,7 +610,7 @@ static int mt9m001_probe(struct i2c_client *client, if (!mt9m001) return -ENOMEM; - i2c_set_clientdata(client, mt9m001); + v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops); /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m001_ops; @@ -660,7 +639,7 @@ static int mt9m001_probe(struct i2c_client *client, static int mt9m001_remove(struct i2c_client *client) { - struct mt9m001 *mt9m001 = i2c_get_clientdata(client); + struct mt9m001 *mt9m001 = to_mt9m001(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 95c2f089605..29f976afd46 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -148,6 +148,7 @@ enum mt9m111_context { }; struct mt9m111 { + struct v4l2_subdev subdev; int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */ enum mt9m111_context context; struct v4l2_rect rect; @@ -164,6 +165,11 @@ struct mt9m111 { unsigned int autowhitebalance:1; }; +static struct mt9m111 *to_mt9m111(const struct i2c_client *client) +{ + return container_of(i2c_get_clientdata(client), struct mt9m111, subdev); +} + static int reg_page_map_set(struct i2c_client *client, const u16 reg) { int ret; @@ -227,10 +233,9 @@ static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg, return mt9m111_reg_write(client, reg, ret & ~data); } -static int mt9m111_set_context(struct soc_camera_device *icd, +static int mt9m111_set_context(struct i2c_client *client, enum mt9m111_context ctxt) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B @@ -244,11 +249,10 @@ static int mt9m111_set_context(struct soc_camera_device *icd, return reg_write(CONTEXT_CONTROL, valA); } -static int mt9m111_setup_rect(struct soc_camera_device *icd, +static int mt9m111_setup_rect(struct i2c_client *client, struct 
v4l2_rect *rect) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret, is_raw_format; int width = rect->width; int height = rect->height; @@ -290,9 +294,8 @@ static int mt9m111_setup_rect(struct soc_camera_device *icd, return ret; } -static int mt9m111_setup_pixfmt(struct soc_camera_device *icd, u16 outfmt) +static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int ret; ret = reg_write(OUTPUT_FORMAT_CTRL2_A, outfmt); @@ -301,20 +304,19 @@ static int mt9m111_setup_pixfmt(struct soc_camera_device *icd, u16 outfmt) return ret; } -static int mt9m111_setfmt_bayer8(struct soc_camera_device *icd) +static int mt9m111_setfmt_bayer8(struct i2c_client *client) { - return mt9m111_setup_pixfmt(icd, MT9M111_OUTFMT_PROCESSED_BAYER); + return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER); } -static int mt9m111_setfmt_bayer10(struct soc_camera_device *icd) +static int mt9m111_setfmt_bayer10(struct i2c_client *client) { - return mt9m111_setup_pixfmt(icd, MT9M111_OUTFMT_BYPASS_IFP); + return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_BYPASS_IFP); } -static int mt9m111_setfmt_rgb565(struct soc_camera_device *icd) +static int mt9m111_setfmt_rgb565(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int val = 0; if (mt9m111->swap_rgb_red_blue) @@ -323,13 +325,12 @@ static int mt9m111_setfmt_rgb565(struct soc_camera_device *icd) val |= MT9M111_OUTFMT_SWAP_RGB_EVEN; val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565; - return mt9m111_setup_pixfmt(icd, val); + return mt9m111_setup_pixfmt(client, val); } -static int mt9m111_setfmt_rgb555(struct soc_camera_device *icd) +static int mt9m111_setfmt_rgb555(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int val = 0; if (mt9m111->swap_rgb_red_blue) @@ -338,13 +339,12 @@ static int mt9m111_setfmt_rgb555(struct soc_camera_device *icd) val |= MT9M111_OUTFMT_SWAP_RGB_EVEN; val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555; - return mt9m111_setup_pixfmt(icd, val); + return mt9m111_setup_pixfmt(client, val); } -static int mt9m111_setfmt_yuv(struct soc_camera_device *icd) +static int mt9m111_setfmt_yuv(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int val = 0; if (mt9m111->swap_yuv_cb_cr) @@ -352,52 +352,22 @@ static int mt9m111_setfmt_yuv(struct soc_camera_device *icd) if (mt9m111->swap_yuv_y_chromas) val |= MT9M111_OUTFMT_SWAP_YCbCr_C_Y; - return mt9m111_setup_pixfmt(icd, val); + return mt9m111_setup_pixfmt(client, val); } -static int mt9m111_enable(struct soc_camera_device *icd) +static int mt9m111_enable(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; - if (icl->power) { - ret = icl->power(&client->dev, 1); - if (ret < 0) { - dev_err(icd->vdev->parent, - 
"Platform failed to power-on the camera.\n"); - return ret; - } - } - ret = reg_set(RESET, MT9M111_RESET_CHIP_ENABLE); if (!ret) mt9m111->powered = 1; return ret; } -static int mt9m111_disable(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); - int ret; - - ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE); - if (!ret) - mt9m111->powered = 0; - - if (icl->power) - icl->power(&client->dev, 0); - - return ret; -} - -static int mt9m111_reset(struct soc_camera_device *icd) +static int mt9m111_reset(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); int ret; ret = reg_set(RESET, MT9M111_RESET_RESET_MODE); @@ -407,22 +377,9 @@ static int mt9m111_reset(struct soc_camera_device *icd) ret = reg_clear(RESET, MT9M111_RESET_RESET_MODE | MT9M111_RESET_RESET_SOC); - if (icl->reset) - icl->reset(&client->dev); - return ret; } -static int mt9m111_start_capture(struct soc_camera_device *icd) -{ - return 0; -} - -static int mt9m111_stop_capture(struct soc_camera_device *icd) -{ - return 0; -} - static unsigned long mt9m111_query_bus_param(struct soc_camera_device *icd) { struct soc_camera_link *icl = to_soc_camera_link(icd); @@ -442,60 +399,59 @@ static int mt9m111_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; dev_dbg(&icd->dev, "%s left=%d, top=%d, width=%d, height=%d\n", __func__, rect->left, rect->top, rect->width, rect->height); - ret = mt9m111_setup_rect(icd, rect); + ret = mt9m111_setup_rect(client, rect); if (!ret) mt9m111->rect = *rect; return ret; } -static int mt9m111_set_pixfmt(struct soc_camera_device *icd, u32 pixfmt) +static int mt9m111_set_pixfmt(struct i2c_client *client, u32 pixfmt) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; switch (pixfmt) { case V4L2_PIX_FMT_SBGGR8: - ret = mt9m111_setfmt_bayer8(icd); + ret = mt9m111_setfmt_bayer8(client); break; case V4L2_PIX_FMT_SBGGR16: - ret = mt9m111_setfmt_bayer10(icd); + ret = mt9m111_setfmt_bayer10(client); break; case V4L2_PIX_FMT_RGB555: - ret = mt9m111_setfmt_rgb555(icd); + ret = mt9m111_setfmt_rgb555(client); break; case V4L2_PIX_FMT_RGB565: - ret = mt9m111_setfmt_rgb565(icd); + ret = mt9m111_setfmt_rgb565(client); break; case V4L2_PIX_FMT_UYVY: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 0; - ret = mt9m111_setfmt_yuv(icd); + ret = mt9m111_setfmt_yuv(client); break; case V4L2_PIX_FMT_VYUY: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 1; - ret = mt9m111_setfmt_yuv(icd); + ret = mt9m111_setfmt_yuv(client); break; case V4L2_PIX_FMT_YUYV: mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 0; - ret = mt9m111_setfmt_yuv(icd); + ret = mt9m111_setfmt_yuv(client); break; case V4L2_PIX_FMT_YVYU: mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 1; - ret = mt9m111_setfmt_yuv(icd); + ret = mt9m111_setfmt_yuv(client); break; default: - dev_err(&icd->dev, "Pixel format not handled : %x\n", pixfmt); + dev_err(&client->dev, "Pixel format not handled : %x\n", pixfmt); ret = -EINVAL; } @@ -505,11 
+461,10 @@ static int mt9m111_set_pixfmt(struct soc_camera_device *icd, u32 pixfmt) return ret; } -static int mt9m111_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9m111 *mt9m111 = to_mt9m111(client); struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { .left = mt9m111->rect.left, @@ -519,20 +474,19 @@ static int mt9m111_set_fmt(struct soc_camera_device *icd, }; int ret; - dev_dbg(&icd->dev, "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", + dev_dbg(&client->dev, "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", __func__, pix->pixelformat, rect.left, rect.top, rect.width, rect.height); - ret = mt9m111_setup_rect(icd, &rect); + ret = mt9m111_setup_rect(client, &rect); if (!ret) - ret = mt9m111_set_pixfmt(icd, pix->pixelformat); + ret = mt9m111_set_pixfmt(client, pix->pixelformat); if (!ret) mt9m111->rect = rect; return ret; } -static int mt9m111_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9m111_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; @@ -544,11 +498,11 @@ static int mt9m111_try_fmt(struct soc_camera_device *icd, return 0; } -static int mt9m111_get_chip_id(struct soc_camera_device *icd, - struct v4l2_dbg_chip_ident *id) +static int mt9m111_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *id) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9m111 *mt9m111 = to_mt9m111(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; @@ -563,10 +517,10 @@ static int mt9m111_get_chip_id(struct soc_camera_device *icd, } #ifdef CONFIG_VIDEO_ADV_DEBUG -static int mt9m111_get_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9m111_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; int val; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) @@ -584,10 +538,10 @@ static int mt9m111_get_register(struct soc_camera_device *icd, return 0; } -static int mt9m111_set_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9m111_s_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) return -EINVAL; @@ -639,41 +593,24 @@ static const struct v4l2_queryctrl mt9m111_controls[] = { } }; -static int mt9m111_get_control(struct soc_camera_device *, - struct v4l2_control *); -static int mt9m111_set_control(struct soc_camera_device *, - struct v4l2_control *); static int mt9m111_resume(struct soc_camera_device *icd); static int mt9m111_init(struct soc_camera_device *icd); static int mt9m111_release(struct soc_camera_device *icd); static struct soc_camera_ops mt9m111_ops = { - .owner = THIS_MODULE, .init = mt9m111_init, .resume = mt9m111_resume, .release = mt9m111_release, - .start_capture = mt9m111_start_capture, - .stop_capture = mt9m111_stop_capture, .set_crop = 
mt9m111_set_crop, - .set_fmt = mt9m111_set_fmt, - .try_fmt = mt9m111_try_fmt, .query_bus_param = mt9m111_query_bus_param, .set_bus_param = mt9m111_set_bus_param, .controls = mt9m111_controls, .num_controls = ARRAY_SIZE(mt9m111_controls), - .get_control = mt9m111_get_control, - .set_control = mt9m111_set_control, - .get_chip_id = mt9m111_get_chip_id, -#ifdef CONFIG_VIDEO_ADV_DEBUG - .get_register = mt9m111_get_register, - .set_register = mt9m111_set_register, -#endif }; -static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask) +static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; if (mt9m111->context == HIGHPOWER) { @@ -691,9 +628,8 @@ static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask) return ret; } -static int mt9m111_get_global_gain(struct soc_camera_device *icd) +static int mt9m111_get_global_gain(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int data; data = reg_read(GLOBAL_GAIN); @@ -703,9 +639,9 @@ static int mt9m111_get_global_gain(struct soc_camera_device *icd) return data; } -static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain) +static int mt9m111_set_global_gain(struct i2c_client *client, int gain) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct soc_camera_device *icd = client->dev.platform_data; u16 val; if (gain > 63 * 2 * 2) @@ -722,10 +658,9 @@ static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain) return reg_write(GLOBAL_GAIN, val); } -static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on) +static int mt9m111_set_autoexposure(struct i2c_client *client, int on) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; if (on) @@ -739,10 +674,9 @@ static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on) return ret; } -static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on) +static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; if (on) @@ -756,11 +690,10 @@ static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on) return ret; } -static int mt9m111_get_control(struct soc_camera_device *icd, - struct v4l2_control *ctrl) +static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9m111 *mt9m111 = to_mt9m111(client); int data; switch (ctrl->id) { @@ -785,7 +718,7 @@ static int mt9m111_get_control(struct soc_camera_device *icd, ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS); break; case V4L2_CID_GAIN: - data = mt9m111_get_global_gain(icd); + data = mt9m111_get_global_gain(client); if (data < 0) return data; ctrl->value = data; @@ -800,38 +733,36 @@ static int mt9m111_get_control(struct soc_camera_device *icd, return 0; } -static int mt9m111_set_control(struct soc_camera_device *icd, - 
struct v4l2_control *ctrl) +static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9m111 *mt9m111 = to_mt9m111(client); const struct v4l2_queryctrl *qctrl; int ret; qctrl = soc_camera_find_qctrl(&mt9m111_ops, ctrl->id); - if (!qctrl) return -EINVAL; switch (ctrl->id) { case V4L2_CID_VFLIP: mt9m111->vflip = ctrl->value; - ret = mt9m111_set_flip(icd, ctrl->value, + ret = mt9m111_set_flip(client, ctrl->value, MT9M111_RMB_MIRROR_ROWS); break; case V4L2_CID_HFLIP: mt9m111->hflip = ctrl->value; - ret = mt9m111_set_flip(icd, ctrl->value, + ret = mt9m111_set_flip(client, ctrl->value, MT9M111_RMB_MIRROR_COLS); break; case V4L2_CID_GAIN: - ret = mt9m111_set_global_gain(icd, ctrl->value); + ret = mt9m111_set_global_gain(client, ctrl->value); break; case V4L2_CID_EXPOSURE_AUTO: - ret = mt9m111_set_autoexposure(icd, ctrl->value); + ret = mt9m111_set_autoexposure(client, ctrl->value); break; case V4L2_CID_AUTO_WHITE_BALANCE: - ret = mt9m111_set_autowhitebalance(icd, ctrl->value); + ret = mt9m111_set_autowhitebalance(client, ctrl->value); break; default: ret = -EINVAL; @@ -840,34 +771,34 @@ static int mt9m111_set_control(struct soc_camera_device *icd, return ret; } -static int mt9m111_restore_state(struct soc_camera_device *icd) +static int mt9m111_restore_state(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); - - mt9m111_set_context(icd, mt9m111->context); - mt9m111_set_pixfmt(icd, mt9m111->pixfmt); - mt9m111_setup_rect(icd, &mt9m111->rect); - mt9m111_set_flip(icd, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS); - mt9m111_set_flip(icd, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); - mt9m111_set_global_gain(icd, icd->gain); - mt9m111_set_autoexposure(icd, mt9m111->autoexposure); - mt9m111_set_autowhitebalance(icd, mt9m111->autowhitebalance); + struct mt9m111 *mt9m111 = to_mt9m111(client); + struct soc_camera_device *icd = client->dev.platform_data; + + mt9m111_set_context(client, mt9m111->context); + mt9m111_set_pixfmt(client, mt9m111->pixfmt); + mt9m111_setup_rect(client, &mt9m111->rect); + mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS); + mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); + mt9m111_set_global_gain(client, icd->gain); + mt9m111_set_autoexposure(client, mt9m111->autoexposure); + mt9m111_set_autowhitebalance(client, mt9m111->autowhitebalance); return 0; } static int mt9m111_resume(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret = 0; if (mt9m111->powered) { - ret = mt9m111_enable(icd); + ret = mt9m111_enable(client); if (!ret) - ret = mt9m111_reset(icd); + ret = mt9m111_reset(client); if (!ret) - ret = mt9m111_restore_state(icd); + ret = mt9m111_restore_state(client); } return ret; } @@ -875,17 +806,17 @@ static int mt9m111_resume(struct soc_camera_device *icd) static int mt9m111_init(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; mt9m111->context = HIGHPOWER; - ret = mt9m111_enable(icd); + ret = mt9m111_enable(client); if 
(!ret) - ret = mt9m111_reset(icd); + ret = mt9m111_reset(client); if (!ret) - ret = mt9m111_set_context(icd, mt9m111->context); + ret = mt9m111_set_context(client, mt9m111->context); if (!ret) - ret = mt9m111_set_autoexposure(icd, mt9m111->autoexposure); + ret = mt9m111_set_autoexposure(client, mt9m111->autoexposure); if (ret) dev_err(&icd->dev, "mt9m11x init failed: %d\n", ret); return ret; @@ -893,9 +824,14 @@ static int mt9m111_init(struct soc_camera_device *icd) static int mt9m111_release(struct soc_camera_device *icd) { + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; - ret = mt9m111_disable(icd); + ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE); + if (!ret) + mt9m111->powered = 0; + if (ret < 0) dev_err(&icd->dev, "mt9m11x release failed: %d\n", ret); @@ -909,7 +845,7 @@ static int mt9m111_release(struct soc_camera_device *icd) static int mt9m111_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); s32 data; int ret; @@ -921,15 +857,10 @@ static int mt9m111_video_probe(struct soc_camera_device *icd, to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; - /* Switch master clock on */ - ret = soc_camera_video_start(icd, &client->dev); - if (ret) - goto evstart; - - ret = mt9m111_enable(icd); + ret = mt9m111_enable(client); if (ret) goto ei2c; - ret = mt9m111_reset(icd); + ret = mt9m111_reset(client); if (ret) goto ei2c; @@ -961,11 +892,29 @@ static int mt9m111_video_probe(struct soc_camera_device *icd, mt9m111->swap_rgb_red_blue = 1; ei2c: - soc_camera_video_stop(icd); -evstart: return ret; } +static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { + .g_ctrl = mt9m111_g_ctrl, + .s_ctrl = mt9m111_s_ctrl, + .g_chip_ident = mt9m111_g_chip_ident, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .g_register = mt9m111_g_register, + .s_register = mt9m111_s_register, +#endif +}; + +static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { + .s_fmt = mt9m111_s_fmt, + .try_fmt = mt9m111_try_fmt, +}; + +static struct v4l2_subdev_ops mt9m111_subdev_ops = { + .core = &mt9m111_subdev_core_ops, + .video = &mt9m111_subdev_video_ops, +}; + static int mt9m111_probe(struct i2c_client *client, const struct i2c_device_id *did) { @@ -996,7 +945,7 @@ static int mt9m111_probe(struct i2c_client *client, if (!mt9m111) return -ENOMEM; - i2c_set_clientdata(client, mt9m111); + v4l2_i2c_subdev_init(&mt9m111->subdev, client, &mt9m111_subdev_ops); /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m111_ops; @@ -1022,7 +971,7 @@ static int mt9m111_probe(struct i2c_client *client, static int mt9m111_remove(struct i2c_client *client) { - struct mt9m111 *mt9m111 = i2c_get_clientdata(client); + struct mt9m111 *mt9m111 = to_mt9m111(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index d9c7c2fd698..27a5edda902 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -13,13 +13,13 @@ #include #include -#include +#include #include #include /* mt9t031 i2c address 0x5d - * The platform has to define i2c_board_info - * and call i2c_register_board_info() */ + * The platform has to define i2c_board_info and link to it from + * struct soc_camera_link */ /* mt9t031 selected register addresses */ #define MT9T031_CHIP_VERSION 0x00 @@ -68,12 +68,18 @@ static 
const struct soc_camera_data_format mt9t031_colour_formats[] = { }; struct mt9t031 { + struct v4l2_subdev subdev; int model; /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */ unsigned char autoexposure; u16 xskip; u16 yskip; }; +static struct mt9t031 *to_mt9t031(const struct i2c_client *client) +{ + return container_of(i2c_get_clientdata(client), struct mt9t031, subdev); +} + static int reg_read(struct i2c_client *client, const u8 reg) { s32 data = i2c_smbus_read_word_data(client, reg); @@ -134,21 +140,10 @@ static int get_shutter(struct i2c_client *client, u32 *data) return ret < 0 ? ret : 0; } -static int mt9t031_init(struct soc_camera_device *icd) +static int mt9t031_idle(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); int ret; - if (icl->power) { - ret = icl->power(&client->dev, 1); - if (ret < 0) { - dev_err(icd->vdev->parent, - "Platform failed to power-on the camera.\n"); - return ret; - } - } - /* Disable chip output, synchronous option update */ ret = reg_write(client, MT9T031_RESET, 1); if (ret >= 0) @@ -156,43 +151,46 @@ static int mt9t031_init(struct soc_camera_device *icd) if (ret >= 0) ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); - if (ret < 0 && icl->power) - icl->power(&client->dev, 0); - return ret >= 0 ? 0 : -EIO; } -static int mt9t031_release(struct soc_camera_device *icd) +static int mt9t031_disable(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - /* Disable the chip */ reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); - if (icl->power) - icl->power(&client->dev, 0); - return 0; } -static int mt9t031_start_capture(struct soc_camera_device *icd) +static int mt9t031_init(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - /* Switch to master "normal" mode */ - if (reg_set(client, MT9T031_OUTPUT_CONTROL, 2) < 0) - return -EIO; - return 0; + return mt9t031_idle(client); } -static int mt9t031_stop_capture(struct soc_camera_device *icd) +static int mt9t031_release(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - /* Stop sensor readout */ - if (reg_clear(client, MT9T031_OUTPUT_CONTROL, 2) < 0) + return mt9t031_disable(client); +} + +static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct i2c_client *client = sd->priv; + int ret; + + if (enable) + /* Switch to master "normal" mode */ + ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 2); + else + /* Stop sensor readout */ + ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); + + if (ret < 0) return -EIO; + return 0; } @@ -236,7 +234,7 @@ static int mt9t031_set_params(struct soc_camera_device *icd, struct v4l2_rect *rect, u16 xskip, u16 yskip) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct mt9t031 *mt9t031 = to_mt9t031(client); int ret; u16 xbin, ybin, width, height, left, top; const u16 hblank = MT9T031_HORIZONTAL_BLANK, @@ -334,17 +332,17 @@ static int mt9t031_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct mt9t031 *mt9t031 = to_mt9t031(client); /* CROP - no change in scaling, or in limits */ return mt9t031_set_params(icd, rect, 
mt9t031->xskip, mt9t031->yskip); } -static int mt9t031_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9t031 *mt9t031 = to_mt9t031(client); + struct soc_camera_device *icd = client->dev.platform_data; int ret; u16 xskip, yskip; struct v4l2_rect rect = { @@ -379,8 +377,7 @@ static int mt9t031_set_fmt(struct soc_camera_device *icd, return ret; } -static int mt9t031_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9t031_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; @@ -391,11 +388,11 @@ static int mt9t031_try_fmt(struct soc_camera_device *icd, return 0; } -static int mt9t031_get_chip_id(struct soc_camera_device *icd, - struct v4l2_dbg_chip_ident *id) +static int mt9t031_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *id) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9t031 *mt9t031 = to_mt9t031(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; @@ -410,10 +407,10 @@ static int mt9t031_get_chip_id(struct soc_camera_device *icd, } #ifdef CONFIG_VIDEO_ADV_DEBUG -static int mt9t031_get_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9t031_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -429,10 +426,10 @@ static int mt9t031_get_register(struct soc_camera_device *icd, return 0; } -static int mt9t031_set_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9t031_s_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -493,35 +490,20 @@ static const struct v4l2_queryctrl mt9t031_controls[] = { } }; -static int mt9t031_get_control(struct soc_camera_device *, struct v4l2_control *); -static int mt9t031_set_control(struct soc_camera_device *, struct v4l2_control *); - static struct soc_camera_ops mt9t031_ops = { - .owner = THIS_MODULE, .init = mt9t031_init, .release = mt9t031_release, - .start_capture = mt9t031_start_capture, - .stop_capture = mt9t031_stop_capture, .set_crop = mt9t031_set_crop, - .set_fmt = mt9t031_set_fmt, - .try_fmt = mt9t031_try_fmt, .set_bus_param = mt9t031_set_bus_param, .query_bus_param = mt9t031_query_bus_param, .controls = mt9t031_controls, .num_controls = ARRAY_SIZE(mt9t031_controls), - .get_control = mt9t031_get_control, - .set_control = mt9t031_set_control, - .get_chip_id = mt9t031_get_chip_id, -#ifdef CONFIG_VIDEO_ADV_DEBUG - .get_register = mt9t031_get_register, - .set_register = mt9t031_set_register, -#endif }; -static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) +static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - 
struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9t031 *mt9t031 = to_mt9t031(client); int data; switch (ctrl->id) { @@ -544,10 +526,11 @@ static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_contro return 0; } -static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) +static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9t031 *mt9t031 = to_mt9t031(client); + struct soc_camera_device *icd = client->dev.platform_data; const struct v4l2_queryctrl *qctrl; int data; @@ -653,12 +636,11 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro /* Interface active, can use i2c. If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ -static int mt9t031_video_probe(struct soc_camera_device *icd, - struct i2c_client *client) +static int mt9t031_video_probe(struct i2c_client *client) { - struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct soc_camera_device *icd = client->dev.platform_data; + struct mt9t031 *mt9t031 = to_mt9t031(client); s32 data; - int ret; /* We must have a parent by now. And it cannot be a wrong one. * So this entire test is completely redundant. */ @@ -666,11 +648,6 @@ static int mt9t031_video_probe(struct soc_camera_device *icd, to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; - /* Switch master clock on */ - ret = soc_camera_video_start(icd, &client->dev); - if (ret) - return ret; - /* Enable the chip */ data = reg_write(client, MT9T031_CHIP_ENABLE, 1); dev_dbg(&icd->dev, "write: %d\n", data); @@ -678,8 +655,6 @@ static int mt9t031_video_probe(struct soc_camera_device *icd, /* Read out the chip version register */ data = reg_read(client, MT9T031_CHIP_VERSION); - soc_camera_video_stop(icd); - switch (data) { case 0x1621: mt9t031->model = V4L2_IDENT_MT9T031; @@ -697,6 +672,27 @@ static int mt9t031_video_probe(struct soc_camera_device *icd, return 0; } +static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { + .g_ctrl = mt9t031_g_ctrl, + .s_ctrl = mt9t031_s_ctrl, + .g_chip_ident = mt9t031_g_chip_ident, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .g_register = mt9t031_g_register, + .s_register = mt9t031_s_register, +#endif +}; + +static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { + .s_stream = mt9t031_s_stream, + .s_fmt = mt9t031_s_fmt, + .try_fmt = mt9t031_try_fmt, +}; + +static struct v4l2_subdev_ops mt9t031_subdev_ops = { + .core = &mt9t031_subdev_core_ops, + .video = &mt9t031_subdev_video_ops, +}; + static int mt9t031_probe(struct i2c_client *client, const struct i2c_device_id *did) { @@ -727,7 +723,7 @@ static int mt9t031_probe(struct i2c_client *client, if (!mt9t031) return -ENOMEM; - i2c_set_clientdata(client, mt9t031); + v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops); /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9t031_ops; @@ -747,7 +743,12 @@ static int mt9t031_probe(struct i2c_client *client, mt9t031->xskip = 1; mt9t031->yskip = 1; - ret = mt9t031_video_probe(icd, client); + mt9t031_idle(client); + + ret = mt9t031_video_probe(client); + + mt9t031_disable(client); + if (ret) { icd->ops = NULL; i2c_set_clientdata(client, NULL); @@ -759,7 +760,7 @@ static int mt9t031_probe(struct i2c_client *client, 
static int mt9t031_remove(struct i2c_client *client) { - struct mt9t031 *mt9t031 = i2c_get_clientdata(client); + struct mt9t031 *mt9t031 = to_mt9t031(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index 959cc299f1a..3cb9f0f1e25 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -14,13 +14,13 @@ #include #include -#include +#include #include #include /* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c - * The platform has to define i2c_board_info - * and call i2c_register_board_info() */ + * The platform has to define ctruct i2c_board_info objects and link to them + * from struct soc_camera_link */ static char *sensor_type; module_param(sensor_type, charp, S_IRUGO); @@ -85,10 +85,16 @@ static const struct soc_camera_data_format mt9v022_monochrome_formats[] = { }; struct mt9v022 { + struct v4l2_subdev subdev; int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */ u16 chip_control; }; +static struct mt9v022 *to_mt9v022(const struct i2c_client *client) +{ + return container_of(i2c_get_clientdata(client), struct mt9v022, subdev); +} + static int reg_read(struct i2c_client *client, const u8 reg) { s32 data = i2c_smbus_read_word_data(client, reg); @@ -126,26 +132,9 @@ static int reg_clear(struct i2c_client *client, const u8 reg, static int mt9v022_init(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct mt9v022 *mt9v022 = to_mt9v022(client); int ret; - if (icl->power) { - ret = icl->power(&client->dev, 1); - if (ret < 0) { - dev_err(icd->vdev->parent, - "Platform failed to power-on the camera.\n"); - return ret; - } - } - - /* - * The camera could have been already on, we hard-reset it additionally, - * if available. Soft reset is done in video_probe(). 
- */ - if (icl->reset) - icl->reset(&client->dev); - /* Almost the default mode: master, parallel, simultaneous, and an * undocumented bit 0x200, which is present in table 7, but not in 8, * plus snapshot mode to disable scan for now */ @@ -169,37 +158,19 @@ static int mt9v022_init(struct soc_camera_device *icd) return ret; } -static int mt9v022_release(struct soc_camera_device *icd) +static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); + struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); - if (icl->power) - icl->power(&client->dev, 0); - - return 0; -} - -static int mt9v022_start_capture(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9v022 *mt9v022 = i2c_get_clientdata(client); - /* Switch to master "normal" mode */ - mt9v022->chip_control &= ~0x10; - if (reg_write(client, MT9V022_CHIP_CONTROL, - mt9v022->chip_control) < 0) - return -EIO; - return 0; -} + if (enable) + /* Switch to master "normal" mode */ + mt9v022->chip_control &= ~0x10; + else + /* Switch to snapshot mode */ + mt9v022->chip_control |= 0x10; -static int mt9v022_stop_capture(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9v022 *mt9v022 = i2c_get_clientdata(client); - /* Switch to snapshot mode */ - mt9v022->chip_control |= 0x10; - if (reg_write(client, MT9V022_CHIP_CONTROL, - mt9v022->chip_control) < 0) + if (reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control) < 0) return -EIO; return 0; } @@ -208,7 +179,7 @@ static int mt9v022_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct mt9v022 *mt9v022 = to_mt9v022(client); struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK; int ret; @@ -320,11 +291,11 @@ static int mt9v022_set_crop(struct soc_camera_device *icd, return 0; } -static int mt9v022_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); + struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { .left = icd->x_current, @@ -357,9 +328,10 @@ static int mt9v022_set_fmt(struct soc_camera_device *icd, return mt9v022_set_crop(icd, &rect); } -static int mt9v022_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { + struct i2c_client *client = sd->priv; + struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; v4l_bound_align_image(&pix->width, 48, 752, 2 /* ? 
*/, @@ -369,11 +341,11 @@ static int mt9v022_try_fmt(struct soc_camera_device *icd, return 0; } -static int mt9v022_get_chip_id(struct soc_camera_device *icd, - struct v4l2_dbg_chip_ident *id) +static int mt9v022_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *id) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; @@ -388,10 +360,10 @@ static int mt9v022_get_chip_id(struct soc_camera_device *icd, } #ifdef CONFIG_VIDEO_ADV_DEBUG -static int mt9v022_get_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9v022_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -408,10 +380,10 @@ static int mt9v022_get_register(struct soc_camera_device *icd, return 0; } -static int mt9v022_set_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int mt9v022_s_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; @@ -480,35 +452,18 @@ static const struct v4l2_queryctrl mt9v022_controls[] = { } }; -static int mt9v022_get_control(struct soc_camera_device *, struct v4l2_control *); -static int mt9v022_set_control(struct soc_camera_device *, struct v4l2_control *); - static struct soc_camera_ops mt9v022_ops = { - .owner = THIS_MODULE, .init = mt9v022_init, - .release = mt9v022_release, - .start_capture = mt9v022_start_capture, - .stop_capture = mt9v022_stop_capture, .set_crop = mt9v022_set_crop, - .set_fmt = mt9v022_set_fmt, - .try_fmt = mt9v022_try_fmt, .set_bus_param = mt9v022_set_bus_param, .query_bus_param = mt9v022_query_bus_param, .controls = mt9v022_controls, .num_controls = ARRAY_SIZE(mt9v022_controls), - .get_control = mt9v022_get_control, - .set_control = mt9v022_set_control, - .get_chip_id = mt9v022_get_chip_id, -#ifdef CONFIG_VIDEO_ADV_DEBUG - .get_register = mt9v022_get_register, - .set_register = mt9v022_set_register, -#endif }; -static int mt9v022_get_control(struct soc_camera_device *icd, - struct v4l2_control *ctrl) +static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; int data; switch (ctrl->id) { @@ -540,15 +495,14 @@ static int mt9v022_get_control(struct soc_camera_device *icd, return 0; } -static int mt9v022_set_control(struct soc_camera_device *icd, - struct v4l2_control *ctrl) +static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int data; - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; + struct soc_camera_device *icd = client->dev.platform_data; const struct v4l2_queryctrl *qctrl; qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id); - if (!qctrl) return -EINVAL; @@ -644,7 +598,7 @@ static int mt9v022_set_control(struct soc_camera_device *icd, static int mt9v022_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { - 
struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct mt9v022 *mt9v022 = to_mt9v022(client); struct soc_camera_link *icl = to_soc_camera_link(icd); s32 data; int ret; @@ -654,11 +608,6 @@ static int mt9v022_video_probe(struct soc_camera_device *icd, to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; - /* Switch master clock on */ - ret = soc_camera_video_start(icd, &client->dev); - if (ret) - return ret; - /* Read out the chip version register */ data = reg_read(client, MT9V022_CHIP_VERSION); @@ -723,8 +672,6 @@ static int mt9v022_video_probe(struct soc_camera_device *icd, "monochrome" : "colour"); ei2c: - soc_camera_video_stop(icd); - return ret; } @@ -739,6 +686,27 @@ static void mt9v022_video_remove(struct soc_camera_device *icd) icl->free_bus(icl); } +static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = { + .g_ctrl = mt9v022_g_ctrl, + .s_ctrl = mt9v022_s_ctrl, + .g_chip_ident = mt9v022_g_chip_ident, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .g_register = mt9v022_g_register, + .s_register = mt9v022_s_register, +#endif +}; + +static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { + .s_stream = mt9v022_s_stream, + .s_fmt = mt9v022_s_fmt, + .try_fmt = mt9v022_try_fmt, +}; + +static struct v4l2_subdev_ops mt9v022_subdev_ops = { + .core = &mt9v022_subdev_core_ops, + .video = &mt9v022_subdev_video_ops, +}; + static int mt9v022_probe(struct i2c_client *client, const struct i2c_device_id *did) { @@ -769,8 +737,9 @@ static int mt9v022_probe(struct i2c_client *client, if (!mt9v022) return -ENOMEM; + v4l2_i2c_subdev_init(&mt9v022->subdev, client, &mt9v022_subdev_ops); + mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT; - i2c_set_clientdata(client, mt9v022); icd->ops = &mt9v022_ops; icd->x_min = 1; @@ -795,7 +764,7 @@ static int mt9v022_probe(struct i2c_client *client, static int mt9v022_remove(struct i2c_client *client) { - struct mt9v022 *mt9v022 = i2c_get_clientdata(client); + struct mt9v022 *mt9v022 = to_mt9v022(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 736c31d2319..ea4ceaec85f 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -219,7 +219,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev) int ret; if (unlikely(!pcdev->active)) { - dev_err(pcdev->soc_host.dev, "DMA End IRQ with no active buffer\n"); + dev_err(pcdev->icd->dev.parent, "DMA End IRQ with no active buffer\n"); return -EFAULT; } @@ -229,7 +229,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev) vbuf->size, pcdev->res->start + CSIRXR, DMA_MODE_READ); if (unlikely(ret)) - dev_err(pcdev->soc_host.dev, "Failed to setup DMA sg list\n"); + dev_err(pcdev->icd->dev.parent, "Failed to setup DMA sg list\n"); return ret; } @@ -334,14 +334,14 @@ static void mx1_camera_dma_irq(int channel, void *data) imx_dma_disable(channel); if (unlikely(!pcdev->active)) { - dev_err(pcdev->soc_host.dev, "DMA End IRQ with no active buffer\n"); + dev_err(pcdev->icd->dev.parent, "DMA End IRQ with no active buffer\n"); goto out; } vb = &pcdev->active->vb; buf = container_of(vb, struct mx1_buffer, vb); WARN_ON(buf->inwork || list_empty(&vb->queue)); - dev_dbg(pcdev->soc_host.dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, + dev_dbg(pcdev->icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); mx1_camera_wakeup(pcdev, vb, buf); @@ -362,7 +362,7 @@ static void mx1_camera_init_videobuf(struct videobuf_queue 
*q, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx1_camera_dev *pcdev = ici->priv; - videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, ici->dev, + videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->dev.parent, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, @@ -381,7 +381,7 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev) * they get a nice Oops */ div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1; - dev_dbg(pcdev->soc_host.dev, "System clock %lukHz, target freq %dkHz, " + dev_dbg(pcdev->icd->dev.parent, "System clock %lukHz, target freq %dkHz, " "divisor %lu\n", lcdclk / 1000, mclk / 1000, div); return div; @@ -391,7 +391,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev) { unsigned int csicr1 = CSICR1_EN; - dev_dbg(pcdev->soc_host.dev, "Activate device\n"); + dev_dbg(pcdev->icd->dev.parent, "Activate device\n"); clk_enable(pcdev->clk); @@ -407,7 +407,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev) static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev) { - dev_dbg(pcdev->soc_host.dev, "Deactivate device\n"); + dev_dbg(pcdev->icd->dev.parent, "Deactivate device\n"); /* Disable all CSI interface */ __raw_writel(0x00, pcdev->base + CSICR1); @@ -432,10 +432,8 @@ static int mx1_camera_add_device(struct soc_camera_device *icd) icd->devnum); mx1_camera_activate(pcdev); - ret = icd->ops->init(icd); - if (!ret) - pcdev->icd = icd; + pcdev->icd = icd; ebusy: return ret; @@ -459,8 +457,6 @@ static void mx1_camera_remove_device(struct soc_camera_device *icd) dev_info(&icd->dev, "MX1 Camera driver detached from camera %d\n", icd->devnum); - icd->ops->release(icd); - mx1_camera_deactivate(pcdev); pcdev->icd = NULL; @@ -546,11 +542,11 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd, xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { - dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat); + dev_warn(icd->dev.parent, "Format %x not found\n", pix->pixelformat); return -EINVAL; } - ret = icd->ops->set_fmt(icd, f); + ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, s_fmt, f); if (!ret) { icd->buswidth = xlate->buswidth; icd->current_fmt = xlate->host_fmt; @@ -562,10 +558,11 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd, static int mx1_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); /* TODO: limit to mx1 hardware capabilities */ /* limit to sensor capabilities */ - return icd->ops->try_fmt(icd, f); + return v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, try_fmt, f); } static int mx1_camera_reqbufs(struct soc_camera_file *icf, @@ -737,7 +734,7 @@ static int __init mx1_camera_probe(struct platform_device *pdev) pcdev->soc_host.drv_name = DRIVER_NAME; pcdev->soc_host.ops = &mx1_soc_camera_host_ops; pcdev->soc_host.priv = pcdev; - pcdev->soc_host.dev = &pdev->dev; + pcdev->soc_host.v4l2_dev.dev = &pdev->dev; pcdev->soc_host.nr = pdev->id; err = soc_camera_host_register(&pcdev->soc_host); if (err) diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 2edf77a6256..677d355be8f 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -431,7 +431,7 @@ static void mx3_camera_init_videobuf(struct videobuf_queue *q, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; - videobuf_queue_dma_contig_init(q, &mx3_videobuf_ops, ici->dev, 
+ videobuf_queue_dma_contig_init(q, &mx3_videobuf_ops, icd->dev.parent, &mx3_cam->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, @@ -494,17 +494,11 @@ static int mx3_camera_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; - int ret; - if (mx3_cam->icd) { - ret = -EBUSY; - goto ebusy; - } + if (mx3_cam->icd) + return -EBUSY; mx3_camera_activate(mx3_cam, icd); - ret = icd->ops->init(icd); - if (ret < 0) - goto einit; mx3_cam->icd = icd; @@ -512,12 +506,6 @@ static int mx3_camera_add_device(struct soc_camera_device *icd) icd->devnum); return 0; - -einit: - clk_disable(mx3_cam->clk); -ebusy: - - return ret; } /* Called with .video_lock held */ @@ -534,8 +522,6 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd) *ichan = NULL; } - icd->ops->release(icd); - clk_disable(mx3_cam->clk); mx3_cam->icd = NULL; @@ -600,7 +586,7 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam, *flags |= SOCAM_DATAWIDTH_4; break; default: - dev_info(mx3_cam->soc_host.dev, "Unsupported bus width %d\n", + dev_info(mx3_cam->soc_host.v4l2_dev.dev, "Unsupported bus width %d\n", buswidth); return -EINVAL; } @@ -616,7 +602,7 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd, unsigned long bus_flags, camera_flags; int ret = test_platform_param(mx3_cam, depth, &bus_flags); - dev_dbg(ici->dev, "requested bus width %d bit: %d\n", depth, ret); + dev_dbg(icd->dev.parent, "requested bus width %d bit: %d\n", depth, ret); if (ret < 0) return ret; @@ -639,7 +625,7 @@ static bool chan_filter(struct dma_chan *chan, void *arg) if (!rq) return false; - pdata = rq->mx3_cam->soc_host.dev->platform_data; + pdata = rq->mx3_cam->soc_host.v4l2_dev.dev->platform_data; return rq->id == chan->chan_id && pdata->dma_dev == chan->device->dev; @@ -699,7 +685,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx, xlate->cam_fmt = icd->formats + idx; xlate->buswidth = buswidth; xlate++; - dev_dbg(ici->dev, "Providing format %s using %s\n", + dev_dbg(icd->dev.parent, "Providing format %s using %s\n", mx3_camera_formats[0].name, icd->formats[idx].name); } @@ -711,7 +697,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx, xlate->cam_fmt = icd->formats + idx; xlate->buswidth = buswidth; xlate++; - dev_dbg(ici->dev, "Providing format %s using %s\n", + dev_dbg(icd->dev.parent, "Providing format %s using %s\n", mx3_camera_formats[0].name, icd->formats[idx].name); } @@ -724,7 +710,7 @@ passthrough: xlate->cam_fmt = icd->formats + idx; xlate->buswidth = buswidth; xlate++; - dev_dbg(ici->dev, + dev_dbg(icd->dev.parent, "Providing format %s in pass-through mode\n", icd->formats[idx].name); } @@ -831,7 +817,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { - dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat); + dev_warn(icd->dev.parent, "Format %x not found\n", pix->pixelformat); return -EINVAL; } @@ -847,7 +833,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, configure_geometry(mx3_cam, &rect); - ret = icd->ops->set_fmt(icd, f); + ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, s_fmt, f); if (!ret) { icd->buswidth = xlate->buswidth; icd->current_fmt = xlate->host_fmt; @@ -868,7 +854,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd, xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (pixfmt && !xlate) { - 
dev_warn(ici->dev, "Format %x not found\n", pixfmt); + dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); return -EINVAL; } @@ -885,7 +871,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd, /* camera has to see its format, but the user the original one */ pix->pixelformat = xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ - ret = icd->ops->try_fmt(icd, f); + ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, try_fmt, f); pix->pixelformat = xlate->host_fmt->fourcc; field = pix->field; @@ -935,11 +921,11 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { - dev_warn(ici->dev, "Format %x not found\n", pixfmt); + dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); return -EINVAL; } - dev_dbg(ici->dev, "requested bus width %d bit: %d\n", + dev_dbg(icd->dev.parent, "requested bus width %d bit: %d\n", icd->buswidth, ret); if (ret < 0) @@ -948,10 +934,10 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) camera_flags = icd->ops->query_bus_param(icd); common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags); - dev_dbg(ici->dev, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n", + dev_dbg(icd->dev.parent, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n", camera_flags, bus_flags, common_flags); if (!common_flags) { - dev_dbg(ici->dev, "no common flags"); + dev_dbg(icd->dev.parent, "no common flags"); return -EINVAL; } @@ -1005,7 +991,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) ret = icd->ops->set_bus_param(icd, common_flags); if (ret < 0) { - dev_dbg(ici->dev, "camera set_bus_param(%lx) returned %d\n", + dev_dbg(icd->dev.parent, "camera set_bus_param(%lx) returned %d\n", common_flags, ret); return ret; } @@ -1060,7 +1046,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) csi_reg_write(mx3_cam, sens_conf | dw, CSI_SENS_CONF); - dev_dbg(ici->dev, "Set SENS_CONF to %x\n", sens_conf | dw); + dev_dbg(icd->dev.parent, "Set SENS_CONF to %x\n", sens_conf | dw); return 0; } @@ -1145,7 +1131,7 @@ static int __devinit mx3_camera_probe(struct platform_device *pdev) soc_host->drv_name = MX3_CAM_DRV_NAME; soc_host->ops = &mx3_soc_camera_host_ops; soc_host->priv = mx3_cam; - soc_host->dev = &pdev->dev; + soc_host->v4l2_dev.dev = &pdev->dev; soc_host->nr = pdev->id; err = soc_camera_host_register(soc_host); diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 3ea650d55b1..119159773a6 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include @@ -398,6 +398,7 @@ struct ov772x_win_size { }; struct ov772x_priv { + struct v4l2_subdev subdev; struct ov772x_camera_info *info; const struct ov772x_color_format *fmt; const struct ov772x_win_size *win; @@ -575,6 +576,11 @@ static const struct v4l2_queryctrl ov772x_controls[] = { * general function */ +static struct ov772x_priv *to_ov772x(const struct i2c_client *client) +{ + return container_of(i2c_get_clientdata(client), struct ov772x_priv, subdev); +} + static int ov772x_write_array(struct i2c_client *client, const struct regval_list *vals) { @@ -615,61 +621,29 @@ static int ov772x_reset(struct i2c_client *client) * soc_camera_ops function */ -static int ov772x_init(struct soc_camera_device *icd) +static int ov772x_s_stream(struct v4l2_subdev *sd, int enable) { - struct i2c_client *client 
= to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - int ret = 0; + struct i2c_client *client = sd->priv; + struct ov772x_priv *priv = to_ov772x(client); - if (icl->power) { - ret = icl->power(&client->dev, 1); - if (ret < 0) - return ret; + if (!enable) { + ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE); + return 0; } - if (icl->reset) - ret = icl->reset(&client->dev); - - return ret; -} - -static int ov772x_release(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - int ret = 0; - - if (icl->power) - ret = icl->power(&client->dev, 0); - - return ret; -} - -static int ov772x_start_capture(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct ov772x_priv *priv = i2c_get_clientdata(client); - if (!priv->win || !priv->fmt) { - dev_err(&icd->dev, "norm or win select error\n"); + dev_err(&client->dev, "norm or win select error\n"); return -EPERM; } ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0); - dev_dbg(&icd->dev, + dev_dbg(&client->dev, "format %s, win %s\n", priv->fmt->name, priv->win->name); return 0; } -static int ov772x_stop_capture(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE); - return 0; -} - static int ov772x_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { @@ -688,11 +662,10 @@ static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd) return soc_camera_apply_sensor_flags(icl, flags); } -static int ov772x_get_control(struct soc_camera_device *icd, - struct v4l2_control *ctrl) +static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct ov772x_priv *priv = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct ov772x_priv *priv = to_ov772x(client); switch (ctrl->id) { case V4L2_CID_VFLIP: @@ -705,11 +678,10 @@ static int ov772x_get_control(struct soc_camera_device *icd, return 0; } -static int ov772x_set_control(struct soc_camera_device *icd, - struct v4l2_control *ctrl) +static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct ov772x_priv *priv = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct ov772x_priv *priv = to_ov772x(client); int ret = 0; u8 val; @@ -733,11 +705,11 @@ static int ov772x_set_control(struct soc_camera_device *icd, return ret; } -static int ov772x_get_chip_id(struct soc_camera_device *icd, - struct v4l2_dbg_chip_ident *id) +static int ov772x_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *id) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct ov772x_priv *priv = i2c_get_clientdata(client); + struct i2c_client *client = sd->priv; + struct ov772x_priv *priv = to_ov772x(client); id->ident = priv->model; id->revision = 0; @@ -746,10 +718,10 @@ static int ov772x_get_chip_id(struct soc_camera_device *icd, } #ifdef CONFIG_VIDEO_ADV_DEBUG -static int ov772x_get_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int ov772x_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = 
to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; int ret; reg->size = 1; @@ -765,10 +737,10 @@ static int ov772x_get_register(struct soc_camera_device *icd, return 0; } -static int ov772x_set_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int ov772x_s_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->reg > 0xff || reg->val > 0xff) @@ -778,8 +750,7 @@ static int ov772x_set_register(struct soc_camera_device *icd, } #endif -static const struct ov772x_win_size* -ov772x_select_win(u32 width, u32 height) +static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height) { __u32 diff; const struct ov772x_win_size *win; @@ -798,11 +769,10 @@ ov772x_select_win(u32 width, u32 height) return win; } -static int ov772x_set_params(struct soc_camera_device *icd, +static int ov772x_set_params(struct i2c_client *client, u32 width, u32 height, u32 pixfmt) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct ov772x_priv *priv = i2c_get_clientdata(client); + struct ov772x_priv *priv = to_ov772x(client); int ret = -EINVAL; u8 val; int i; @@ -817,7 +787,6 @@ static int ov772x_set_params(struct soc_camera_device *icd, break; } } - dev_dbg(&icd->dev, "Using fmt %x #%d\n", pixfmt, i); if (!priv->fmt) goto ov772x_set_fmt_error; @@ -939,26 +908,26 @@ static int ov772x_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct ov772x_priv *priv = i2c_get_clientdata(client); + struct ov772x_priv *priv = to_ov772x(client); if (!priv->fmt) return -EINVAL; - return ov772x_set_params(icd, rect->width, rect->height, + return ov772x_set_params(client, rect->width, rect->height, priv->fmt->fourcc); } -static int ov772x_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { + struct i2c_client *client = sd->priv; struct v4l2_pix_format *pix = &f->fmt.pix; - return ov772x_set_params(icd, pix->width, pix->height, + return ov772x_set_params(client, pix->width, pix->height, pix->pixelformat); } -static int ov772x_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int ov772x_try_fmt(struct v4l2_subdev *sd, + struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; const struct ov772x_win_size *win; @@ -978,10 +947,9 @@ static int ov772x_try_fmt(struct soc_camera_device *icd, static int ov772x_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { - struct ov772x_priv *priv = i2c_get_clientdata(client); + struct ov772x_priv *priv = to_ov772x(client); u8 pid, ver; const char *devname; - int ret; /* * We must have a parent by now. And it cannot be a wrong one. 
@@ -1003,11 +971,6 @@ static int ov772x_video_probe(struct soc_camera_device *icd, icd->formats = ov772x_fmt_lists; icd->num_formats = ARRAY_SIZE(ov772x_fmt_lists); - /* Switch master clock on */ - ret = soc_camera_video_start(icd, &client->dev); - if (ret) - return ret; - /* * check and show product ID and manufacturer ID */ @@ -1026,8 +989,7 @@ static int ov772x_video_probe(struct soc_camera_device *icd, default: dev_err(&icd->dev, "Product ID error %x:%x\n", pid, ver); - ret = -ENODEV; - goto ever; + return -ENODEV; } dev_info(&icd->dev, @@ -1038,34 +1000,38 @@ static int ov772x_video_probe(struct soc_camera_device *icd, i2c_smbus_read_byte_data(client, MIDH), i2c_smbus_read_byte_data(client, MIDL)); - soc_camera_video_stop(icd); - -ever: - return ret; + return 0; } static struct soc_camera_ops ov772x_ops = { - .owner = THIS_MODULE, - .init = ov772x_init, - .release = ov772x_release, - .start_capture = ov772x_start_capture, - .stop_capture = ov772x_stop_capture, .set_crop = ov772x_set_crop, - .set_fmt = ov772x_set_fmt, - .try_fmt = ov772x_try_fmt, .set_bus_param = ov772x_set_bus_param, .query_bus_param = ov772x_query_bus_param, .controls = ov772x_controls, .num_controls = ARRAY_SIZE(ov772x_controls), - .get_control = ov772x_get_control, - .set_control = ov772x_set_control, - .get_chip_id = ov772x_get_chip_id, +}; + +static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = { + .g_ctrl = ov772x_g_ctrl, + .s_ctrl = ov772x_s_ctrl, + .g_chip_ident = ov772x_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG - .get_register = ov772x_get_register, - .set_register = ov772x_set_register, + .g_register = ov772x_g_register, + .s_register = ov772x_s_register, #endif }; +static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = { + .s_stream = ov772x_s_stream, + .s_fmt = ov772x_s_fmt, + .try_fmt = ov772x_try_fmt, +}; + +static struct v4l2_subdev_ops ov772x_subdev_ops = { + .core = &ov772x_subdev_core_ops, + .video = &ov772x_subdev_video_ops, +}; + /* * i2c_driver function */ @@ -1081,7 +1047,7 @@ static int ov772x_probe(struct i2c_client *client, int ret; if (!icd) { - dev_err(&client->dev, "MT9M001: missing soc-camera data!\n"); + dev_err(&client->dev, "OV772X: missing soc-camera data!\n"); return -EINVAL; } @@ -1102,8 +1068,9 @@ static int ov772x_probe(struct i2c_client *client, if (!priv) return -ENOMEM; - priv->info = info; - i2c_set_clientdata(client, priv); + priv->info = info; + + v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops); icd->ops = &ov772x_ops; icd->width_max = MAX_WIDTH; @@ -1121,7 +1088,7 @@ static int ov772x_probe(struct i2c_client *client, static int ov772x_remove(struct i2c_client *client) { - struct ov772x_priv *priv = i2c_get_clientdata(client); + struct ov772x_priv *priv = to_ov772x(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 8b9b44d8683..bdc0d85c461 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -270,7 +270,7 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) { if (buf->dmas[i].sg_cpu) - dma_free_coherent(ici->dev, buf->dmas[i].sg_size, + dma_free_coherent(ici->v4l2_dev.dev, buf->dmas[i].sg_size, buf->dmas[i].sg_cpu, buf->dmas[i].sg_dma); buf->dmas[i].sg_cpu = NULL; @@ -325,19 +325,20 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev, struct scatterlist **sg_first, int *sg_first_ofs) { struct pxa_cam_dma 
*pxa_dma = &buf->dmas[channel]; + struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct scatterlist *sg; int i, offset, sglen; int dma_len = 0, xfer_len = 0; if (pxa_dma->sg_cpu) - dma_free_coherent(pcdev->soc_host.dev, pxa_dma->sg_size, + dma_free_coherent(dev, pxa_dma->sg_size, pxa_dma->sg_cpu, pxa_dma->sg_dma); sglen = calculate_dma_sglen(*sg_first, dma->sglen, *sg_first_ofs, size); pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc); - pxa_dma->sg_cpu = dma_alloc_coherent(pcdev->soc_host.dev, pxa_dma->sg_size, + pxa_dma->sg_cpu = dma_alloc_coherent(dev, pxa_dma->sg_size, &pxa_dma->sg_dma, GFP_KERNEL); if (!pxa_dma->sg_cpu) return -ENOMEM; @@ -345,7 +346,7 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev, pxa_dma->sglen = sglen; offset = *sg_first_ofs; - dev_dbg(pcdev->soc_host.dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n", + dev_dbg(dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n", *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma); @@ -368,7 +369,7 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev, pxa_dma->sg_cpu[i].ddadr = pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc); - dev_vdbg(pcdev->soc_host.dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n", + dev_vdbg(dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n", pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc), sg_dma_address(sg) + offset, xfer_len); offset = 0; @@ -418,11 +419,12 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; + struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); int ret; int size_y, size_u = 0, size_v = 0; - dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, + dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); /* Added list head initialization on alloc */ @@ -480,8 +482,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y, &sg, &next_ofs); if (ret) { - dev_err(pcdev->soc_host.dev, - "DMA initialization for Y/RGB failed\n"); + dev_err(dev, "DMA initialization for Y/RGB failed\n"); goto fail; } @@ -490,8 +491,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1, size_u, &sg, &next_ofs); if (ret) { - dev_err(pcdev->soc_host.dev, - "DMA initialization for U failed\n"); + dev_err(dev, "DMA initialization for U failed\n"); goto fail_u; } @@ -500,8 +500,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2, size_v, &sg, &next_ofs); if (ret) { - dev_err(pcdev->soc_host.dev, - "DMA initialization for V failed\n"); + dev_err(dev, "DMA initialization for V failed\n"); goto fail_v; } @@ -514,10 +513,10 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, return 0; fail_v: - dma_free_coherent(pcdev->soc_host.dev, buf->dmas[1].sg_size, + dma_free_coherent(dev, buf->dmas[1].sg_size, buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma); fail_u: - dma_free_coherent(pcdev->soc_host.dev, buf->dmas[0].sg_size, + dma_free_coherent(dev, buf->dmas[0].sg_size, buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma); fail: free_buffer(vq, buf); @@ -541,7 +540,7 @@ static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev) active = pcdev->active; for (i = 0; i < pcdev->channels; i++) { - dev_dbg(pcdev->soc_host.dev, "%s (channel=%d) 
ddadr=%08x\n", __func__, + dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s (channel=%d) ddadr=%08x\n", __func__, i, active->dmas[i].sg_dma); DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma; DCSR(pcdev->dma_chans[i]) = DCSR_RUN; @@ -553,7 +552,7 @@ static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev) int i; for (i = 0; i < pcdev->channels; i++) { - dev_dbg(pcdev->soc_host.dev, "%s (channel=%d)\n", __func__, i); + dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s (channel=%d)\n", __func__, i); DCSR(pcdev->dma_chans[i]) = 0; } } @@ -589,7 +588,7 @@ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev) { unsigned long cicr0, cifr; - dev_dbg(pcdev->soc_host.dev, "%s\n", __func__); + dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); /* Reset the FIFOs */ cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F; __raw_writel(cifr, pcdev->base + CIFR); @@ -609,7 +608,7 @@ static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev) __raw_writel(cicr0, pcdev->base + CICR0); pcdev->active = NULL; - dev_dbg(pcdev->soc_host.dev, "%s\n", __func__); + dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); } /* Called under spinlock_irqsave(&pcdev->lock, ...) */ @@ -674,7 +673,8 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev, do_gettimeofday(&vb->ts); vb->field_count++; wake_up(&vb->done); - dev_dbg(pcdev->soc_host.dev, "%s dequeud buffer (vb=0x%p)\n", __func__, vb); + dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeud buffer (vb=0x%p)\n", + __func__, vb); if (list_empty(&pcdev->capture)) { pxa_camera_stop_capture(pcdev); @@ -710,7 +710,8 @@ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev) for (i = 0; i < pcdev->channels; i++) if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP) is_dma_stopped = 0; - dev_dbg(pcdev->soc_host.dev, "%s : top queued buffer=%p, dma_stopped=%d\n", + dev_dbg(pcdev->soc_host.v4l2_dev.dev, + "%s : top queued buffer=%p, dma_stopped=%d\n", __func__, pcdev->active, is_dma_stopped); if (pcdev->active && is_dma_stopped) pxa_camera_start_capture(pcdev); @@ -719,6 +720,7 @@ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev) static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev, enum pxa_camera_active_dma act_dma) { + struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct pxa_buffer *buf; unsigned long flags; u32 status, camera_status, overrun; @@ -735,13 +737,13 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev, overrun |= CISR_IFO_1 | CISR_IFO_2; if (status & DCSR_BUSERR) { - dev_err(pcdev->soc_host.dev, "DMA Bus Error IRQ!\n"); + dev_err(dev, "DMA Bus Error IRQ!\n"); goto out; } if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) { - dev_err(pcdev->soc_host.dev, "Unknown DMA IRQ source, " - "status: 0x%08x\n", status); + dev_err(dev, "Unknown DMA IRQ source, status: 0x%08x\n", + status); goto out; } @@ -764,7 +766,7 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev, buf = container_of(vb, struct pxa_buffer, vb); WARN_ON(buf->inwork || list_empty(&vb->queue)); - dev_dbg(pcdev->soc_host.dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n", + dev_dbg(dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n", __func__, channel, status & DCSR_STARTINTR ? "SOF " : "", status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel)); @@ -775,7 +777,7 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev, */ if (camera_status & overrun && !list_is_last(pcdev->capture.next, &pcdev->capture)) { - dev_dbg(pcdev->soc_host.dev, "FIFO overrun! 
CISR: %x\n", + dev_dbg(dev, "FIFO overrun! CISR: %x\n", camera_status); pxa_camera_stop_capture(pcdev); pxa_camera_start_capture(pcdev); @@ -834,6 +836,7 @@ static u32 mclk_get_divisor(struct platform_device *pdev, struct pxa_camera_dev *pcdev) { unsigned long mclk = pcdev->mclk; + struct device *dev = pcdev->soc_host.v4l2_dev.dev; u32 div; unsigned long lcdclk; @@ -843,7 +846,7 @@ static u32 mclk_get_divisor(struct platform_device *pdev, /* mclk <= ciclk / 4 (27.4.2) */ if (mclk > lcdclk / 4) { mclk = lcdclk / 4; - dev_warn(&pdev->dev, "Limiting master clock to %lu\n", mclk); + dev_warn(dev, "Limiting master clock to %lu\n", mclk); } /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */ @@ -853,7 +856,7 @@ static u32 mclk_get_divisor(struct platform_device *pdev, if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) pcdev->mclk = lcdclk / (2 * (div + 1)); - dev_dbg(&pdev->dev, "LCD clock %luHz, target freq %luHz, divisor %u\n", + dev_dbg(dev, "LCD clock %luHz, target freq %luHz, divisor %u\n", lcdclk, mclk, div); return div; @@ -871,14 +874,15 @@ static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev, static void pxa_camera_activate(struct pxa_camera_dev *pcdev) { struct pxacamera_platform_data *pdata = pcdev->pdata; + struct device *dev = pcdev->soc_host.v4l2_dev.dev; u32 cicr4 = 0; - dev_dbg(pcdev->soc_host.dev, "Registered platform device at %p data %p\n", + dev_dbg(dev, "Registered platform device at %p data %p\n", pcdev, pdata); if (pdata && pdata->init) { - dev_dbg(pcdev->soc_host.dev, "%s: Init gpios\n", __func__); - pdata->init(pcdev->soc_host.dev); + dev_dbg(dev, "%s: Init gpios\n", __func__); + pdata->init(dev); } /* disable all interrupts */ @@ -920,7 +924,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data) struct videobuf_buffer *vb; status = __raw_readl(pcdev->base + CISR); - dev_dbg(pcdev->soc_host.dev, "Camera interrupt status 0x%lx\n", status); + dev_dbg(pcdev->soc_host.v4l2_dev.dev, "Camera interrupt status 0x%lx\n", status); if (!status) return IRQ_NONE; @@ -952,17 +956,11 @@ static int pxa_camera_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; - int ret; - if (pcdev->icd) { - ret = -EBUSY; - goto ebusy; - } + if (pcdev->icd) + return -EBUSY; pxa_camera_activate(pcdev); - ret = icd->ops->init(icd); - if (ret < 0) - goto einit; pcdev->icd = icd; @@ -970,11 +968,6 @@ static int pxa_camera_add_device(struct soc_camera_device *icd) icd->devnum); return 0; - -einit: - pxa_camera_deactivate(pcdev); -ebusy: - return ret; } /* Called with .video_lock held */ @@ -996,8 +989,6 @@ static void pxa_camera_remove_device(struct soc_camera_device *icd) DCSR(pcdev->dma_chans[1]) = 0; DCSR(pcdev->dma_chans[2]) = 0; - icd->ops->release(icd); - pxa_camera_deactivate(pcdev); pcdev->icd = NULL; @@ -1253,7 +1244,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, xlate->cam_fmt = icd->formats + idx; xlate->buswidth = buswidth; xlate++; - dev_dbg(ici->dev, "Providing format %s using %s\n", + dev_dbg(ici->v4l2_dev.dev, "Providing format %s using %s\n", pxa_camera_formats[0].name, icd->formats[idx].name); } @@ -1268,7 +1259,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, xlate->cam_fmt = icd->formats + idx; xlate->buswidth = buswidth; xlate++; - dev_dbg(ici->dev, "Providing format %s packed\n", + dev_dbg(ici->v4l2_dev.dev, "Providing format %s packed\n", icd->formats[idx].name); } break; @@ -1280,7 
+1271,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, xlate->cam_fmt = icd->formats + idx; xlate->buswidth = icd->formats[idx].depth; xlate++; - dev_dbg(ici->dev, + dev_dbg(ici->v4l2_dev.dev, "Providing format %s in pass-through mode\n", icd->formats[idx].name); } @@ -1309,11 +1300,11 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, icd->sense = NULL; if (ret < 0) { - dev_warn(ici->dev, "Failed to crop to %ux%u@%u:%u\n", + dev_warn(ici->v4l2_dev.dev, "Failed to crop to %ux%u@%u:%u\n", rect->width, rect->height, rect->left, rect->top); } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { - dev_err(ici->dev, + dev_err(ici->v4l2_dev.dev, "pixel clock %lu set by the camera too high!", sense.pixel_clock); return -EIO; @@ -1341,7 +1332,7 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { - dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat); + dev_warn(ici->v4l2_dev.dev, "Format %x not found\n", pix->pixelformat); return -EINVAL; } @@ -1352,16 +1343,16 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, icd->sense = &sense; cam_f.fmt.pix.pixelformat = cam_fmt->fourcc; - ret = icd->ops->set_fmt(icd, &cam_f); + ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, s_fmt, f); icd->sense = NULL; if (ret < 0) { - dev_warn(ici->dev, "Failed to configure for format %x\n", + dev_warn(ici->v4l2_dev.dev, "Failed to configure for format %x\n", pix->pixelformat); } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { - dev_err(ici->dev, + dev_err(ici->v4l2_dev.dev, "pixel clock %lu set by the camera too high!", sense.pixel_clock); return -EIO; @@ -1389,7 +1380,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { - dev_warn(ici->dev, "Format %x not found\n", pixfmt); + dev_warn(ici->v4l2_dev.dev, "Format %x not found\n", pixfmt); return -EINVAL; } @@ -1410,7 +1401,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, /* camera has to see its format, but the user the original one */ pix->pixelformat = xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ - ret = icd->ops->try_fmt(icd, f); + ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, try_fmt, f); pix->pixelformat = xlate->host_fmt->fourcc; field = pix->field; @@ -1646,7 +1637,7 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME; pcdev->soc_host.ops = &pxa_soc_camera_host_ops; pcdev->soc_host.priv = pcdev; - pcdev->soc_host.dev = &pdev->dev; + pcdev->soc_host.v4l2_dev.dev = &pdev->dev; pcdev->soc_host.nr = pdev->id; err = soc_camera_host_register(&pcdev->soc_host); diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index e7ac84daf67..5101fa7cdb2 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -347,10 +347,9 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; - int ret = -EBUSY; if (pcdev->icd) - goto err; + return -EBUSY; dev_info(&icd->dev, "SuperH Mobile CEU driver attached to camera %d\n", @@ -358,19 +357,13 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) clk_enable(pcdev->clk); 
- ret = icd->ops->init(icd); - if (ret) { - clk_disable(pcdev->clk); - goto err; - } - ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ while (ceu_read(pcdev, CSTSR) & 1) msleep(1); pcdev->icd = icd; -err: - return ret; + + return 0; } /* Called with .video_lock held */ @@ -396,8 +389,6 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) } spin_unlock_irqrestore(&pcdev->lock, flags); - icd->ops->release(icd); - clk_disable(pcdev->clk); dev_info(&icd->dev, @@ -614,7 +605,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, xlate->cam_fmt = icd->formats + idx; xlate->buswidth = icd->formats[idx].depth; xlate++; - dev_dbg(ici->dev, "Providing format %s using %s\n", + dev_dbg(ici->v4l2_dev.dev, "Providing format %s using %s\n", sh_mobile_ceu_formats[k].name, icd->formats[idx].name); } @@ -627,7 +618,7 @@ add_single_format: xlate->cam_fmt = icd->formats + idx; xlate->buswidth = icd->formats[idx].depth; xlate++; - dev_dbg(ici->dev, + dev_dbg(ici->v4l2_dev.dev, "Providing format %s in pass-through mode\n", icd->formats[idx].name); } @@ -649,18 +640,17 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct sh_mobile_ceu_dev *pcdev = ici->priv; __u32 pixfmt = f->fmt.pix.pixelformat; const struct soc_camera_format_xlate *xlate; - struct v4l2_format cam_f = *f; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { - dev_warn(ici->dev, "Format %x not found\n", pixfmt); + dev_warn(ici->v4l2_dev.dev, "Format %x not found\n", pixfmt); return -EINVAL; } - cam_f.fmt.pix.pixelformat = xlate->cam_fmt->fourcc; - ret = icd->ops->set_fmt(icd, &cam_f); - + f->fmt.pix.pixelformat = xlate->cam_fmt->fourcc; + ret = v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, s_fmt, f); + f->fmt.pix.pixelformat = pixfmt; if (!ret) { icd->buswidth = xlate->buswidth; icd->current_fmt = xlate->host_fmt; @@ -681,7 +671,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { - dev_warn(ici->dev, "Format %x not found\n", pixfmt); + dev_warn(ici->v4l2_dev.dev, "Format %x not found\n", pixfmt); return -EINVAL; } @@ -694,8 +684,11 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, DIV_ROUND_UP(xlate->host_fmt->depth, 8); f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; + f->fmt.pix.pixelformat = xlate->cam_fmt->fourcc; + /* limit to sensor capabilities */ - ret = icd->ops->try_fmt(icd, f); + ret = v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, try_fmt, f); + f->fmt.pix.pixelformat = pixfmt; if (ret < 0) return ret; @@ -771,7 +764,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q, videobuf_queue_dma_contig_init(q, &sh_mobile_ceu_videobuf_ops, - ici->dev, &pcdev->lock, + ici->v4l2_dev.dev, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, pcdev->is_interlaced ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE, @@ -794,7 +787,7 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = { .init_videobuf = sh_mobile_ceu_init_videobuf, }; -static int sh_mobile_ceu_probe(struct platform_device *pdev) +static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev) { struct sh_mobile_ceu_dev *pcdev; struct resource *res; @@ -867,7 +860,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) pm_runtime_resume(&pdev->dev); pcdev->ici.priv = pcdev; - pcdev->ici.dev = &pdev->dev; + pcdev->ici.v4l2_dev.dev = &pdev->dev; pcdev->ici.nr = pdev->id; pcdev->ici.drv_name = dev_name(&pdev->dev); pcdev->ici.ops = &sh_mobile_ceu_host_ops; @@ -891,7 +884,7 @@ exit: return err; } -static int sh_mobile_ceu_remove(struct platform_device *pdev) +static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, @@ -929,7 +922,7 @@ static struct platform_driver sh_mobile_ceu_driver = { .pm = &sh_mobile_ceu_dev_pm_ops, }, .probe = sh_mobile_ceu_probe, - .remove = sh_mobile_ceu_remove, + .remove = __exit_p(sh_mobile_ceu_remove), }; static int __init sh_mobile_ceu_init(void) diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 20ef5c773fa..0a1cb40bfbf 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -170,8 +170,6 @@ static int soc_camera_reqbufs(struct file *file, void *priv, WARN_ON(priv != file->private_data); - dev_dbg(&icd->dev, "%s: %d\n", __func__, p->memory); - ret = videobuf_reqbufs(&icf->vb_vidq, p); if (ret < 0) return ret; @@ -285,7 +283,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf, return ret; } else if (!icd->current_fmt || icd->current_fmt->fourcc != pix->pixelformat) { - dev_err(ici->dev, + dev_err(ici->v4l2_dev.dev, "Host driver hasn't set up current format correctly!\n"); return -EINVAL; } @@ -308,20 +306,13 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf, static int soc_camera_open(struct file *file) { - struct video_device *vdev; - struct soc_camera_device *icd; + struct video_device *vdev = video_devdata(file); + struct soc_camera_device *icd = container_of(vdev->parent, struct soc_camera_device, dev); + struct soc_camera_link *icl = to_soc_camera_link(icd); struct soc_camera_host *ici; struct soc_camera_file *icf; int ret; - /* - * It is safe to dereference these pointers now as long as a user has - * the video device open - we are protected by the held cdev reference. 
- */ - - vdev = video_devdata(file); - icd = container_of(vdev->parent, struct soc_camera_device, dev); - if (!icd->ops) /* No device driver attached */ return -ENODEV; @@ -332,12 +323,6 @@ static int soc_camera_open(struct file *file) if (!icf) return -ENOMEM; - if (!try_module_get(icd->ops->owner)) { - dev_err(&icd->dev, "Couldn't lock sensor driver.\n"); - ret = -EINVAL; - goto emgd; - } - if (!try_module_get(ici->ops->owner)) { dev_err(&icd->dev, "Couldn't lock capture bus driver.\n"); ret = -EINVAL; @@ -366,47 +351,65 @@ static int soc_camera_open(struct file *file) if (ret < 0) goto eiufmt; - dev_dbg(&icd->dev, "Using fmt %x\n", icd->current_fmt->fourcc); - f.fmt.pix.pixelformat = icd->current_fmt->fourcc; f.fmt.pix.colorspace = icd->current_fmt->colorspace; + if (icl->power) { + ret = icl->power(icd->pdev, 1); + if (ret < 0) + goto epower; + } + + /* The camera could have been already on, try to reset */ + if (icl->reset) + icl->reset(icd->pdev); + ret = ici->ops->add(icd); if (ret < 0) { dev_err(&icd->dev, "Couldn't activate the camera: %d\n", ret); goto eiciadd; } + if (icd->ops->init) { + ret = icd->ops->init(icd); + if (ret < 0) + goto einit; + } + /* Try to configure with default parameters */ ret = soc_camera_set_fmt(icf, &f); if (ret < 0) goto esfmt; } - mutex_unlock(&icd->video_lock); - file->private_data = icf; dev_dbg(&icd->dev, "camera device open\n"); ici->ops->init_videobuf(&icf->vb_vidq, icd); + mutex_unlock(&icd->video_lock); + return 0; /* - * First three errors are entered with the .video_lock held + * First five errors are entered with the .video_lock held * and use_count == 1 */ esfmt: + if (icd->ops->release) + icd->ops->release(icd); +einit: ici->ops->remove(icd); eiciadd: + if (icl->power) + icl->power(icd->pdev, 0); +epower: soc_camera_free_user_formats(icd); eiufmt: icd->use_count--; mutex_unlock(&icd->video_lock); module_put(ici->ops->owner); emgi: - module_put(icd->ops->owner); -emgd: vfree(icf); return ret; } @@ -421,13 +424,18 @@ static int soc_camera_close(struct file *file) mutex_lock(&icd->video_lock); icd->use_count--; if (!icd->use_count) { + struct soc_camera_link *icl = to_soc_camera_link(icd); + + if (icd->ops->release) + icd->ops->release(icd); ici->ops->remove(icd); + if (icl->power) + icl->power(icd->pdev, 0); soc_camera_free_user_formats(icd); } mutex_unlock(&icd->video_lock); - module_put(icd->ops->owner); module_put(ici->ops->owner); vfree(icf); @@ -575,18 +583,17 @@ static int soc_camera_streamon(struct file *file, void *priv, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); int ret; WARN_ON(priv != file->private_data); - dev_dbg(&icd->dev, "%s\n", __func__); - if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; mutex_lock(&icd->video_lock); - icd->ops->start_capture(icd); + v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, s_stream, 1); /* This calls buf_queue from host driver's videobuf_queue_ops */ ret = videobuf_streamon(&icf->vb_vidq); @@ -601,11 +608,10 @@ static int soc_camera_streamoff(struct file *file, void *priv, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); WARN_ON(priv != file->private_data); - dev_dbg(&icd->dev, "%s\n", __func__); - if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; @@ -615,7 +621,7 @@ static int soc_camera_streamoff(struct file *file, void *priv, * remaining 
buffers. When the last buffer is freed, stop capture */ videobuf_streamoff(&icf->vb_vidq); - icd->ops->stop_capture(icd); + v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, s_stream, 0); mutex_unlock(&icd->video_lock); @@ -649,6 +655,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); WARN_ON(priv != file->private_data); @@ -665,9 +672,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv, return 0; } - if (icd->ops->get_control) - return icd->ops->get_control(icd, ctrl); - return -EINVAL; + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, g_ctrl, ctrl); } static int soc_camera_s_ctrl(struct file *file, void *priv, @@ -675,12 +680,11 @@ static int soc_camera_s_ctrl(struct file *file, void *priv, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); WARN_ON(priv != file->private_data); - if (icd->ops->set_control) - return icd->ops->set_control(icd, ctrl); - return -EINVAL; + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, s_ctrl, ctrl); } static int soc_camera_cropcap(struct file *file, void *fh, @@ -751,11 +755,9 @@ static int soc_camera_g_chip_ident(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - if (!icd->ops->get_chip_id) - return -EINVAL; - - return icd->ops->get_chip_id(icd, id); + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, g_chip_ident, id); } #ifdef CONFIG_VIDEO_ADV_DEBUG @@ -764,11 +766,9 @@ static int soc_camera_g_register(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - if (!icd->ops->get_register) - return -EINVAL; - - return icd->ops->get_register(icd, reg); + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, g_register, reg); } static int soc_camera_s_register(struct file *file, void *fh, @@ -776,11 +776,9 @@ static int soc_camera_s_register(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - if (!icd->ops->set_register) - return -EINVAL; - - return icd->ops->set_register(icd, reg); + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, s_register, reg); } #endif @@ -794,7 +792,7 @@ static void scan_add_host(struct soc_camera_host *ici) list_for_each_entry(icd, &devices, list) { if (icd->iface == ici->nr) { int ret; - icd->dev.parent = ici->dev; + icd->dev.parent = ici->v4l2_dev.dev; dev_set_name(&icd->dev, "%u-%u", icd->iface, icd->devnum); ret = device_register(&icd->dev); @@ -814,7 +812,9 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd, struct soc_camera_link *icl) { struct i2c_client *client; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id); + struct v4l2_subdev *subdev; int ret; if (!adap) { @@ -826,17 +826,16 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd, icl->board_info->platform_data = icd; - client = i2c_new_device(adap, 
icl->board_info); - if (!client) { + subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap, + icl->module_name, icl->board_info, NULL); + if (!subdev) { ret = -ENOMEM; goto ei2cnd; } - /* - * We set icd drvdata at two locations - here and in - * soc_camera_video_start(). Depending on the module loading / - * initialisation order one of these locations will be entered first - */ + subdev->grp_id = (__u32)icd; + client = subdev->priv; + /* Use to_i2c_client(dev) to recover the i2c client */ dev_set_drvdata(&icd->dev, &client->dev); @@ -852,6 +851,7 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd) struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); dev_set_drvdata(&icd->dev, NULL); + v4l2_device_unregister_subdev(i2c_get_clientdata(client)); i2c_unregister_device(client); i2c_put_adapter(client->adapter); } @@ -860,16 +860,37 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd) #define soc_camera_free_i2c(icd) do {} while (0) #endif +static int soc_camera_video_start(struct soc_camera_device *icd); static int video_dev_create(struct soc_camera_device *icd); /* Called during host-driver probe */ static int soc_camera_probe(struct device *dev) { struct soc_camera_device *icd = to_soc_camera_dev(dev); + struct soc_camera_host *ici = to_soc_camera_host(dev->parent); struct soc_camera_link *icl = to_soc_camera_link(icd); + struct device *control = NULL; int ret; dev_info(dev, "Probing %s\n", dev_name(dev)); + if (icl->power) { + ret = icl->power(icd->pdev, 1); + if (ret < 0) { + dev_err(dev, + "Platform failed to power-on the camera.\n"); + goto epower; + } + } + + /* The camera could have been already on, try to reset */ + if (icl->reset) + icl->reset(icd->pdev); + + ret = ici->ops->add(icd); + if (ret < 0) + goto eadd; + + /* Must have icd->vdev before registering the device */ ret = video_dev_create(icd); if (ret < 0) goto evdc; @@ -883,34 +904,61 @@ static int soc_camera_probe(struct device *dev) ret = -EINVAL; goto eadddev; } else { + if (icl->module_name) + ret = request_module(icl->module_name); + ret = icl->add_device(icl, &icd->dev); if (ret < 0) goto eadddev; - } - ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, icd->vdev->minor); - if (ret < 0) { - dev_err(&icd->dev, "video_register_device failed: %d\n", ret); - goto evidregd; + /* FIXME: this is racy, have to use driver-binding notification */ + control = to_soc_camera_control(icd); + if (!control || !control->driver || + !try_module_get(control->driver->owner)) { + icl->del_device(icl); + goto enodrv; + } } + /* ..._video_start() will create a device node, so we have to protect */ + mutex_lock(&icd->video_lock); + + ret = soc_camera_video_start(icd); + if (ret < 0) + goto evidstart; + /* Do we have to sysfs_remove_link() before device_unregister()? 
*/ if (to_soc_camera_control(icd) && sysfs_create_link(&icd->dev.kobj, &to_soc_camera_control(icd)->kobj, "control")) dev_warn(&icd->dev, "Failed creating the control symlink\n"); + ici->ops->remove(icd); + + if (icl->power) + icl->power(icd->pdev, 0); + + mutex_unlock(&icd->video_lock); return 0; -evidregd: - if (icl->board_info) +evidstart: + mutex_unlock(&icd->video_lock); + if (icl->board_info) { soc_camera_free_i2c(icd); - else + } else { icl->del_device(icl); + module_put(control->driver->owner); + } +enodrv: eadddev: video_device_release(icd->vdev); evdc: + ici->ops->remove(icd); +eadd: + if (icl->power) + icl->power(icd->pdev, 0); +epower: return ret; } @@ -931,10 +979,16 @@ static int soc_camera_remove(struct device *dev) mutex_unlock(&icd->video_lock); } - if (icl->board_info) + if (icl->board_info) { soc_camera_free_i2c(icd); - else - icl->del_device(icl); + } else { + struct device_driver *drv = to_soc_camera_control(icd) ? + to_soc_camera_control(icd)->driver : NULL; + if (drv) { + icl->del_device(icl); + module_put(drv->owner); + } + } return 0; } @@ -984,6 +1038,7 @@ static void dummy_release(struct device *dev) int soc_camera_host_register(struct soc_camera_host *ici) { struct soc_camera_host *ix; + int ret; if (!ici || !ici->ops || !ici->ops->try_fmt || @@ -996,18 +1051,20 @@ int soc_camera_host_register(struct soc_camera_host *ici) !ici->ops->add || !ici->ops->remove || !ici->ops->poll || - !ici->dev) + !ici->v4l2_dev.dev) return -EINVAL; mutex_lock(&list_lock); list_for_each_entry(ix, &hosts, list) { if (ix->nr == ici->nr) { - mutex_unlock(&list_lock); - return -EBUSY; + ret = -EBUSY; + goto edevreg; } } - dev_set_drvdata(ici->dev, ici); + ret = v4l2_device_register(ici->v4l2_dev.dev, &ici->v4l2_dev); + if (ret < 0) + goto edevreg; list_add_tail(&ici->list, &hosts); mutex_unlock(&list_lock); @@ -1015,6 +1072,10 @@ int soc_camera_host_register(struct soc_camera_host *ici) scan_add_host(ici); return 0; + +edevreg: + mutex_unlock(&list_lock); + return ret; } EXPORT_SYMBOL(soc_camera_host_register); @@ -1028,7 +1089,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici) list_del(&ici->list); list_for_each_entry(icd, &devices, list) { - if (icd->dev.parent == ici->dev) { + if (icd->iface == ici->nr) { /* The bus->remove will be called */ device_unregister(&icd->dev); /* Not before device_unregister(), .remove @@ -1043,7 +1104,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici) mutex_unlock(&list_lock); - dev_set_drvdata(ici->dev, NULL); + v4l2_device_unregister(&ici->v4l2_dev); } EXPORT_SYMBOL(soc_camera_host_unregister); @@ -1123,7 +1184,6 @@ static int video_dev_create(struct soc_camera_device *icd) if (!vdev) return -ENOMEM; - dev_dbg(ici->dev, "Allocated video_device %p\n", vdev); strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name)); @@ -1141,50 +1201,35 @@ static int video_dev_create(struct soc_camera_device *icd) } /* - * Usually called from the struct soc_camera_ops .probe() method, i.e., from - * soc_camera_probe() above with .video_lock held + * Called from soc_camera_probe() above (with .video_lock held???) 
*/ -int soc_camera_video_start(struct soc_camera_device *icd, struct device *dev) +static int soc_camera_video_start(struct soc_camera_device *icd) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); const struct v4l2_queryctrl *qctrl; + int ret; if (!icd->dev.parent) return -ENODEV; if (!icd->ops || - !icd->ops->init || - !icd->ops->release || - !icd->ops->start_capture || - !icd->ops->stop_capture || - !icd->ops->set_fmt || - !icd->ops->try_fmt || !icd->ops->query_bus_param || !icd->ops->set_bus_param) return -EINVAL; - /* See comment in soc_camera_probe() */ - dev_set_drvdata(&icd->dev, dev); + ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, + icd->vdev->minor); + if (ret < 0) { + dev_err(&icd->dev, "video_register_device failed: %d\n", ret); + return ret; + } qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_GAIN); icd->gain = qctrl ? qctrl->default_value : (unsigned short)~0; qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); icd->exposure = qctrl ? qctrl->default_value : (unsigned short)~0; - return ici->ops->add(icd); -} -EXPORT_SYMBOL(soc_camera_video_start); - -/* Called from client .remove() methods with .video_lock held */ -void soc_camera_video_stop(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - - dev_dbg(&icd->dev, "%s\n", __func__); - - ici->ops->remove(icd); + return 0; } -EXPORT_SYMBOL(soc_camera_video_stop); static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev) { @@ -1200,6 +1245,7 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev) return -ENOMEM; icd->iface = icl->bus_id; + icd->pdev = &pdev->dev; platform_set_drvdata(pdev, icd); icd->dev.platform_data = icl; diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index d84c134f8d5..8168cf470eb 100644 --- a/drivers/media/video/soc_camera_platform.c +++ b/drivers/media/video/soc_camera_platform.c @@ -16,11 +16,12 @@ #include #include #include -#include +#include #include #include struct soc_camera_platform_priv { + struct v4l2_subdev subdev; struct soc_camera_data_format format; }; @@ -31,36 +32,10 @@ soc_camera_platform_get_info(struct soc_camera_device *icd) return pdev->dev.platform_data; } -static int soc_camera_platform_init(struct soc_camera_device *icd) +static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable) { - struct soc_camera_link *icl = to_soc_camera_link(icd); - - if (icl->power) - icl->power(dev_get_drvdata(&icd->dev), 1); - - return 0; -} - -static int soc_camera_platform_release(struct soc_camera_device *icd) -{ - struct soc_camera_link *icl = to_soc_camera_link(icd); - - if (icl->power) - icl->power(dev_get_drvdata(&icd->dev), 0); - - return 0; -} - -static int soc_camera_platform_start_capture(struct soc_camera_device *icd) -{ - struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd); - return p->set_capture(p, 1); -} - -static int soc_camera_platform_stop_capture(struct soc_camera_device *icd) -{ - struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd); - return p->set_capture(p, 0); + struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); + return p->set_capture(p, enable); } static int soc_camera_platform_set_bus_param(struct soc_camera_device *icd, @@ -82,16 +57,10 @@ static int soc_camera_platform_set_crop(struct soc_camera_device *icd, return 0; } -static int soc_camera_platform_set_fmt(struct soc_camera_device *icd, +static int soc_camera_platform_try_fmt(struct 
v4l2_subdev *sd, struct v4l2_format *f) { - return 0; -} - -static int soc_camera_platform_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd); + struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); struct v4l2_pix_format *pix = &f->fmt.pix; pix->width = p->format.width; @@ -99,12 +68,11 @@ static int soc_camera_platform_try_fmt(struct soc_camera_device *icd, return 0; } -static int soc_camera_platform_video_probe(struct soc_camera_device *icd, - struct platform_device *pdev) +static void soc_camera_platform_video_probe(struct soc_camera_device *icd, + struct platform_device *pdev) { struct soc_camera_platform_priv *priv = platform_get_drvdata(pdev); struct soc_camera_platform_info *p = pdev->dev.platform_data; - int ret; priv->format.name = p->format_name; priv->format.depth = p->format_depth; @@ -113,28 +81,29 @@ static int soc_camera_platform_video_probe(struct soc_camera_device *icd, icd->formats = &priv->format; icd->num_formats = 1; - - /* ..._video_start() does dev_set_drvdata(&icd->dev, &pdev->dev) */ - ret = soc_camera_video_start(icd, &pdev->dev); - soc_camera_video_stop(icd); - return ret; } +static struct v4l2_subdev_core_ops platform_subdev_core_ops; + +static struct v4l2_subdev_video_ops platform_subdev_video_ops = { + .s_stream = soc_camera_platform_s_stream, + .try_fmt = soc_camera_platform_try_fmt, +}; + +static struct v4l2_subdev_ops platform_subdev_ops = { + .core = &platform_subdev_core_ops, + .video = &platform_subdev_video_ops, +}; + static struct soc_camera_ops soc_camera_platform_ops = { - .owner = THIS_MODULE, - .init = soc_camera_platform_init, - .release = soc_camera_platform_release, - .start_capture = soc_camera_platform_start_capture, - .stop_capture = soc_camera_platform_stop_capture, .set_crop = soc_camera_platform_set_crop, - .set_fmt = soc_camera_platform_set_fmt, - .try_fmt = soc_camera_platform_try_fmt, .set_bus_param = soc_camera_platform_set_bus_param, .query_bus_param = soc_camera_platform_query_bus_param, }; static int soc_camera_platform_probe(struct platform_device *pdev) { + struct soc_camera_host *ici; struct soc_camera_platform_priv *priv; struct soc_camera_platform_info *p = pdev->dev.platform_data; struct soc_camera_device *icd; @@ -143,35 +112,48 @@ static int soc_camera_platform_probe(struct platform_device *pdev) if (!p) return -EINVAL; + if (!p->dev) { + dev_err(&pdev->dev, + "Platform has not set soc_camera_device pointer!\n"); + return -EINVAL; + } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - platform_set_drvdata(pdev, priv); - icd = to_soc_camera_dev(p->dev); - if (!icd) - goto enoicd; - icd->ops = &soc_camera_platform_ops; + platform_set_drvdata(pdev, priv); dev_set_drvdata(&icd->dev, &pdev->dev); + icd->width_min = 0; icd->width_max = p->format.width; icd->height_min = 0; icd->height_max = p->format.height; icd->y_skip_top = 0; + icd->ops = &soc_camera_platform_ops; - ret = soc_camera_platform_video_probe(icd, pdev); - if (ret) { - icd->ops = NULL; - kfree(priv); - } + ici = to_soc_camera_host(icd->dev.parent); + + soc_camera_platform_video_probe(icd, pdev); + + v4l2_subdev_init(&priv->subdev, &platform_subdev_ops); + v4l2_set_subdevdata(&priv->subdev, p); + priv->subdev.grp_id = (__u32)icd; + strncpy(priv->subdev.name, dev_name(&pdev->dev), V4L2_SUBDEV_NAME_SIZE); + + ret = v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev); + if (ret) + goto evdrs; return ret; -enoicd: +evdrs: + icd->ops = NULL; + 
platform_set_drvdata(pdev, NULL); kfree(priv); - return -EINVAL; + return ret; } static int soc_camera_platform_remove(struct platform_device *pdev) @@ -180,7 +162,9 @@ static int soc_camera_platform_remove(struct platform_device *pdev) struct soc_camera_platform_info *p = pdev->dev.platform_data; struct soc_camera_device *icd = to_soc_camera_dev(p->dev); + v4l2_device_unregister_subdev(&priv->subdev); icd->ops = NULL; + platform_set_drvdata(pdev, NULL); kfree(priv); return 0; } @@ -188,6 +172,7 @@ static int soc_camera_platform_remove(struct platform_device *pdev) static struct platform_driver soc_camera_platform_driver = { .driver = { .name = "soc_camera_platform", + .owner = THIS_MODULE, }, .probe = soc_camera_platform_probe, .remove = soc_camera_platform_remove, diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index d780a509faa..a006df1d28e 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -223,6 +223,7 @@ struct tw9910_hsync_ctrl { }; struct tw9910_priv { + struct v4l2_subdev subdev; struct tw9910_video_info *info; const struct tw9910_scale_ctrl *scale; }; @@ -354,6 +355,11 @@ static const struct tw9910_hsync_ctrl tw9910_hsync_ctrl = { /* * general function */ +static struct tw9910_priv *to_tw9910(const struct i2c_client *client) +{ + return container_of(i2c_get_clientdata(client), struct tw9910_priv, subdev); +} + static int tw9910_set_scale(struct i2c_client *client, const struct tw9910_scale_ctrl *scale) { @@ -507,47 +513,20 @@ tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height) /* * soc_camera_ops function */ -static int tw9910_init(struct soc_camera_device *icd) +static int tw9910_s_stream(struct v4l2_subdev *sd, int enable) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - int ret = 0; + struct i2c_client *client = sd->priv; + struct tw9910_priv *priv = to_tw9910(client); - if (icl->power) { - ret = icl->power(&client->dev, 1); - if (ret < 0) - return ret; - } - - if (icl->reset) - ret = icl->reset(&client->dev); - - return ret; -} - -static int tw9910_release(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct soc_camera_link *icl = to_soc_camera_link(icd); - int ret = 0; - - if (icl->power) - ret = icl->power(&client->dev, 0); - - return ret; -} - -static int tw9910_start_capture(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct tw9910_priv *priv = i2c_get_clientdata(client); + if (!enable) + return 0; if (!priv->scale) { - dev_err(&icd->dev, "norm select error\n"); + dev_err(&client->dev, "norm select error\n"); return -EPERM; } - dev_dbg(&icd->dev, "%s %dx%d\n", + dev_dbg(&client->dev, "%s %dx%d\n", priv->scale->name, priv->scale->width, priv->scale->height); @@ -555,11 +534,6 @@ static int tw9910_start_capture(struct soc_camera_device *icd) return 0; } -static int tw9910_stop_capture(struct soc_camera_device *icd) -{ - return 0; -} - static int tw9910_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { @@ -569,7 +543,7 @@ static int tw9910_set_bus_param(struct soc_camera_device *icd, static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct tw9910_priv *priv = i2c_get_clientdata(client); + struct 
tw9910_priv *priv = to_tw9910(client); struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH | @@ -578,21 +552,11 @@ static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd) return soc_camera_apply_sensor_flags(icl, flags); } -static int tw9910_get_chip_id(struct soc_camera_device *icd, - struct v4l2_dbg_chip_ident *id) -{ - id->ident = V4L2_IDENT_TW9910; - id->revision = 0; - - return 0; -} - -static int tw9910_set_std(struct soc_camera_device *icd, - v4l2_std_id *a) +static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { int ret = -EINVAL; - if (*a & (V4L2_STD_NTSC | V4L2_STD_PAL)) + if (norm & (V4L2_STD_NTSC | V4L2_STD_PAL)) ret = 0; return ret; @@ -608,11 +572,20 @@ static int tw9910_enum_input(struct soc_camera_device *icd, return 0; } +static int tw9910_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *id) +{ + id->ident = V4L2_IDENT_TW9910; + id->revision = 0; + + return 0; +} + #ifdef CONFIG_VIDEO_ADV_DEBUG -static int tw9910_get_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int tw9910_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; int ret; if (reg->reg > 0xff) @@ -630,10 +603,10 @@ static int tw9910_get_register(struct soc_camera_device *icd, return 0; } -static int tw9910_set_register(struct soc_camera_device *icd, - struct v4l2_dbg_register *reg) +static int tw9910_s_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_client *client = sd->priv; if (reg->reg > 0xff || reg->val > 0xff) @@ -647,7 +620,7 @@ static int tw9910_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct tw9910_priv *priv = i2c_get_clientdata(client); + struct tw9910_priv *priv = to_tw9910(client); int ret = -EINVAL; u8 val; @@ -736,9 +709,10 @@ tw9910_set_fmt_error: return ret; } -static int tw9910_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { + struct i2c_client *client = sd->priv; + struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { .left = icd->x_current, @@ -761,16 +735,17 @@ static int tw9910_set_fmt(struct soc_camera_device *icd, return tw9910_set_crop(icd, &rect); } -static int tw9910_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) +static int tw9910_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { + struct i2c_client *client = sd->priv; + struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; const struct tw9910_scale_ctrl *scale; if (V4L2_FIELD_ANY == pix->field) { pix->field = V4L2_FIELD_INTERLACED; } else if (V4L2_FIELD_INTERLACED != pix->field) { - dev_err(&icd->dev, "Field type invalid.\n"); + dev_err(&client->dev, "Field type invalid.\n"); return -EINVAL; } @@ -790,9 +765,8 @@ static int tw9910_try_fmt(struct soc_camera_device *icd, static int tw9910_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { - struct tw9910_priv *priv = i2c_get_clientdata(client); + struct tw9910_priv *priv = to_tw9910(client); s32 val; - int 
ret; /* * We must have a parent by now. And it cannot be a wrong one. @@ -814,18 +788,11 @@ static int tw9910_video_probe(struct soc_camera_device *icd, icd->formats = tw9910_color_fmt; icd->num_formats = ARRAY_SIZE(tw9910_color_fmt); - /* Switch master clock on */ - ret = soc_camera_video_start(icd, &client->dev); - if (ret) - return ret; - /* * check and show Product ID */ val = i2c_smbus_read_byte_data(client, ID); - soc_camera_video_stop(icd); - if (0x0B != GET_ID(val) || 0x00 != GET_ReV(val)) { dev_err(&icd->dev, @@ -839,29 +806,36 @@ static int tw9910_video_probe(struct soc_camera_device *icd, icd->vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL; icd->vdev->current_norm = V4L2_STD_NTSC; - return ret; + return 0; } static struct soc_camera_ops tw9910_ops = { - .owner = THIS_MODULE, - .init = tw9910_init, - .release = tw9910_release, - .start_capture = tw9910_start_capture, - .stop_capture = tw9910_stop_capture, .set_crop = tw9910_set_crop, - .set_fmt = tw9910_set_fmt, - .try_fmt = tw9910_try_fmt, .set_bus_param = tw9910_set_bus_param, .query_bus_param = tw9910_query_bus_param, - .get_chip_id = tw9910_get_chip_id, - .set_std = tw9910_set_std, .enum_input = tw9910_enum_input, +}; + +static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = { + .g_chip_ident = tw9910_g_chip_ident, + .s_std = tw9910_s_std, #ifdef CONFIG_VIDEO_ADV_DEBUG - .get_register = tw9910_get_register, - .set_register = tw9910_set_register, + .g_register = tw9910_g_register, + .s_register = tw9910_s_register, #endif }; +static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = { + .s_stream = tw9910_s_stream, + .s_fmt = tw9910_s_fmt, + .try_fmt = tw9910_try_fmt, +}; + +static struct v4l2_subdev_ops tw9910_subdev_ops = { + .core = &tw9910_subdev_core_ops, + .video = &tw9910_subdev_video_ops, +}; + /* * i2c_driver function */ @@ -902,7 +876,8 @@ static int tw9910_probe(struct i2c_client *client, return -ENOMEM; priv->info = info; - i2c_set_clientdata(client, priv); + + v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops); icd->ops = &tw9910_ops; icd->iface = info->link.bus_id; @@ -942,7 +917,7 @@ static int tw9910_probe(struct i2c_client *client, static int tw9910_remove(struct i2c_client *client) { - struct tw9910_priv *priv = i2c_get_clientdata(client); + struct tw9910_priv *priv = to_tw9910(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index d8b4256126a..3bc5b6b20f6 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -16,10 +16,12 @@ #include #include #include +#include struct soc_camera_device { struct list_head list; struct device dev; + struct device *pdev; /* Platform device */ unsigned short width; /* Current window */ unsigned short height; /* sizes */ unsigned short x_min; /* Camera capabilities */ @@ -45,7 +47,6 @@ struct soc_camera_device { struct soc_camera_format_xlate *user_formats; int num_user_formats; enum v4l2_field field; /* Preserve field over close() */ - struct module *owner; void *host_priv; /* Per-device host private data */ /* soc_camera.c private count. 
Only accessed with .video_lock held */ int use_count; @@ -58,8 +59,8 @@ struct soc_camera_file { }; struct soc_camera_host { + struct v4l2_device v4l2_dev; struct list_head list; - struct device *dev; unsigned char nr; /* Host number */ void *priv; const char *drv_name; @@ -127,7 +128,9 @@ static inline struct soc_camera_device *to_soc_camera_dev(struct device *dev) static inline struct soc_camera_host *to_soc_camera_host(struct device *dev) { - return dev_get_drvdata(dev); + struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); + + return container_of(v4l2_dev, struct soc_camera_host, v4l2_dev); } static inline struct soc_camera_link *to_soc_camera_link(struct soc_camera_device *icd) @@ -143,9 +146,6 @@ static inline struct device *to_soc_camera_control(struct soc_camera_device *icd int soc_camera_host_register(struct soc_camera_host *ici); void soc_camera_host_unregister(struct soc_camera_host *ici); -int soc_camera_video_start(struct soc_camera_device *icd, struct device *dev); -void soc_camera_video_stop(struct soc_camera_device *icd); - const struct soc_camera_data_format *soc_camera_format_by_fourcc( struct soc_camera_device *icd, unsigned int fourcc); const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( @@ -176,28 +176,17 @@ struct soc_camera_format_xlate { }; struct soc_camera_ops { - struct module *owner; int (*suspend)(struct soc_camera_device *, pm_message_t state); int (*resume)(struct soc_camera_device *); int (*init)(struct soc_camera_device *); int (*release)(struct soc_camera_device *); - int (*start_capture)(struct soc_camera_device *); - int (*stop_capture)(struct soc_camera_device *); int (*set_crop)(struct soc_camera_device *, struct v4l2_rect *); - int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *); - int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *); unsigned long (*query_bus_param)(struct soc_camera_device *); int (*set_bus_param)(struct soc_camera_device *, unsigned long); int (*get_chip_id)(struct soc_camera_device *, struct v4l2_dbg_chip_ident *); int (*set_std)(struct soc_camera_device *, v4l2_std_id *); int (*enum_input)(struct soc_camera_device *, struct v4l2_input *); -#ifdef CONFIG_VIDEO_ADV_DEBUG - int (*get_register)(struct soc_camera_device *, struct v4l2_dbg_register *); - int (*set_register)(struct soc_camera_device *, struct v4l2_dbg_register *); -#endif - int (*get_control)(struct soc_camera_device *, struct v4l2_control *); - int (*set_control)(struct soc_camera_device *, struct v4l2_control *); const struct v4l2_queryctrl *controls; int num_controls; }; -- cgit v1.2.3-70-g09d2 From 3418f165cc1cbcb30a8c017d7ebbc774710066bb Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:44:14 -0300 Subject: V4L/DVB (12511): V4L2: add a new V4L2_CID_BAND_STOP_FILTER integer control Add a new V4L2_CID_BAND_STOP_FILTER integer control, which either switches the band-stop filter off, or sets it to a certain strength. 
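For illustration only (not part of this patch), a minimal user-space sketch of exercising the new control through the standard VIDIOC_S_CTRL ioctl could look as follows. The /dev/video0 node and the strength value 2 are assumptions made for the example; the valid range is driver-specific and can be queried with VIDIOC_QUERYCTRL, with 0 switching the filter off.

/* Sketch: set the band-stop filter strength on a capture device. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_BAND_STOP_FILTER,
		.value	= 2,	/* 0 would disable the filter */
	};
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");

	close(fd);
	return 0;
}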
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- include/linux/videodev2.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 3689d7d81fe..b59e78c5716 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -910,9 +910,10 @@ enum v4l2_colorfx { V4L2_COLORFX_SEPIA = 2, }; #define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32) +#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33) /* last CID + 1 */ -#define V4L2_CID_LASTP1 (V4L2_CID_BASE+33) +#define V4L2_CID_LASTP1 (V4L2_CID_BASE+34) /* MPEG-class control IDs defined by V4L2 */ #define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) -- cgit v1.2.3-70-g09d2 From 2840d2497b912f25d2957477faa1c922ddd733e0 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:44:15 -0300 Subject: V4L/DVB (12513): soc-camera: add support for camera-host controls Until now soc-camera only supported client (sensor) controls. This patch enables camera-host drivers to implement their own controls too. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/soc_camera.c | 24 ++++++++++++++++++++++++ include/media/soc_camera.h | 4 ++++ 2 files changed, 28 insertions(+) (limited to 'include') diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 0a1cb40bfbf..b3fb8f290ad 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -633,6 +633,7 @@ static int soc_camera_queryctrl(struct file *file, void *priv, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); int i; WARN_ON(priv != file->private_data); @@ -640,6 +641,15 @@ static int soc_camera_queryctrl(struct file *file, void *priv, if (!qc->id) return -EINVAL; + /* First check host controls */ + for (i = 0; i < ici->ops->num_controls; i++) + if (qc->id == ici->ops->controls[i].id) { + memcpy(qc, &(ici->ops->controls[i]), + sizeof(*qc)); + return 0; + } + + /* Then device controls */ for (i = 0; i < icd->ops->num_controls; i++) if (qc->id == icd->ops->controls[i].id) { memcpy(qc, &(icd->ops->controls[i]), @@ -656,6 +666,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv, struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + int ret; WARN_ON(priv != file->private_data); @@ -672,6 +683,12 @@ static int soc_camera_g_ctrl(struct file *file, void *priv, return 0; } + if (ici->ops->get_ctrl) { + ret = ici->ops->get_ctrl(icd, ctrl); + if (ret != -ENOIOCTLCMD) + return ret; + } + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, g_ctrl, ctrl); } @@ -681,9 +698,16 @@ static int soc_camera_s_ctrl(struct file *file, void *priv, struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + int ret; WARN_ON(priv != file->private_data); + if (ici->ops->set_ctrl) { + ret = ici->ops->set_ctrl(icd, ctrl); + if (ret != -ENOIOCTLCMD) + return ret; + } + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, s_ctrl, ctrl); } diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 3bc5b6b20f6..2d116bbbcce 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -83,7 +83,11 @@ 
struct soc_camera_host_ops { int (*reqbufs)(struct soc_camera_file *, struct v4l2_requestbuffers *); int (*querycap)(struct soc_camera_host *, struct v4l2_capability *); int (*set_bus_param)(struct soc_camera_device *, __u32); + int (*get_ctrl)(struct soc_camera_device *, struct v4l2_control *); + int (*set_ctrl)(struct soc_camera_device *, struct v4l2_control *); unsigned int (*poll)(struct file *, poll_table *); + const struct v4l2_queryctrl *controls; + int num_controls; }; #define SOCAM_SENSOR_INVERT_PCLK (1 << 0) -- cgit v1.2.3-70-g09d2 From a0705b07f1816ae2b85388fcda71de69c221b4b8 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:46:17 -0300 Subject: V4L/DVB (12515): soc-camera: use struct v4l2_rect in struct soc_camera_device Switch to using struct v4l2_rect in struct soc_camera_device for uniformity and simplicity. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 30 ++++++++------- drivers/media/video/mt9m111.c | 20 +++++----- drivers/media/video/mt9t031.c | 49 ++++++++++++----------- drivers/media/video/mt9v022.c | 24 ++++++------ drivers/media/video/mx1_camera.c | 10 ++--- drivers/media/video/mx3_camera.c | 25 ++++++------ drivers/media/video/ov772x.c | 6 +-- drivers/media/video/pxa_camera.c | 14 +++---- drivers/media/video/sh_mobile_ceu_camera.c | 28 ++++++++------ drivers/media/video/soc_camera.c | 62 +++++++++++++++++------------- drivers/media/video/soc_camera_platform.c | 12 +++--- drivers/media/video/tw9910.c | 38 ++++++++++-------- include/media/soc_camera.h | 10 +---- 13 files changed, 174 insertions(+), 154 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 2a73dac11d3..8b36a74b1be 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -240,8 +240,8 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) struct i2c_client *client = sd->priv; struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_rect rect = { - .left = icd->x_current, - .top = icd->y_current, + .left = icd->rect_current.left, + .top = icd->rect_current.top, .width = f->fmt.pix.width, .height = f->fmt.pix.height, }; @@ -467,11 +467,13 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) case V4L2_CID_EXPOSURE_AUTO: if (ctrl->value) { const u16 vblank = 25; - if (reg_write(client, MT9M001_SHUTTER_WIDTH, icd->height + + if (reg_write(client, MT9M001_SHUTTER_WIDTH, + icd->rect_current.height + icd->y_skip_top + vblank) < 0) return -EIO; qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (524 + (icd->height + icd->y_skip_top + vblank - 1) * + icd->exposure = (524 + (icd->rect_current.height + + icd->y_skip_top + vblank - 1) * (qctrl->maximum - qctrl->minimum)) / 1048 + qctrl->minimum; mt9m001->autoexposure = 1; @@ -613,16 +615,16 @@ static int mt9m001_probe(struct i2c_client *client, v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops); /* Second stage probe - when a capture adapter is there */ - icd->ops = &mt9m001_ops; - icd->x_min = 20; - icd->y_min = 12; - icd->x_current = 20; - icd->y_current = 12; - icd->width_min = 48; - icd->width_max = 1280; - icd->height_min = 32; - icd->height_max = 1024; - icd->y_skip_top = 1; + icd->ops = &mt9m001_ops; + icd->rect_max.left = 20; + icd->rect_max.top = 12; + icd->rect_max.width = 1280; + icd->rect_max.height = 1024; + icd->rect_current.left = 20; + icd->rect_current.top = 12; + 
icd->width_min = 48; + icd->height_min = 32; + icd->y_skip_top = 1; /* Simulated autoexposure. If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9m001->autoexposure = 1; diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 29f976afd46..45101fd90ce 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -948,16 +948,16 @@ static int mt9m111_probe(struct i2c_client *client, v4l2_i2c_subdev_init(&mt9m111->subdev, client, &mt9m111_subdev_ops); /* Second stage probe - when a capture adapter is there */ - icd->ops = &mt9m111_ops; - icd->x_min = MT9M111_MIN_DARK_COLS; - icd->y_min = MT9M111_MIN_DARK_ROWS; - icd->x_current = icd->x_min; - icd->y_current = icd->y_min; - icd->width_min = MT9M111_MIN_DARK_ROWS; - icd->width_max = MT9M111_MAX_WIDTH; - icd->height_min = MT9M111_MIN_DARK_COLS; - icd->height_max = MT9M111_MAX_HEIGHT; - icd->y_skip_top = 0; + icd->ops = &mt9m111_ops; + icd->rect_max.left = MT9M111_MIN_DARK_COLS; + icd->rect_max.top = MT9M111_MIN_DARK_ROWS; + icd->rect_max.width = MT9M111_MAX_WIDTH; + icd->rect_max.height = MT9M111_MAX_HEIGHT; + icd->rect_current.left = icd->rect_max.left; + icd->rect_current.top = icd->rect_max.top; + icd->width_min = MT9M111_MIN_DARK_ROWS; + icd->height_min = MT9M111_MIN_DARK_COLS; + icd->y_skip_top = 0; ret = mt9m111_video_probe(icd, client); if (ret) { diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index 27a5edda902..dc3eb652a7c 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -222,12 +222,12 @@ static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd) static void recalculate_limits(struct soc_camera_device *icd, u16 xskip, u16 yskip) { - icd->x_min = (MT9T031_COLUMN_SKIP + xskip - 1) / xskip; - icd->y_min = (MT9T031_ROW_SKIP + yskip - 1) / yskip; + icd->rect_max.left = (MT9T031_COLUMN_SKIP + xskip - 1) / xskip; + icd->rect_max.top = (MT9T031_ROW_SKIP + yskip - 1) / yskip; icd->width_min = (MT9T031_MIN_WIDTH + xskip - 1) / xskip; icd->height_min = (MT9T031_MIN_HEIGHT + yskip - 1) / yskip; - icd->width_max = MT9T031_MAX_WIDTH / xskip; - icd->height_max = MT9T031_MAX_HEIGHT / yskip; + icd->rect_max.width = MT9T031_MAX_WIDTH / xskip; + icd->rect_max.height = MT9T031_MAX_HEIGHT / yskip; } static int mt9t031_set_params(struct soc_camera_device *icd, @@ -241,11 +241,13 @@ static int mt9t031_set_params(struct soc_camera_device *icd, vblank = MT9T031_VERTICAL_BLANK; /* Make sure we don't exceed sensor limits */ - if (rect->left + rect->width > icd->width_max) - rect->left = (icd->width_max - rect->width) / 2 + icd->x_min; + if (rect->left + rect->width > icd->rect_max.width) + rect->left = (icd->rect_max.width - rect->width) / 2 + + icd->rect_max.left; - if (rect->top + rect->height > icd->height_max) - rect->top = (icd->height_max - rect->height) / 2 + icd->y_min; + if (rect->top + rect->height > icd->rect_max.height) + rect->top = (icd->rect_max.height - rect->height) / 2 + + icd->rect_max.top; width = rect->width * xskip; height = rect->height * yskip; @@ -346,8 +348,8 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) int ret; u16 xskip, yskip; struct v4l2_rect rect = { - .left = icd->x_current, - .top = icd->y_current, + .left = icd->rect_current.left, + .top = icd->rect_current.top, .width = f->fmt.pix.width, .height = f->fmt.pix.height, }; @@ -618,12 +620,13 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) 
if (ctrl->value) { const u16 vblank = MT9T031_VERTICAL_BLANK; const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; - if (set_shutter(client, icd->height + + if (set_shutter(client, icd->rect_current.height + icd->y_skip_top + vblank) < 0) return -EIO; qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (shutter_max / 2 + (icd->height + - icd->y_skip_top + vblank - 1) * + icd->exposure = (shutter_max / 2 + + (icd->rect_current.height + + icd->y_skip_top + vblank - 1) * (qctrl->maximum - qctrl->minimum)) / shutter_max + qctrl->minimum; mt9t031->autoexposure = 1; @@ -726,16 +729,16 @@ static int mt9t031_probe(struct i2c_client *client, v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops); /* Second stage probe - when a capture adapter is there */ - icd->ops = &mt9t031_ops; - icd->x_min = MT9T031_COLUMN_SKIP; - icd->y_min = MT9T031_ROW_SKIP; - icd->x_current = icd->x_min; - icd->y_current = icd->y_min; - icd->width_min = MT9T031_MIN_WIDTH; - icd->width_max = MT9T031_MAX_WIDTH; - icd->height_min = MT9T031_MIN_HEIGHT; - icd->height_max = MT9T031_MAX_HEIGHT; - icd->y_skip_top = 0; + icd->ops = &mt9t031_ops; + icd->rect_max.left = MT9T031_COLUMN_SKIP; + icd->rect_max.top = MT9T031_ROW_SKIP; + icd->rect_current.left = icd->rect_max.left; + icd->rect_current.top = icd->rect_max.top; + icd->width_min = MT9T031_MIN_WIDTH; + icd->rect_max.width = MT9T031_MAX_WIDTH; + icd->height_min = MT9T031_MIN_HEIGHT; + icd->rect_max.height = MT9T031_MAX_HEIGHT; + icd->y_skip_top = 0; /* Simulated autoexposure. If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9t031->autoexposure = 1; diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index 3cb9f0f1e25..d2b0981ec1c 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -298,8 +298,8 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { - .left = icd->x_current, - .top = icd->y_current, + .left = icd->rect_current.left, + .top = icd->rect_current.top, .width = pix->width, .height = pix->height, }; @@ -741,16 +741,16 @@ static int mt9v022_probe(struct i2c_client *client, mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT; - icd->ops = &mt9v022_ops; - icd->x_min = 1; - icd->y_min = 4; - icd->x_current = 1; - icd->y_current = 4; - icd->width_min = 48; - icd->width_max = 752; - icd->height_min = 32; - icd->height_max = 480; - icd->y_skip_top = 1; + icd->ops = &mt9v022_ops; + icd->rect_max.left = 1; + icd->rect_max.top = 4; + icd->rect_max.width = 752; + icd->rect_max.height = 480; + icd->rect_current.left = 1; + icd->rect_current.top = 4; + icd->width_min = 48; + icd->height_min = 32; + icd->y_skip_top = 1; ret = mt9v022_video_probe(icd, client); if (ret) { diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index ea4ceaec85f..948a4714be9 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -126,7 +126,7 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, { struct soc_camera_device *icd = vq->priv_data; - *size = icd->width * icd->height * + *size = icd->rect_current.width * icd->rect_current.height * ((icd->current_fmt->depth + 7) >> 3); if (!*count) @@ -178,12 +178,12 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq, buf->inwork = 1; if (buf->fmt != icd->current_fmt 
|| - vb->width != icd->width || - vb->height != icd->height || + vb->width != icd->rect_current.width || + vb->height != icd->rect_current.height || vb->field != field) { buf->fmt = icd->current_fmt; - vb->width = icd->width; - vb->height = icd->height; + vb->width = icd->rect_current.width; + vb->height = icd->rect_current.height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 677d355be8f..6c3b7f9b906 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -220,7 +220,7 @@ static int mx3_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, if (!mx3_cam->idmac_channel[0]) return -EINVAL; - *size = icd->width * icd->height * bpp; + *size = icd->rect_current.width * icd->rect_current.height * bpp; if (!*count) *count = 32; @@ -241,7 +241,7 @@ static int mx3_videobuf_prepare(struct videobuf_queue *vq, struct mx3_camera_buffer *buf = container_of(vb, struct mx3_camera_buffer, vb); /* current_fmt _must_ always be set */ - size_t new_size = icd->width * icd->height * + size_t new_size = icd->rect_current.width * icd->rect_current.height * ((icd->current_fmt->depth + 7) >> 3); int ret; @@ -251,12 +251,12 @@ static int mx3_videobuf_prepare(struct videobuf_queue *vq, */ if (buf->fmt != icd->current_fmt || - vb->width != icd->width || - vb->height != icd->height || + vb->width != icd->rect_current.width || + vb->height != icd->rect_current.height || vb->field != field) { buf->fmt = icd->current_fmt; - vb->width = icd->width; - vb->height = icd->height; + vb->width = icd->rect_current.width; + vb->height = icd->rect_current.height; vb->field = field; if (vb->state != VIDEOBUF_NEEDS_INIT) free_buffer(vq, buf); @@ -354,9 +354,9 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, /* This is the configuration of one sg-element */ video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc); - video->out_width = icd->width; - video->out_height = icd->height; - video->out_stride = icd->width; + video->out_width = icd->rect_current.width; + video->out_height = icd->rect_current.height; + video->out_stride = icd->rect_current.width; #ifdef DEBUG /* helps to see what DMA actually has written */ @@ -538,7 +538,8 @@ static bool channel_change_requested(struct soc_camera_device *icd, struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; /* Do buffers have to be re-allocated or channel re-configured? 
*/ - return ichan && rect->width * rect->height > icd->width * icd->height; + return ichan && rect->width * rect->height > + icd->rect_current.width * icd->rect_current.height; } static int test_platform_param(struct mx3_camera_dev *mx3_cam, @@ -808,8 +809,8 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { - .left = icd->x_current, - .top = icd->y_current, + .left = icd->rect_current.left, + .top = icd->rect_current.top, .width = pix->width, .height = pix->height, }; diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 4c550f91ca2..3417398e1b5 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -1120,9 +1120,9 @@ static int ov772x_probe(struct i2c_client *client, v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops); - icd->ops = &ov772x_ops; - icd->width_max = MAX_WIDTH; - icd->height_max = MAX_HEIGHT; + icd->ops = &ov772x_ops; + icd->rect_max.width = MAX_WIDTH; + icd->rect_max.height = MAX_HEIGHT; ret = ov772x_video_probe(icd, client); if (ret) { diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index bdc0d85c461..0e4daaad2f4 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -239,7 +239,7 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, dev_dbg(&icd->dev, "count=%d, size=%d\n", *count, *size); - *size = roundup(icd->width * icd->height * + *size = roundup(icd->rect_current.width * icd->rect_current.height * ((icd->current_fmt->depth + 7) >> 3), 8); if (0 == *count) @@ -443,12 +443,12 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, buf->inwork = 1; if (buf->fmt != icd->current_fmt || - vb->width != icd->width || - vb->height != icd->height || + vb->width != icd->rect_current.width || + vb->height != icd->rect_current.height || vb->field != field) { buf->fmt = icd->current_fmt; - vb->width = icd->width; - vb->height = icd->height; + vb->width = icd->rect_current.width; + vb->height = icd->rect_current.height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } @@ -1118,7 +1118,7 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) if (cicr0 & CICR0_ENB) __raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0); - cicr1 = CICR1_PPL_VAL(icd->width - 1) | bpp | dw; + cicr1 = CICR1_PPL_VAL(icd->rect_current.width - 1) | bpp | dw; switch (pixfmt) { case V4L2_PIX_FMT_YUV422P: @@ -1147,7 +1147,7 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) } cicr2 = 0; - cicr3 = CICR3_LPF_VAL(icd->height - 1) | + cicr3 = CICR3_LPF_VAL(icd->rect_current.height - 1) | CICR3_BFW_VAL(min((unsigned short)255, icd->y_skip_top)); cicr4 |= pcdev->mclk_divisor; diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 16fa56efaf9..4c4b60c3226 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -146,7 +146,8 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq, struct sh_mobile_ceu_dev *pcdev = ici->priv; int bytes_per_pixel = (icd->current_fmt->depth + 7) >> 3; - *size = PAGE_ALIGN(icd->width * icd->height * bytes_per_pixel); + *size = PAGE_ALIGN(icd->rect_current.width * icd->rect_current.height * + bytes_per_pixel); if (0 == *count) *count = 2; @@ -205,7 +206,7 @@ static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) 
phys_addr_top = videobuf_to_dma_contig(pcdev->active); ceu_write(pcdev, CDAYR, phys_addr_top); if (pcdev->is_interlaced) { - phys_addr_bottom = phys_addr_top + icd->width; + phys_addr_bottom = phys_addr_top + icd->rect_current.width; ceu_write(pcdev, CDBYR, phys_addr_bottom); } @@ -214,10 +215,12 @@ static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: - phys_addr_top += icd->width * icd->height; + phys_addr_top += icd->rect_current.width * + icd->rect_current.height; ceu_write(pcdev, CDACR, phys_addr_top); if (pcdev->is_interlaced) { - phys_addr_bottom = phys_addr_top + icd->width; + phys_addr_bottom = phys_addr_top + + icd->rect_current.width; ceu_write(pcdev, CDBCR, phys_addr_bottom); } } @@ -251,12 +254,12 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq, BUG_ON(NULL == icd->current_fmt); if (buf->fmt != icd->current_fmt || - vb->width != icd->width || - vb->height != icd->height || + vb->width != icd->rect_current.width || + vb->height != icd->rect_current.height || vb->field != field) { buf->fmt = icd->current_fmt; - vb->width = icd->width; - vb->height = icd->height; + vb->width = icd->rect_current.width; + vb->height = icd->rect_current.height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } @@ -475,17 +478,18 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, mdelay(1); if (yuv_mode) { - width = icd->width * 2; + width = icd->rect_current.width * 2; width = buswidth == 16 ? width / 2 : width; - cfszr_width = cdwdr_width = icd->width; + cfszr_width = cdwdr_width = icd->rect_current.width; } else { - width = icd->width * ((icd->current_fmt->depth + 7) >> 3); + width = icd->rect_current.width * + ((icd->current_fmt->depth + 7) >> 3); width = buswidth == 16 ? width / 2 : width; cfszr_width = buswidth == 8 ? width / 2 : width; cdwdr_width = buswidth == 16 ? width * 2 : width; } - height = icd->height; + height = icd->rect_current.height; if (pcdev->is_interlaced) { height /= 2; cdwdr_width *= 2; diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index b3fb8f290ad..5028023b72a 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -288,17 +288,17 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf, return -EINVAL; } - icd->width = pix->width; - icd->height = pix->height; - icf->vb_vidq.field = - icd->field = pix->field; + icd->rect_current.width = pix->width; + icd->rect_current.height = pix->height; + icf->vb_vidq.field = + icd->field = pix->field; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) dev_warn(&icd->dev, "Attention! 
Wrong buf-type %d\n", f->type); dev_dbg(&icd->dev, "set width: %d height: %d\n", - icd->width, icd->height); + icd->rect_current.width, icd->rect_current.height); /* set physical bus parameters */ return ici->ops->set_bus_param(icd, pix->pixelformat); @@ -341,8 +341,8 @@ static int soc_camera_open(struct file *file) struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { - .width = icd->width, - .height = icd->height, + .width = icd->rect_current.width, + .height = icd->rect_current.height, .field = icd->field, }, }; @@ -553,8 +553,8 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv, WARN_ON(priv != file->private_data); - pix->width = icd->width; - pix->height = icd->height; + pix->width = icd->rect_current.width; + pix->height = icd->rect_current.height; pix->field = icf->vb_vidq.field; pix->pixelformat = icd->current_fmt->fourcc; pix->bytesperline = pix->width * @@ -718,12 +718,9 @@ static int soc_camera_cropcap(struct file *file, void *fh, struct soc_camera_device *icd = icf->icd; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - a->bounds.left = icd->x_min; - a->bounds.top = icd->y_min; - a->bounds.width = icd->width_max; - a->bounds.height = icd->height_max; - a->defrect.left = icd->x_min; - a->defrect.top = icd->y_min; + a->bounds = icd->rect_max; + a->defrect.left = icd->rect_max.left; + a->defrect.top = icd->rect_max.top; a->defrect.width = DEFAULT_WIDTH; a->defrect.height = DEFAULT_HEIGHT; a->pixelaspect.numerator = 1; @@ -738,11 +735,8 @@ static int soc_camera_g_crop(struct file *file, void *fh, struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - a->c.left = icd->x_current; - a->c.top = icd->y_current; - a->c.width = icd->width; - a->c.height = icd->height; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + a->c = icd->rect_current; return 0; } @@ -761,13 +755,29 @@ static int soc_camera_s_crop(struct file *file, void *fh, /* Cropping is allowed during a running capture, guard consistency */ mutex_lock(&icf->vb_vidq.vb_lock); + if (a->c.width > icd->rect_max.width) + a->c.width = icd->rect_max.width; + + if (a->c.width < icd->width_min) + a->c.width = icd->width_min; + + if (a->c.height > icd->rect_max.height) + a->c.height = icd->rect_max.height; + + if (a->c.height < icd->height_min) + a->c.height = icd->height_min; + + if (a->c.width + a->c.left > icd->rect_max.width + icd->rect_max.left) + a->c.left = icd->rect_max.width + icd->rect_max.left - + a->c.width; + + if (a->c.height + a->c.top > icd->rect_max.height + icd->rect_max.top) + a->c.top = icd->rect_max.height + icd->rect_max.top - + a->c.height; + ret = ici->ops->set_crop(icd, &a->c); - if (!ret) { - icd->width = a->c.width; - icd->height = a->c.height; - icd->x_current = a->c.left; - icd->y_current = a->c.top; - } + if (!ret) + icd->rect_current = a->c; mutex_unlock(&icf->vb_vidq.vb_lock); diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index 8168cf470eb..9e406c113aa 100644 --- a/drivers/media/video/soc_camera_platform.c +++ b/drivers/media/video/soc_camera_platform.c @@ -127,12 +127,12 @@ static int soc_camera_platform_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); dev_set_drvdata(&icd->dev, &pdev->dev); - icd->width_min = 0; - icd->width_max = p->format.width; - icd->height_min = 0; - icd->height_max = p->format.height; - icd->y_skip_top = 0; - icd->ops = &soc_camera_platform_ops; + icd->width_min = 0; + icd->rect_max.width = p->format.width; 
+ icd->height_min = 0; + icd->rect_max.height = p->format.height; + icd->y_skip_top = 0; + icd->ops = &soc_camera_platform_ops; ici = to_soc_camera_host(icd->dev.parent); diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index a006df1d28e..7199e0f71b2 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -715,8 +715,8 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { - .left = icd->x_current, - .top = icd->y_current, + .left = icd->rect_current.left, + .top = icd->rect_current.top, .width = pix->width, .height = pix->height, }; @@ -840,6 +840,19 @@ static struct v4l2_subdev_ops tw9910_subdev_ops = { * i2c_driver function */ +static void limit_to_scale(struct soc_camera_device *icd, + const struct tw9910_scale_ctrl *scale) +{ + if (scale->width > icd->rect_max.width) + icd->rect_max.width = scale->width; + if (scale->width < icd->width_min) + icd->width_min = scale->width; + if (scale->height > icd->rect_max.height) + icd->rect_max.height = scale->height; + if (scale->height < icd->height_min) + icd->height_min = scale->height; +} + static int tw9910_probe(struct i2c_client *client, const struct i2c_device_id *did) @@ -885,25 +898,18 @@ static int tw9910_probe(struct i2c_client *client, /* * set width and height */ - icd->width_max = tw9910_ntsc_scales[0].width; /* set default */ + icd->rect_max.width = tw9910_ntsc_scales[0].width; /* set default */ icd->width_min = tw9910_ntsc_scales[0].width; - icd->height_max = tw9910_ntsc_scales[0].height; + icd->rect_max.height = tw9910_ntsc_scales[0].height; icd->height_min = tw9910_ntsc_scales[0].height; scale = tw9910_ntsc_scales; - for (i = 0; i < ARRAY_SIZE(tw9910_ntsc_scales); i++) { - icd->width_max = max(scale[i].width, icd->width_max); - icd->width_min = min(scale[i].width, icd->width_min); - icd->height_max = max(scale[i].height, icd->height_max); - icd->height_min = min(scale[i].height, icd->height_min); - } + for (i = 0; i < ARRAY_SIZE(tw9910_ntsc_scales); i++) + limit_to_scale(icd, scale + i); + scale = tw9910_pal_scales; - for (i = 0; i < ARRAY_SIZE(tw9910_pal_scales); i++) { - icd->width_max = max(scale[i].width, icd->width_max); - icd->width_min = min(scale[i].width, icd->width_min); - icd->height_max = max(scale[i].height, icd->height_max); - icd->height_min = min(scale[i].height, icd->height_min); - } + for (i = 0; i < ARRAY_SIZE(tw9910_pal_scales); i++) + limit_to_scale(icd, scale + i); ret = tw9910_video_probe(icd, client); if (ret) { diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 2d116bbbcce..f623c010a53 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -22,16 +22,10 @@ struct soc_camera_device { struct list_head list; struct device dev; struct device *pdev; /* Platform device */ - unsigned short width; /* Current window */ - unsigned short height; /* sizes */ - unsigned short x_min; /* Camera capabilities */ - unsigned short y_min; - unsigned short x_current; /* Current window location */ - unsigned short y_current; + struct v4l2_rect rect_current; /* Current window */ + struct v4l2_rect rect_max; /* Maximum window */ unsigned short width_min; - unsigned short width_max; unsigned short height_min; - unsigned short height_max; unsigned short y_skip_top; /* Lines to skip at the top */ unsigned short gain; unsigned short exposure; -- cgit v1.2.3-70-g09d2 From 
fa48984e36ee73e964eeb994a45de6525114e871 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:46:43 -0300 Subject: V4L/DVB (12519): soc-camera: put pixel format initialisation back in probe, add .put_formats() The move of format translation initialisation into soc_camera_open() was temporary for the soc-camera as platform driver intermediate step, put it back into soc_camera_probe(). Also add a .put_formats() method to soc_camera_host_ops to free any resources host driver might have allocated in .get_formats(). Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/soc_camera.c | 50 ++++++++++++++++++++++++++++------------ include/media/soc_camera.h | 7 ++++++ 2 files changed, 42 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 5028023b72a..aa6614b60d6 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -211,7 +211,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv, static int soc_camera_init_user_formats(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - int i, fmts = 0; + int i, fmts = 0, ret; if (!ici->ops->get_formats) /* @@ -224,8 +224,12 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd) * First pass - only count formats this host-sensor * configuration can provide */ - for (i = 0; i < icd->num_formats; i++) - fmts += ici->ops->get_formats(icd, i, NULL); + for (i = 0; i < icd->num_formats; i++) { + ret = ici->ops->get_formats(icd, i, NULL); + if (ret < 0) + return ret; + fmts += ret; + } if (!fmts) return -ENXIO; @@ -247,19 +251,32 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd) icd->user_formats[i].cam_fmt = icd->formats + i; icd->user_formats[i].buswidth = icd->formats[i].depth; } else { - fmts += ici->ops->get_formats(icd, i, - &icd->user_formats[fmts]); + ret = ici->ops->get_formats(icd, i, + &icd->user_formats[fmts]); + if (ret < 0) + goto egfmt; + fmts += ret; } icd->current_fmt = icd->user_formats[0].host_fmt; return 0; + +egfmt: + icd->num_user_formats = 0; + vfree(icd->user_formats); + return ret; } /* Always entered with .video_lock held */ static void soc_camera_free_user_formats(struct soc_camera_device *icd) { + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + + if (ici->ops->put_formats) + ici->ops->put_formats(icd); icd->current_fmt = NULL; + icd->num_user_formats = 0; vfree(icd->user_formats); icd->user_formats = NULL; } @@ -344,16 +361,11 @@ static int soc_camera_open(struct file *file) .width = icd->rect_current.width, .height = icd->rect_current.height, .field = icd->field, + .pixelformat = icd->current_fmt->fourcc, + .colorspace = icd->current_fmt->colorspace, }, }; - ret = soc_camera_init_user_formats(icd); - if (ret < 0) - goto eiufmt; - - f.fmt.pix.pixelformat = icd->current_fmt->fourcc; - f.fmt.pix.colorspace = icd->current_fmt->colorspace; - if (icl->power) { ret = icl->power(icd->pdev, 1); if (ret < 0) @@ -404,8 +416,6 @@ eiciadd: if (icl->power) icl->power(icd->pdev, 0); epower: - soc_camera_free_user_formats(icd); -eiufmt: icd->use_count--; mutex_unlock(&icd->video_lock); module_put(ici->ops->owner); @@ -431,7 +441,6 @@ static int soc_camera_close(struct file *file) ici->ops->remove(icd); if (icl->power) icl->power(icd->pdev, 0); - soc_camera_free_user_formats(icd); } mutex_unlock(&icd->video_lock); @@ -954,6 +963,14 @@ static int 
soc_camera_probe(struct device *dev) } } + /* At this point client .probe() should have run already */ + ret = soc_camera_init_user_formats(icd); + if (ret < 0) + goto eiufmt; + + icd->rect_current = icd->rect_max; + icd->field = V4L2_FIELD_ANY; + /* ..._video_start() will create a device node, so we have to protect */ mutex_lock(&icd->video_lock); @@ -978,6 +995,8 @@ static int soc_camera_probe(struct device *dev) evidstart: mutex_unlock(&icd->video_lock); + soc_camera_free_user_formats(icd); +eiufmt: if (icl->board_info) { soc_camera_free_i2c(icd); } else { @@ -1023,6 +1042,7 @@ static int soc_camera_remove(struct device *dev) module_put(drv->owner); } } + soc_camera_free_user_formats(icd); return 0; } diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index f623c010a53..2b7a8c66360 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -67,8 +67,15 @@ struct soc_camera_host_ops { void (*remove)(struct soc_camera_device *); int (*suspend)(struct soc_camera_device *, pm_message_t); int (*resume)(struct soc_camera_device *); + /* + * .get_formats() is called for each client device format, but + * .put_formats() is only called once. Further, if any of the calls to + * .get_formats() fail, .put_formats() will not be called at all, the + * failing .get_formats() must then clean up internally. + */ int (*get_formats)(struct soc_camera_device *, int, struct soc_camera_format_xlate *); + void (*put_formats)(struct soc_camera_device *); int (*set_crop)(struct soc_camera_device *, struct v4l2_rect *); int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *); int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *); -- cgit v1.2.3-70-g09d2 From a12222a73e7a9efd927eb99d1dec1cedc9887e0a Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:46:51 -0300 Subject: V4L/DVB (12521): soc-camera: use .s_std() from struct v4l2_subdev_core_ops Remove .set_std() method from struct soc_camera_ops, use .s_std() from struct v4l2_subdev_core_ops instead. 
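A client driver that used to implement soc_camera_ops.set_std now provides the same functionality through the subdev core ops, exactly as tw9910 is converted elsewhere in this series; a minimal sketch with hypothetical xxx_* names:

#include <media/v4l2-subdev.h>

/* Standard selection now goes through v4l2_subdev_core_ops.s_std */
static int xxx_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
	/* program the decoder for the requested standard (NTSC/PAL/...) */
	return 0;
}

static struct v4l2_subdev_core_ops xxx_subdev_core_ops = {
	.s_std	= xxx_s_std,
};

static struct v4l2_subdev_ops xxx_subdev_ops = {
	.core	= &xxx_subdev_core_ops,
};

soc_camera_s_std() then reaches the client via v4l2_device_call_until_err(..., core, s_std, *a), as the soc_camera.c hunk below shows.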
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/soc_camera.c | 7 ++----- include/media/soc_camera.h | 1 - 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index aa6614b60d6..44a94dc934f 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -152,12 +152,9 @@ static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a) { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - int ret = 0; - - if (icd->ops->set_std) - ret = icd->ops->set_std(icd, a); + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - return ret; + return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, s_std, *a); } static int soc_camera_reqbufs(struct file *file, void *priv, diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 2b7a8c66360..7c44d401656 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -190,7 +190,6 @@ struct soc_camera_ops { int (*set_bus_param)(struct soc_camera_device *, unsigned long); int (*get_chip_id)(struct soc_camera_device *, struct v4l2_dbg_chip_ident *); - int (*set_std)(struct soc_camera_device *, v4l2_std_id *); int (*enum_input)(struct soc_camera_device *, struct v4l2_input *); const struct v4l2_queryctrl *controls; int num_controls; -- cgit v1.2.3-70-g09d2 From 08590b9613f7f624fe3a052586eea2dbb3584b38 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:46:54 -0300 Subject: V4L/DVB (12529): soc-camera: switch to s_crop v4l2-subdev video operation Remove set_crop soc-camera device method and switch to s_crop from v4l2-subdev video operations. 
Also extend non-i2c drivers to also hold a pointer to their v4l2-subdev instance in control device driver-data, i.e., in dev_get_drvdata((struct device *)to_soc_camera_control(icd)) Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 23 +++--- drivers/media/video/mt9m111.c | 9 +-- drivers/media/video/mt9t031.c | 9 +-- drivers/media/video/mt9v022.c | 23 +++--- drivers/media/video/mx1_camera.c | 8 ++- drivers/media/video/mx3_camera.c | 7 +- drivers/media/video/ov772x.c | 19 ----- drivers/media/video/pxa_camera.c | 7 +- drivers/media/video/sh_mobile_ceu_camera.c | 110 +++++++++++++++-------------- drivers/media/video/soc_camera.c | 4 +- drivers/media/video/soc_camera_platform.c | 28 ++++---- drivers/media/video/tw9910.c | 31 ++++---- include/media/soc_camera.h | 3 +- 13 files changed, 142 insertions(+), 139 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 8b36a74b1be..6e762cd06e3 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -194,11 +194,12 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd) return soc_camera_apply_sensor_flags(icl, flags); } -static int mt9m001_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) +static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct v4l2_rect *rect = &a->c; + struct i2c_client *client = sd->priv; struct mt9m001 *mt9m001 = to_mt9m001(client); + struct soc_camera_device *icd = client->dev.platform_data; int ret; const u16 hblank = 9, vblank = 25; @@ -239,15 +240,17 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; struct soc_camera_device *icd = client->dev.platform_data; - struct v4l2_rect rect = { - .left = icd->rect_current.left, - .top = icd->rect_current.top, - .width = f->fmt.pix.width, - .height = f->fmt.pix.height, + struct v4l2_crop a = { + .c = { + .left = icd->rect_current.left, + .top = icd->rect_current.top, + .width = f->fmt.pix.width, + .height = f->fmt.pix.height, + }, }; /* No support for scaling so far, just crop. 
TODO: use skipping */ - return mt9m001_set_crop(icd, &rect); + return mt9m001_s_crop(sd, &a); } static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) @@ -361,7 +364,6 @@ static const struct v4l2_queryctrl mt9m001_controls[] = { static struct soc_camera_ops mt9m001_ops = { .init = mt9m001_init, .release = mt9m001_release, - .set_crop = mt9m001_set_crop, .set_bus_param = mt9m001_set_bus_param, .query_bus_param = mt9m001_query_bus_param, .controls = mt9m001_controls, @@ -575,6 +577,7 @@ static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { .s_stream = mt9m001_s_stream, .s_fmt = mt9m001_s_fmt, .try_fmt = mt9m001_try_fmt, + .s_crop = mt9m001_s_crop, }; static struct v4l2_subdev_ops mt9m001_subdev_ops = { diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 45101fd90ce..bef41517018 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -395,11 +395,12 @@ static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f) return 0; } -static int mt9m111_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) +static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct v4l2_rect *rect = &a->c; + struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); + struct soc_camera_device *icd = client->dev.platform_data; int ret; dev_dbg(&icd->dev, "%s left=%d, top=%d, width=%d, height=%d\n", @@ -601,7 +602,6 @@ static struct soc_camera_ops mt9m111_ops = { .init = mt9m111_init, .resume = mt9m111_resume, .release = mt9m111_release, - .set_crop = mt9m111_set_crop, .query_bus_param = mt9m111_query_bus_param, .set_bus_param = mt9m111_set_bus_param, .controls = mt9m111_controls, @@ -908,6 +908,7 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { .s_fmt = mt9m111_s_fmt, .try_fmt = mt9m111_try_fmt, + .s_crop = mt9m111_s_crop, }; static struct v4l2_subdev_ops mt9m111_subdev_ops = { diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index 125973bf08b..3fa87be2fc6 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -321,11 +321,12 @@ static int mt9t031_set_params(struct soc_camera_device *icd, return ret < 0 ? 
ret : 0; } -static int mt9t031_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) +static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct v4l2_rect *rect = &a->c; + struct i2c_client *client = sd->priv; struct mt9t031 *mt9t031 = to_mt9t031(client); + struct soc_camera_device *icd = client->dev.platform_data; /* Make sure we don't exceed sensor limits */ if (rect->left + rect->width > icd->rect_max.left + icd->rect_max.width) @@ -495,7 +496,6 @@ static const struct v4l2_queryctrl mt9t031_controls[] = { static struct soc_camera_ops mt9t031_ops = { .init = mt9t031_init, .release = mt9t031_release, - .set_crop = mt9t031_set_crop, .set_bus_param = mt9t031_set_bus_param, .query_bus_param = mt9t031_query_bus_param, .controls = mt9t031_controls, @@ -689,6 +689,7 @@ static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { .s_stream = mt9t031_s_stream, .s_fmt = mt9t031_s_fmt, .try_fmt = mt9t031_try_fmt, + .s_crop = mt9t031_s_crop, }; static struct v4l2_subdev_ops mt9t031_subdev_ops = { diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index d2b0981ec1c..e609ff51aa6 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -248,10 +248,11 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd) width_flag; } -static int mt9v022_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) +static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct v4l2_rect *rect = &a->c; + struct i2c_client *client = sd->priv; + struct soc_camera_device *icd = client->dev.platform_data; int ret; /* Like in example app. Contradicts the datasheet though */ @@ -297,11 +298,13 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) struct mt9v022 *mt9v022 = to_mt9v022(client); struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; - struct v4l2_rect rect = { - .left = icd->rect_current.left, - .top = icd->rect_current.top, - .width = pix->width, - .height = pix->height, + struct v4l2_crop a = { + .c = { + .left = icd->rect_current.left, + .top = icd->rect_current.top, + .width = pix->width, + .height = pix->height, + }, }; /* The caller provides a supported format, as verified per call to @@ -325,7 +328,7 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) } /* No support for scaling on this camera, just crop. 
*/ - return mt9v022_set_crop(icd, &rect); + return mt9v022_s_crop(sd, &a); } static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) @@ -454,7 +457,6 @@ static const struct v4l2_queryctrl mt9v022_controls[] = { static struct soc_camera_ops mt9v022_ops = { .init = mt9v022_init, - .set_crop = mt9v022_set_crop, .set_bus_param = mt9v022_set_bus_param, .query_bus_param = mt9v022_query_bus_param, .controls = mt9v022_controls, @@ -700,6 +702,7 @@ static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { .s_stream = mt9v022_s_stream, .s_fmt = mt9v022_s_fmt, .try_fmt = mt9v022_try_fmt, + .s_crop = mt9v022_s_crop, }; static struct v4l2_subdev_ops mt9v022_subdev_ops = { diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 948a4714be9..add496fca4d 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -463,9 +463,13 @@ static void mx1_camera_remove_device(struct soc_camera_device *icd) } static int mx1_camera_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) + struct v4l2_crop *a) { - return icd->ops->set_crop(icd, rect); + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct device *control = to_soc_camera_control(icd); + struct v4l2_subdev *sd = dev_get_drvdata(control); + + return v4l2_subdev_call(sd, video, s_crop, a); } static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 6c3b7f9b906..de7ebfbf039 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -781,10 +781,13 @@ static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam) } static int mx3_camera_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) + struct v4l2_crop *a) { + struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; + struct device *control = to_soc_camera_control(icd); + struct v4l2_subdev *sd = dev_get_drvdata(control); /* * We now know pixel formats and can decide upon DMA-channel(s) @@ -798,7 +801,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd, configure_geometry(mx3_cam, rect); - return icd->ops->set_crop(icd, rect); + return v4l2_subdev_call(sd, video, s_crop, a); } static int mx3_camera_set_fmt(struct soc_camera_device *icd, diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 03488f9e1c8..bbe6f2d71c1 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -955,24 +955,6 @@ ov772x_set_fmt_error: return ret; } -/* Cannot crop, just return the current geometry */ -static int ov772x_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct ov772x_priv *priv = to_ov772x(client); - - if (!priv->fmt || !priv->win) - return -EINVAL; - - rect->left = 0; - rect->top = 0; - rect->width = priv->win->width; - rect->height = priv->win->height; - - return 0; -} - static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; @@ -1060,7 +1042,6 @@ static int ov772x_video_probe(struct soc_camera_device *icd, } static struct soc_camera_ops ov772x_ops = { - .set_crop = ov772x_set_crop, .set_bus_param = ov772x_set_bus_param, .query_bus_param = ov772x_query_bus_param, .controls = ov772x_controls, diff --git a/drivers/media/video/pxa_camera.c 
b/drivers/media/video/pxa_camera.c index 0e4daaad2f4..c38ce84b944 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -1281,10 +1281,13 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, } static int pxa_camera_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) + struct v4l2_crop *a) { + struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; + struct device *control = to_soc_camera_control(icd); + struct v4l2_subdev *sd = dev_get_drvdata(control); struct soc_camera_sense sense = { .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, @@ -1295,7 +1298,7 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) icd->sense = &sense; - ret = icd->ops->set_crop(icd, rect); + ret = v4l2_subdev_call(sd, video, s_crop, a); icd->sense = NULL; diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 499d1a235fd..726cf0e4dc2 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -846,12 +846,16 @@ static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2) * 3. if (2) failed, try to request the maximum image */ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) + struct v4l2_crop *a) { + struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct v4l2_rect cam_rect, target, cam_max; + struct v4l2_crop cam_crop; + struct v4l2_rect *cam_rect = &cam_crop.c, target, cam_max; struct sh_mobile_ceu_cam *cam = icd->host_priv; + struct device *control = to_soc_camera_control(icd); + struct v4l2_subdev *sd = dev_get_drvdata(control); unsigned int hscale = pcdev->cflcr & 0xffff; unsigned int vscale = (pcdev->cflcr >> 16) & 0xffff; unsigned short width, height; @@ -859,80 +863,80 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, int ret; /* Scale back up into client units */ - cam_rect.left = size_src(rect->left, hscale); - cam_rect.width = size_src(rect->width, hscale); - cam_rect.top = size_src(rect->top, vscale); - cam_rect.height = size_src(rect->height, vscale); + cam_rect->left = size_src(rect->left, hscale); + cam_rect->width = size_src(rect->width, hscale); + cam_rect->top = size_src(rect->top, vscale); + cam_rect->height = size_src(rect->height, vscale); - target = cam_rect; + target = *cam_rect; capsr = capture_save_reset(pcdev); dev_dbg(&icd->dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); /* First attempt - see if the client can deliver a perfect result */ - ret = icd->ops->set_crop(icd, &cam_rect); + ret = v4l2_subdev_call(sd, video, s_crop, &cam_crop); if (!ret && !memcmp(&target, &cam_rect, sizeof(target))) { dev_dbg(&icd->dev, "Camera S_CROP successful for %ux%u@%u:%u\n", - cam_rect.width, cam_rect.height, - cam_rect.left, cam_rect.top); + cam_rect->width, cam_rect->height, + cam_rect->left, cam_rect->top); goto ceu_set_rect; } /* Try to fix cropping, that camera hasn't managed to do */ dev_dbg(&icd->dev, "Fix camera S_CROP %d for %ux%u@%u:%u" " to %ux%u@%u:%u\n", - ret, cam_rect.width, cam_rect.height, - cam_rect.left, cam_rect.top, + ret, cam_rect->width, cam_rect->height, + cam_rect->left, cam_rect->top, target.width, target.height, target.left, target.top); /* * Popular special case - some cameras can 
only handle fixed sizes like * QVGA, VGA,... Take care to avoid infinite loop. */ - width = max(cam_rect.width, 1); - height = max(cam_rect.height, 1); + width = max(cam_rect->width, 1); + height = max(cam_rect->height, 1); cam_max.width = size_src(icd->rect_max.width, hscale); cam_max.left = size_src(icd->rect_max.left, hscale); cam_max.height = size_src(icd->rect_max.height, vscale); cam_max.top = size_src(icd->rect_max.top, vscale); - while (!ret && (is_smaller(&cam_rect, &target) || - is_inside(&cam_rect, &target)) && + while (!ret && (is_smaller(cam_rect, &target) || + is_inside(cam_rect, &target)) && cam_max.width >= width && cam_max.height >= height) { width *= 2; height *= 2; - cam_rect.width = width; - cam_rect.height = height; + cam_rect->width = width; + cam_rect->height = height; /* We do not know what the camera is capable of, play safe */ - if (cam_rect.left > target.left) - cam_rect.left = cam_max.left; + if (cam_rect->left > target.left) + cam_rect->left = cam_max.left; - if (cam_rect.left + cam_rect.width < target.left + target.width) - cam_rect.width = target.left + target.width - - cam_rect.left; + if (cam_rect->left + cam_rect->width < target.left + target.width) + cam_rect->width = target.left + target.width - + cam_rect->left; - if (cam_rect.top > target.top) - cam_rect.top = cam_max.top; + if (cam_rect->top > target.top) + cam_rect->top = cam_max.top; - if (cam_rect.top + cam_rect.height < target.top + target.height) - cam_rect.height = target.top + target.height - - cam_rect.top; + if (cam_rect->top + cam_rect->height < target.top + target.height) + cam_rect->height = target.top + target.height - + cam_rect->top; - if (cam_rect.width + cam_rect.left > + if (cam_rect->width + cam_rect->left > cam_max.width + cam_max.left) - cam_rect.left = max(cam_max.width + cam_max.left - - cam_rect.width, cam_max.left); + cam_rect->left = max(cam_max.width + cam_max.left - + cam_rect->width, cam_max.left); - if (cam_rect.height + cam_rect.top > + if (cam_rect->height + cam_rect->top > cam_max.height + cam_max.top) - cam_rect.top = max(cam_max.height + cam_max.top - - cam_rect.height, cam_max.top); + cam_rect->top = max(cam_max.height + cam_max.top - + cam_rect->height, cam_max.top); - ret = icd->ops->set_crop(icd, &cam_rect); + ret = v4l2_subdev_call(sd, video, s_crop, &cam_crop); dev_dbg(&icd->dev, "Camera S_CROP %d for %ux%u@%u:%u\n", - ret, cam_rect.width, cam_rect.height, - cam_rect.left, cam_rect.top); + ret, cam_rect->width, cam_rect->height, + cam_rect->left, cam_rect->top); } /* @@ -941,30 +945,30 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, */ if ((ret < 0 && (is_smaller(&icd->rect_current, rect) || is_inside(&icd->rect_current, rect))) || - is_smaller(&cam_rect, &target) || is_inside(&cam_rect, &target)) { + is_smaller(cam_rect, &target) || is_inside(cam_rect, &target)) { /* * The camera failed to configure a suitable cropping, * we cannot use the current rectangle, set to max */ - cam_rect = cam_max; - ret = icd->ops->set_crop(icd, &cam_rect); + *cam_rect = cam_max; + ret = v4l2_subdev_call(sd, video, s_crop, &cam_crop); dev_dbg(&icd->dev, "Camera S_CROP %d for max %ux%u@%u:%u\n", - ret, cam_rect.width, cam_rect.height, - cam_rect.left, cam_rect.top); - if (ret < 0) + ret, cam_rect->width, cam_rect->height, + cam_rect->left, cam_rect->top); + if (ret < 0 && ret != -ENOIOCTLCMD) /* All failed, hopefully resume current capture */ goto resume_capture; /* Finally, adjust the target rectangle */ - if (target.width > cam_rect.width) - target.width = 
cam_rect.width; - if (target.height > cam_rect.height) - target.height = cam_rect.height; - if (target.left + target.width > cam_rect.left + cam_rect.width) - target.left = cam_rect.left + cam_rect.width - + if (target.width > cam_rect->width) + target.width = cam_rect->width; + if (target.height > cam_rect->height) + target.height = cam_rect->height; + if (target.left + target.width > cam_rect->left + cam_rect->width) + target.left = cam_rect->left + cam_rect->width - target.width; - if (target.top + target.height > cam_rect.top + cam_rect.height) - target.top = cam_rect.top + cam_rect.height - + if (target.top + target.height > cam_rect->top + cam_rect->height) + target.top = cam_rect->top + cam_rect->height - target.height; } @@ -978,14 +982,14 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, */ dev_dbg(&icd->dev, "SH S_CROP from %ux%u@%u:%u to %ux%u@%u:%u, scale to %ux%u@%u:%u\n", - cam_rect.width, cam_rect.height, cam_rect.left, cam_rect.top, + cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top, target.width, target.height, target.left, target.top, rect->width, rect->height, rect->left, rect->top); ret = 0; ceu_set_rect: - cam->camera_rect = cam_rect; + cam->camera_rect = *cam_rect; rect->width = size_dst(target.width, hscale); rect->left = size_dst(target.left, hscale); diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 21a8aa586da..d9ccc286659 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -797,7 +797,7 @@ static int soc_camera_s_crop(struct file *file, void *fh, rect.top = icd->rect_max.height + icd->rect_max.top - rect.height; - ret = ici->ops->set_crop(icd, &rect); + ret = ici->ops->set_crop(icd, a); if (!ret) icd->rect_current = rect; @@ -970,7 +970,7 @@ static int soc_camera_probe(struct device *dev) /* FIXME: this is racy, have to use driver-binding notification */ control = to_soc_camera_control(icd); - if (!control || !control->driver || + if (!control || !control->driver || !dev_get_drvdata(control) || !try_module_get(control->driver->owner)) { icl->del_device(icl); goto enodrv; diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index 9e406c113aa..aec2cadbd2e 100644 --- a/drivers/media/video/soc_camera_platform.c +++ b/drivers/media/video/soc_camera_platform.c @@ -25,10 +25,15 @@ struct soc_camera_platform_priv { struct soc_camera_data_format format; }; -static struct soc_camera_platform_info * -soc_camera_platform_get_info(struct soc_camera_device *icd) +static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev) { - struct platform_device *pdev = to_platform_device(dev_get_drvdata(&icd->dev)); + struct v4l2_subdev *subdev = platform_get_drvdata(pdev); + return container_of(subdev, struct soc_camera_platform_priv, subdev); +} + +static struct soc_camera_platform_info *get_info(struct soc_camera_device *icd) +{ + struct platform_device *pdev = to_platform_device(to_soc_camera_control(icd)); return pdev->dev.platform_data; } @@ -47,16 +52,10 @@ static int soc_camera_platform_set_bus_param(struct soc_camera_device *icd, static unsigned long soc_camera_platform_query_bus_param(struct soc_camera_device *icd) { - struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd); + struct soc_camera_platform_info *p = get_info(icd); return p->bus_param; } -static int soc_camera_platform_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) -{ - return 0; -} - static int 
soc_camera_platform_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { @@ -71,7 +70,7 @@ static int soc_camera_platform_try_fmt(struct v4l2_subdev *sd, static void soc_camera_platform_video_probe(struct soc_camera_device *icd, struct platform_device *pdev) { - struct soc_camera_platform_priv *priv = platform_get_drvdata(pdev); + struct soc_camera_platform_priv *priv = get_priv(pdev); struct soc_camera_platform_info *p = pdev->dev.platform_data; priv->format.name = p->format_name; @@ -96,7 +95,6 @@ static struct v4l2_subdev_ops platform_subdev_ops = { }; static struct soc_camera_ops soc_camera_platform_ops = { - .set_crop = soc_camera_platform_set_crop, .set_bus_param = soc_camera_platform_set_bus_param, .query_bus_param = soc_camera_platform_query_bus_param, }; @@ -124,7 +122,9 @@ static int soc_camera_platform_probe(struct platform_device *pdev) icd = to_soc_camera_dev(p->dev); - platform_set_drvdata(pdev, priv); + /* soc-camera convention: control's drvdata points to the subdev */ + platform_set_drvdata(pdev, &priv->subdev); + /* Set the control device reference */ dev_set_drvdata(&icd->dev, &pdev->dev); icd->width_min = 0; @@ -158,7 +158,7 @@ evdrs: static int soc_camera_platform_remove(struct platform_device *pdev) { - struct soc_camera_platform_priv *priv = platform_get_drvdata(pdev); + struct soc_camera_platform_priv *priv = get_priv(pdev); struct soc_camera_platform_info *p = pdev->dev.platform_data; struct soc_camera_device *icd = to_soc_camera_dev(p->dev); diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index 735d0bd4bb1..b6b15468b40 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -616,11 +616,12 @@ static int tw9910_s_register(struct v4l2_subdev *sd, } #endif -static int tw9910_set_crop(struct soc_camera_device *icd, - struct v4l2_rect *rect) +static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct v4l2_rect *rect = &a->c; + struct i2c_client *client = sd->priv; struct tw9910_priv *priv = to_tw9910(client); + struct soc_camera_device *icd = client->dev.platform_data; int ret = -EINVAL; u8 val; @@ -716,15 +717,15 @@ tw9910_set_fmt_error: static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { - struct i2c_client *client = sd->priv; - struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; - /* See tw9910_set_crop() - no proper cropping support */ - struct v4l2_rect rect = { - .left = 0, - .top = 0, - .width = pix->width, - .height = pix->height, + /* See tw9910_s_crop() - no proper cropping support */ + struct v4l2_crop a = { + .c = { + .left = 0, + .top = 0, + .width = pix->width, + .height = pix->height, + }, }; int i, ret; @@ -738,10 +739,10 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) if (i == ARRAY_SIZE(tw9910_color_fmt)) return -EINVAL; - ret = tw9910_set_crop(icd, &rect); + ret = tw9910_s_crop(sd, &a); if (!ret) { - pix->width = rect.width; - pix->height = rect.height; + pix->width = a.c.width; + pix->height = a.c.height; } return ret; } @@ -821,7 +822,6 @@ static int tw9910_video_probe(struct soc_camera_device *icd, } static struct soc_camera_ops tw9910_ops = { - .set_crop = tw9910_set_crop, .set_bus_param = tw9910_set_bus_param, .query_bus_param = tw9910_query_bus_param, .enum_input = tw9910_enum_input, @@ -840,6 +840,7 @@ static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = { .s_stream = tw9910_s_stream, .s_fmt = 
tw9910_s_fmt, .try_fmt = tw9910_try_fmt, + .s_crop = tw9910_s_crop, }; static struct v4l2_subdev_ops tw9910_subdev_ops = { diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 7c44d401656..0bad8f1d7e8 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -76,7 +76,7 @@ struct soc_camera_host_ops { int (*get_formats)(struct soc_camera_device *, int, struct soc_camera_format_xlate *); void (*put_formats)(struct soc_camera_device *); - int (*set_crop)(struct soc_camera_device *, struct v4l2_rect *); + int (*set_crop)(struct soc_camera_device *, struct v4l2_crop *); int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *); int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *); void (*init_videobuf)(struct videobuf_queue *, @@ -185,7 +185,6 @@ struct soc_camera_ops { int (*resume)(struct soc_camera_device *); int (*init)(struct soc_camera_device *); int (*release)(struct soc_camera_device *); - int (*set_crop)(struct soc_camera_device *, struct v4l2_rect *); unsigned long (*query_bus_param)(struct soc_camera_device *); int (*set_bus_param)(struct soc_camera_device *, unsigned long); int (*get_chip_id)(struct soc_camera_device *, -- cgit v1.2.3-70-g09d2 From c9c1f1c0dbe90b82939917fdc3e4c9ccad42342d Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:46:59 -0300 Subject: V4L/DVB (12530): soc-camera: switch to using v4l2_subdev_call() Use v4l2_subdev_call() instead of v4l2_device_call_until_err() in all host drivers and in soc-camera core. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mx1_camera.c | 12 +++++------- drivers/media/video/mx3_camera.c | 10 +++++----- drivers/media/video/pxa_camera.c | 9 +++++---- drivers/media/video/sh_mobile_ceu_camera.c | 17 +++++++---------- drivers/media/video/soc_camera.c | 30 ++++++++++++++++-------------- include/media/soc_camera.h | 14 ++++++++++---- 6 files changed, 48 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index add496fca4d..ed7856bdad4 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -465,9 +465,7 @@ static void mx1_camera_remove_device(struct soc_camera_device *icd) static int mx1_camera_set_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - struct device *control = to_soc_camera_control(icd); - struct v4l2_subdev *sd = dev_get_drvdata(control); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); return v4l2_subdev_call(sd, video, s_crop, a); } @@ -539,7 +537,7 @@ static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) static int mx1_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; int ret; @@ -550,7 +548,7 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd, return -EINVAL; } - ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, s_fmt, f); + ret = v4l2_subdev_call(sd, video, s_fmt, f); if (!ret) { icd->buswidth = xlate->buswidth; icd->current_fmt = xlate->host_fmt; @@ -562,11 +560,11 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd, static int mx1_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - 
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); /* TODO: limit to mx1 hardware capabilities */ /* limit to sensor capabilities */ - return v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, try_fmt, f); + return v4l2_subdev_call(sd, video, try_fmt, f); } static int mx1_camera_reqbufs(struct soc_camera_file *icf, diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index de7ebfbf039..f7888f30da5 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -786,8 +786,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; - struct device *control = to_soc_camera_control(icd); - struct v4l2_subdev *sd = dev_get_drvdata(control); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); /* * We now know pixel formats and can decide upon DMA-channel(s) @@ -809,6 +808,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { @@ -837,7 +837,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, configure_geometry(mx3_cam, &rect); - ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, s_fmt, f); + ret = v4l2_subdev_call(sd, video, s_fmt, f); if (!ret) { icd->buswidth = xlate->buswidth; icd->current_fmt = xlate->host_fmt; @@ -849,7 +849,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, static int mx3_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; __u32 pixfmt = pix->pixelformat; @@ -875,7 +875,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd, /* camera has to see its format, but the user the original one */ pix->pixelformat = xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ - ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, try_fmt, f); + ret = v4l2_subdev_call(sd, video, try_fmt, f); pix->pixelformat = xlate->host_fmt->fourcc; field = pix->field; diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index c38ce84b944..4bc2a4f81f7 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -1286,8 +1286,7 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; - struct device *control = to_soc_camera_control(icd); - struct v4l2_subdev *sd = dev_get_drvdata(control); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_sense sense = { .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, @@ -1323,6 +1322,7 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_data_format *cam_fmt = NULL; const struct 
soc_camera_format_xlate *xlate = NULL; struct soc_camera_sense sense = { @@ -1346,7 +1346,7 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, icd->sense = &sense; cam_f.fmt.pix.pixelformat = cam_fmt->fourcc; - ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, s_fmt, f); + ret = v4l2_subdev_call(sd, video, s_fmt, f); icd->sense = NULL; @@ -1375,6 +1375,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; __u32 pixfmt = pix->pixelformat; @@ -1404,7 +1405,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, /* camera has to see its format, but the user the original one */ pix->pixelformat = xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ - ret = v4l2_device_call_until_err(&ici->v4l2_dev, 0, video, try_fmt, f); + ret = v4l2_subdev_call(sd, video, try_fmt, f); pix->pixelformat = xlate->host_fmt->fourcc; field = pix->field; diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 726cf0e4dc2..28c3affe882 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -854,8 +854,7 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, struct v4l2_crop cam_crop; struct v4l2_rect *cam_rect = &cam_crop.c, target, cam_max; struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct device *control = to_soc_camera_control(icd); - struct v4l2_subdev *sd = dev_get_drvdata(control); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); unsigned int hscale = pcdev->cflcr & 0xffff; unsigned int vscale = (pcdev->cflcr >> 16) & 0xffff; unsigned short width, height; @@ -1016,6 +1015,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_pix_format *pix = &f->fmt.pix; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); __u32 pixfmt = pix->pixelformat; const struct soc_camera_format_xlate *xlate; unsigned int width = pix->width, height = pix->height, tmp_w, tmp_h; @@ -1042,7 +1042,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, } pix->pixelformat = xlate->cam_fmt->fourcc; - ret = v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, s_fmt, f); + ret = v4l2_subdev_call(sd, video, s_fmt, f); pix->pixelformat = pixfmt; dev_dbg(&icd->dev, "Camera %d fmt %ux%u, requested %ux%u, max %ux%u\n", ret, pix->width, pix->height, width, height, @@ -1082,8 +1082,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, pix->width = tmp_w; pix->height = tmp_h; pix->pixelformat = xlate->cam_fmt->fourcc; - ret = v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, - video, s_fmt, f); + ret = v4l2_subdev_call(sd, video, s_fmt, f); pix->pixelformat = pixfmt; dev_dbg(&icd->dev, "Camera scaled to %ux%u\n", pix->width, pix->height); @@ -1140,6 +1139,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); __u32 pixfmt = pix->pixelformat; int width, height; int ret; @@ -1165,8 +1165,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, pix->pixelformat 
= xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ - ret = v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, - try_fmt, f); + ret = v4l2_subdev_call(sd, video, try_fmt, f); pix->pixelformat = pixfmt; if (ret < 0) return ret; @@ -1182,9 +1181,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, int tmp_w = pix->width, tmp_h = pix->height; pix->width = 2560; pix->height = 1920; - ret = v4l2_device_call_until_err(&ici->v4l2_dev, - (__u32)icd, video, - try_fmt, f); + ret = v4l2_subdev_call(sd, video, try_fmt, f); if (ret < 0) { /* Shouldn't actually happen... */ dev_err(&icd->dev, diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index d9ccc286659..dd023bdb189 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -152,9 +152,9 @@ static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a) { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, s_std, *a); + return v4l2_subdev_call(sd, core, s_std, *a); } static int soc_camera_reqbufs(struct file *file, void *priv, @@ -589,7 +589,7 @@ static int soc_camera_streamon(struct file *file, void *priv, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); int ret; WARN_ON(priv != file->private_data); @@ -599,7 +599,7 @@ static int soc_camera_streamon(struct file *file, void *priv, mutex_lock(&icd->video_lock); - v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, s_stream, 1); + v4l2_subdev_call(sd, video, s_stream, 1); /* This calls buf_queue from host driver's videobuf_queue_ops */ ret = videobuf_streamon(&icf->vb_vidq); @@ -614,7 +614,7 @@ static int soc_camera_streamoff(struct file *file, void *priv, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); WARN_ON(priv != file->private_data); @@ -627,7 +627,7 @@ static int soc_camera_streamoff(struct file *file, void *priv, * remaining buffers. 
When the last buffer is freed, stop capture */ videobuf_streamoff(&icf->vb_vidq); - v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, video, s_stream, 0); + v4l2_subdev_call(sd, video, s_stream, 0); mutex_unlock(&icd->video_lock); @@ -672,6 +672,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv, struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); int ret; WARN_ON(priv != file->private_data); @@ -695,7 +696,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv, return ret; } - return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, g_ctrl, ctrl); + return v4l2_subdev_call(sd, core, g_ctrl, ctrl); } static int soc_camera_s_ctrl(struct file *file, void *priv, @@ -704,6 +705,7 @@ static int soc_camera_s_ctrl(struct file *file, void *priv, struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); int ret; WARN_ON(priv != file->private_data); @@ -714,7 +716,7 @@ static int soc_camera_s_ctrl(struct file *file, void *priv, return ret; } - return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, s_ctrl, ctrl); + return v4l2_subdev_call(sd, core, s_ctrl, ctrl); } static int soc_camera_cropcap(struct file *file, void *fh, @@ -812,9 +814,9 @@ static int soc_camera_g_chip_ident(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, g_chip_ident, id); + return v4l2_subdev_call(sd, core, g_chip_ident, id); } #ifdef CONFIG_VIDEO_ADV_DEBUG @@ -823,9 +825,9 @@ static int soc_camera_g_register(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, g_register, reg); + return v4l2_subdev_call(sd, core, g_register, reg); } static int soc_camera_s_register(struct file *file, void *fh, @@ -833,9 +835,9 @@ static int soc_camera_s_register(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - return v4l2_device_call_until_err(&ici->v4l2_dev, (__u32)icd, core, s_register, reg); + return v4l2_subdev_call(sd, core, s_register, reg); } #endif diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 0bad8f1d7e8..344d8990477 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -126,28 +126,34 @@ struct soc_camera_link { void (*free_bus)(struct soc_camera_link *); }; -static inline struct soc_camera_device *to_soc_camera_dev(struct device *dev) +static inline struct soc_camera_device *to_soc_camera_dev(const struct device *dev) { return container_of(dev, struct soc_camera_device, dev); } -static inline struct soc_camera_host *to_soc_camera_host(struct device *dev) +static inline struct soc_camera_host 
*to_soc_camera_host(const struct device *dev) { struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); return container_of(v4l2_dev, struct soc_camera_host, v4l2_dev); } -static inline struct soc_camera_link *to_soc_camera_link(struct soc_camera_device *icd) +static inline struct soc_camera_link *to_soc_camera_link(const struct soc_camera_device *icd) { return icd->dev.platform_data; } -static inline struct device *to_soc_camera_control(struct soc_camera_device *icd) +static inline struct device *to_soc_camera_control(const struct soc_camera_device *icd) { return dev_get_drvdata(&icd->dev); } +static inline struct v4l2_subdev *soc_camera_to_subdev(const struct soc_camera_device *icd) +{ + struct device *control = to_soc_camera_control(icd); + return dev_get_drvdata(control); +} + int soc_camera_host_register(struct soc_camera_host *ici); void soc_camera_host_unregister(struct soc_camera_host *ici); -- cgit v1.2.3-70-g09d2 From 6a6c8786725c0b3d143674effa8b772f47b1c189 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:50:46 -0300 Subject: V4L/DVB (12534): soc-camera: V4L2 API compliant scaling (S_FMT) and cropping (S_CROP) The initial soc-camera scaling and cropping implementation turned out to be incompliant with the V4L2 API, e.g., it expected the user to specify cropping in output window pixels, instead of input window pixels. This patch converts the soc-camera core and all drivers to comply with the standard. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/soc-camera.txt | 40 ++ drivers/media/video/mt9m001.c | 142 ++++- drivers/media/video/mt9m111.c | 112 +++- drivers/media/video/mt9t031.c | 220 ++++---- drivers/media/video/mt9v022.c | 131 ++++- drivers/media/video/mx1_camera.c | 10 +- drivers/media/video/mx3_camera.c | 114 ++-- drivers/media/video/ov772x.c | 84 ++- drivers/media/video/pxa_camera.c | 201 +++++-- drivers/media/video/sh_mobile_ceu_camera.c | 828 ++++++++++++++++++++--------- drivers/media/video/soc_camera.c | 130 +++-- drivers/media/video/soc_camera_platform.c | 4 - drivers/media/video/tw9910.c | 120 +++-- include/media/soc_camera.h | 21 +- 14 files changed, 1524 insertions(+), 633 deletions(-) (limited to 'include') diff --git a/Documentation/video4linux/soc-camera.txt b/Documentation/video4linux/soc-camera.txt index 178ef3c5e57..3f87c7da4ca 100644 --- a/Documentation/video4linux/soc-camera.txt +++ b/Documentation/video4linux/soc-camera.txt @@ -116,5 +116,45 @@ functionality. struct soc_camera_device also links to an array of struct soc_camera_data_format, listing pixel formats, supported by the camera. +VIDIOC_S_CROP and VIDIOC_S_FMT behaviour +---------------------------------------- + +Above user ioctls modify image geometry as follows: + +VIDIOC_S_CROP: sets location and sizes of the sensor window. Unit is one sensor +pixel. Changing sensor window sizes preserves any scaling factors, therefore +user window sizes change as well. + +VIDIOC_S_FMT: sets user window. Should preserve previously set sensor window as +much as possible by modifying scaling factors. If the sensor window cannot be +preserved precisely, it may be changed too. + +In soc-camera there are two locations, where scaling and cropping can taks +place: in the camera driver and in the host driver. User ioctls are first passed +to the host driver, which then generally passes them down to the camera driver. 
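
As an illustrative aside (not part of the patch above): the semantics described here can be exercised from user space with the standard V4L2 ioctls roughly as follows. This is a minimal sketch; the device node path and the chosen crop values are hypothetical, and error handling is reduced to perror().

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	/* Hypothetical device node; real systems may use another index */
	int fd = open("/dev/video0", O_RDWR);
	struct v4l2_cropcap cap;
	struct v4l2_crop crop;
	struct v4l2_format fmt;

	if (fd < 0)
		return 1;

	/* Query sensor bounds first - capture drivers must support this */
	memset(&cap, 0, sizeof(cap));
	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_CROPCAP, &cap) < 0)
		perror("VIDIOC_CROPCAP");

	/* S_CROP selects a sensor window, in sensor pixels */
	memset(&crop, 0, sizeof(crop));
	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	crop.c = cap.defrect;
	crop.c.width /= 2;
	crop.c.height /= 2;
	if (ioctl(fd, VIDIOC_S_CROP, &crop) < 0)
		perror("VIDIOC_S_CROP");

	/* Scaling factors are preserved, so the user window reported
	 * by G_FMT shrinks together with the sensor window */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
		printf("user window now %ux%u\n",
		       fmt.fmt.pix.width, fmt.fmt.pix.height);

	close(fd);
	return 0;
}

A subsequent VIDIOC_S_FMT with a different size would then be satisfied by adjusting the scaling factors while preserving the sensor window set above, as the following paragraphs explain.
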
+It is more efficient to perform scaling and cropping in the camera driver to +save camera bus bandwidth and maximise the framerate. However, if the camera +driver failed to set the required parameters with sufficient precision, the host +driver may decide to also use its own scaling and cropping to fulfill the user's +request. + +Camera drivers are interfaced to the soc-camera core and to host drivers over +the v4l2-subdev API, which is completely functional, it doesn't pass any data. +Therefore all camera drivers shall reply to .g_fmt() requests with their current +output geometry. This is necessary to correctly configure the camera bus. +.s_fmt() and .try_fmt() have to be implemented too. Sensor window and scaling +factors have to be maintained by camera drivers internally. According to the +V4L2 API all capture drivers must support the VIDIOC_CROPCAP ioctl, hence we +rely on camera drivers implementing .cropcap(). If the camera driver does not +support cropping, it may choose to not implement .s_crop(), but to enable +cropping support by the camera host driver at least the .g_crop method must be +implemented. + +User window geometry is kept in .user_width and .user_height fields in struct +soc_camera_device and used by the soc-camera core and host drivers. The core +updates these fields upon successful completion of a .s_fmt() call, but if these +fields change elsewhere, e.g., during .s_crop() processing, the host driver is +responsible for updating them. + -- Author: Guennadi Liakhovetski diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 775e1a3c98d..e8cf56189ef 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -39,6 +39,13 @@ #define MT9M001_GLOBAL_GAIN 0x35 #define MT9M001_CHIP_ENABLE 0xF1 +#define MT9M001_MAX_WIDTH 1280 +#define MT9M001_MAX_HEIGHT 1024 +#define MT9M001_MIN_WIDTH 48 +#define MT9M001_MIN_HEIGHT 32 +#define MT9M001_COLUMN_SKIP 20 +#define MT9M001_ROW_SKIP 12 + static const struct soc_camera_data_format mt9m001_colour_formats[] = { /* Order important: first natively supported, * second supported with a GPIO extender */ @@ -70,6 +77,8 @@ static const struct soc_camera_data_format mt9m001_monochrome_formats[] = { struct mt9m001 { struct v4l2_subdev subdev; + struct v4l2_rect rect; /* Sensor window */ + __u32 fourcc; int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ unsigned char autoexposure; }; @@ -196,13 +205,31 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd) static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct v4l2_rect *rect = &a->c; struct i2c_client *client = sd->priv; struct mt9m001 *mt9m001 = to_mt9m001(client); + struct v4l2_rect rect = a->c; struct soc_camera_device *icd = client->dev.platform_data; int ret; const u16 hblank = 9, vblank = 25; + if (mt9m001->fourcc == V4L2_PIX_FMT_SBGGR8 || + mt9m001->fourcc == V4L2_PIX_FMT_SBGGR16) + /* + * Bayer format - even number of rows for simplicity, + * but let the user play with the top row. + */ + rect.height = ALIGN(rect.height, 2); + + /* Datasheet requirement: see register description */ + rect.width = ALIGN(rect.width, 2); + rect.left = ALIGN(rect.left, 2); + + soc_camera_limit_side(&rect.left, &rect.width, + MT9M001_COLUMN_SKIP, MT9M001_MIN_WIDTH, MT9M001_MAX_WIDTH); + + soc_camera_limit_side(&rect.top, &rect.height, + MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT); + /* Blanking and start values - default... 
*/ ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank); if (!ret) @@ -211,46 +238,98 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) /* The caller provides a supported format, as verified per * call to icd->try_fmt() */ if (!ret) - ret = reg_write(client, MT9M001_COLUMN_START, rect->left); + ret = reg_write(client, MT9M001_COLUMN_START, rect.left); if (!ret) - ret = reg_write(client, MT9M001_ROW_START, rect->top); + ret = reg_write(client, MT9M001_ROW_START, rect.top); if (!ret) - ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect->width - 1); + ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1); if (!ret) ret = reg_write(client, MT9M001_WINDOW_HEIGHT, - rect->height + icd->y_skip_top - 1); + rect.height + icd->y_skip_top - 1); if (!ret && mt9m001->autoexposure) { ret = reg_write(client, MT9M001_SHUTTER_WIDTH, - rect->height + icd->y_skip_top + vblank); + rect.height + icd->y_skip_top + vblank); if (!ret) { const struct v4l2_queryctrl *qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (524 + (rect->height + icd->y_skip_top + + icd->exposure = (524 + (rect.height + icd->y_skip_top + vblank - 1) * (qctrl->maximum - qctrl->minimum)) / 1048 + qctrl->minimum; } } + if (!ret) + mt9m001->rect = rect; + return ret; } +static int mt9m001_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) +{ + struct i2c_client *client = sd->priv; + struct mt9m001 *mt9m001 = to_mt9m001(client); + + a->c = mt9m001->rect; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + return 0; +} + +static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) +{ + a->bounds.left = MT9M001_COLUMN_SKIP; + a->bounds.top = MT9M001_ROW_SKIP; + a->bounds.width = MT9M001_MAX_WIDTH; + a->bounds.height = MT9M001_MAX_HEIGHT; + a->defrect = a->bounds; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + a->pixelaspect.numerator = 1; + a->pixelaspect.denominator = 1; + + return 0; +} + +static int mt9m001_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +{ + struct i2c_client *client = sd->priv; + struct mt9m001 *mt9m001 = to_mt9m001(client); + struct v4l2_pix_format *pix = &f->fmt.pix; + + pix->width = mt9m001->rect.width; + pix->height = mt9m001->rect.height; + pix->pixelformat = mt9m001->fourcc; + pix->field = V4L2_FIELD_NONE; + pix->colorspace = V4L2_COLORSPACE_SRGB; + + return 0; +} + static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; - struct soc_camera_device *icd = client->dev.platform_data; + struct mt9m001 *mt9m001 = to_mt9m001(client); + struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_crop a = { .c = { - .left = icd->rect_current.left, - .top = icd->rect_current.top, - .width = f->fmt.pix.width, - .height = f->fmt.pix.height, + .left = mt9m001->rect.left, + .top = mt9m001->rect.top, + .width = pix->width, + .height = pix->height, }, }; + int ret; /* No support for scaling so far, just crop. 
TODO: use skipping */ - return mt9m001_s_crop(sd, &a); + ret = mt9m001_s_crop(sd, &a); + if (!ret) { + pix->width = mt9m001->rect.width; + pix->height = mt9m001->rect.height; + mt9m001->fourcc = pix->pixelformat; + } + + return ret; } static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) @@ -259,9 +338,14 @@ static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; - v4l_bound_align_image(&pix->width, 48, 1280, 1, - &pix->height, 32 + icd->y_skip_top, - 1024 + icd->y_skip_top, 0, 0); + v4l_bound_align_image(&pix->width, MT9M001_MIN_WIDTH, + MT9M001_MAX_WIDTH, 1, + &pix->height, MT9M001_MIN_HEIGHT + icd->y_skip_top, + MT9M001_MAX_HEIGHT + icd->y_skip_top, 0, 0); + + if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || + pix->pixelformat == V4L2_PIX_FMT_SBGGR16) + pix->height = ALIGN(pix->height - 1, 2); return 0; } @@ -472,11 +556,11 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) if (ctrl->value) { const u16 vblank = 25; if (reg_write(client, MT9M001_SHUTTER_WIDTH, - icd->rect_current.height + + mt9m001->rect.height + icd->y_skip_top + vblank) < 0) return -EIO; qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (524 + (icd->rect_current.height + + icd->exposure = (524 + (mt9m001->rect.height + icd->y_skip_top + vblank - 1) * (qctrl->maximum - qctrl->minimum)) / 1048 + qctrl->minimum; @@ -548,6 +632,8 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, if (flags & SOCAM_DATAWIDTH_8) icd->num_formats++; + mt9m001->fourcc = icd->formats->fourcc; + dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data, data == 0x8431 ? "C12STM" : "C12ST"); @@ -556,10 +642,9 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, static void mt9m001_video_remove(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct soc_camera_link *icl = to_soc_camera_link(icd); - dev_dbg(&client->dev, "Video %x removed: %p, %p\n", client->addr, + dev_dbg(&icd->dev, "Video removed: %p, %p\n", icd->dev.parent, icd->vdev); if (icl->free_bus) icl->free_bus(icl); @@ -578,8 +663,11 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { .s_stream = mt9m001_s_stream, .s_fmt = mt9m001_s_fmt, + .g_fmt = mt9m001_g_fmt, .try_fmt = mt9m001_try_fmt, .s_crop = mt9m001_s_crop, + .g_crop = mt9m001_g_crop, + .cropcap = mt9m001_cropcap, }; static struct v4l2_subdev_ops mt9m001_subdev_ops = { @@ -621,15 +709,13 @@ static int mt9m001_probe(struct i2c_client *client, /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m001_ops; - icd->rect_max.left = 20; - icd->rect_max.top = 12; - icd->rect_max.width = 1280; - icd->rect_max.height = 1024; - icd->rect_current.left = 20; - icd->rect_current.top = 12; - icd->width_min = 48; - icd->height_min = 32; icd->y_skip_top = 1; + + mt9m001->rect.left = MT9M001_COLUMN_SKIP; + mt9m001->rect.top = MT9M001_ROW_SKIP; + mt9m001->rect.width = MT9M001_MAX_WIDTH; + mt9m001->rect.height = MT9M001_MAX_HEIGHT; + /* Simulated autoexposure. 
If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9m001->autoexposure = 1; diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 3637376da75..920dd53c4cf 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -194,7 +194,7 @@ static int mt9m111_reg_read(struct i2c_client *client, const u16 reg) ret = reg_page_map_set(client, reg); if (!ret) - ret = swab16(i2c_smbus_read_word_data(client, (reg & 0xff))); + ret = swab16(i2c_smbus_read_word_data(client, reg & 0xff)); dev_dbg(&client->dev, "read reg.%03x -> %04x\n", reg, ret); return ret; @@ -257,8 +257,8 @@ static int mt9m111_setup_rect(struct i2c_client *client, int width = rect->width; int height = rect->height; - if ((mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8) - || (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16)) + if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 || + mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) is_raw_format = 1; else is_raw_format = 0; @@ -395,23 +395,85 @@ static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f) return 0; } +static int mt9m111_make_rect(struct i2c_client *client, + struct v4l2_rect *rect) +{ + struct mt9m111 *mt9m111 = to_mt9m111(client); + + if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 || + mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) { + /* Bayer format - even size lengths */ + rect->width = ALIGN(rect->width, 2); + rect->height = ALIGN(rect->height, 2); + /* Let the user play with the starting pixel */ + } + + /* FIXME: the datasheet doesn't specify minimum sizes */ + soc_camera_limit_side(&rect->left, &rect->width, + MT9M111_MIN_DARK_COLS, 2, MT9M111_MAX_WIDTH); + + soc_camera_limit_side(&rect->top, &rect->height, + MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT); + + return mt9m111_setup_rect(client, rect); +} + static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct v4l2_rect *rect = &a->c; + struct v4l2_rect rect = a->c; struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n", - __func__, rect->left, rect->top, rect->width, - rect->height); + __func__, rect.left, rect.top, rect.width, rect.height); - ret = mt9m111_setup_rect(client, rect); + ret = mt9m111_make_rect(client, &rect); if (!ret) - mt9m111->rect = *rect; + mt9m111->rect = rect; return ret; } +static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) +{ + struct i2c_client *client = sd->priv; + struct mt9m111 *mt9m111 = to_mt9m111(client); + + a->c = mt9m111->rect; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + return 0; +} + +static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) +{ + a->bounds.left = MT9M111_MIN_DARK_COLS; + a->bounds.top = MT9M111_MIN_DARK_ROWS; + a->bounds.width = MT9M111_MAX_WIDTH; + a->bounds.height = MT9M111_MAX_HEIGHT; + a->defrect = a->bounds; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + a->pixelaspect.numerator = 1; + a->pixelaspect.denominator = 1; + + return 0; +} + +static int mt9m111_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +{ + struct i2c_client *client = sd->priv; + struct mt9m111 *mt9m111 = to_mt9m111(client); + struct v4l2_pix_format *pix = &f->fmt.pix; + + pix->width = mt9m111->rect.width; + pix->height = mt9m111->rect.height; + pix->pixelformat = mt9m111->pixfmt; + pix->field = V4L2_FIELD_NONE; + pix->colorspace = V4L2_COLORSPACE_SRGB; + + return 0; +} + static int mt9m111_set_pixfmt(struct i2c_client *client, u32 pixfmt) 
{ struct mt9m111 *mt9m111 = to_mt9m111(client); @@ -478,7 +540,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) __func__, pix->pixelformat, rect.left, rect.top, rect.width, rect.height); - ret = mt9m111_setup_rect(client, &rect); + ret = mt9m111_make_rect(client, &rect); if (!ret) ret = mt9m111_set_pixfmt(client, pix->pixelformat); if (!ret) @@ -489,11 +551,27 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) static int mt9m111_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; + bool bayer = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || + pix->pixelformat == V4L2_PIX_FMT_SBGGR16; + + /* + * With Bayer format enforce even side lengths, but let the user play + * with the starting pixel + */ if (pix->height > MT9M111_MAX_HEIGHT) pix->height = MT9M111_MAX_HEIGHT; + else if (pix->height < 2) + pix->height = 2; + else if (bayer) + pix->height = ALIGN(pix->height, 2); + if (pix->width > MT9M111_MAX_WIDTH) pix->width = MT9M111_MAX_WIDTH; + else if (pix->width < 2) + pix->width = 2; + else if (bayer) + pix->width = ALIGN(pix->width, 2); return 0; } @@ -906,8 +984,11 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { .s_fmt = mt9m111_s_fmt, + .g_fmt = mt9m111_g_fmt, .try_fmt = mt9m111_try_fmt, .s_crop = mt9m111_s_crop, + .g_crop = mt9m111_g_crop, + .cropcap = mt9m111_cropcap, }; static struct v4l2_subdev_ops mt9m111_subdev_ops = { @@ -949,16 +1030,13 @@ static int mt9m111_probe(struct i2c_client *client, /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m111_ops; - icd->rect_max.left = MT9M111_MIN_DARK_COLS; - icd->rect_max.top = MT9M111_MIN_DARK_ROWS; - icd->rect_max.width = MT9M111_MAX_WIDTH; - icd->rect_max.height = MT9M111_MAX_HEIGHT; - icd->rect_current.left = icd->rect_max.left; - icd->rect_current.top = icd->rect_max.top; - icd->width_min = MT9M111_MIN_DARK_ROWS; - icd->height_min = MT9M111_MIN_DARK_COLS; icd->y_skip_top = 0; + mt9m111->rect.left = MT9M111_MIN_DARK_COLS; + mt9m111->rect.top = MT9M111_MIN_DARK_ROWS; + mt9m111->rect.width = MT9M111_MAX_WIDTH; + mt9m111->rect.height = MT9M111_MAX_HEIGHT; + ret = mt9m111_video_probe(icd, client); if (ret) { icd->ops = NULL; diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index cd3eb7731ac..f234ba60204 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -47,7 +47,7 @@ #define MT9T031_MAX_HEIGHT 1536 #define MT9T031_MAX_WIDTH 2048 #define MT9T031_MIN_HEIGHT 2 -#define MT9T031_MIN_WIDTH 2 +#define MT9T031_MIN_WIDTH 18 #define MT9T031_HORIZONTAL_BLANK 142 #define MT9T031_VERTICAL_BLANK 25 #define MT9T031_COLUMN_SKIP 32 @@ -69,10 +69,11 @@ static const struct soc_camera_data_format mt9t031_colour_formats[] = { struct mt9t031 { struct v4l2_subdev subdev; + struct v4l2_rect rect; /* Sensor window */ int model; /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */ - unsigned char autoexposure; u16 xskip; u16 yskip; + unsigned char autoexposure; }; static struct mt9t031 *to_mt9t031(const struct i2c_client *client) @@ -218,56 +219,68 @@ static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd) return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM); } -/* Round up minima and round down maxima */ -static void recalculate_limits(struct soc_camera_device *icd, - u16 xskip, u16 yskip) +/* target must be _even_ */ +static u16 mt9t031_skip(s32 *source, s32 target, s32 max) { - 
icd->rect_max.left = (MT9T031_COLUMN_SKIP + xskip - 1) / xskip; - icd->rect_max.top = (MT9T031_ROW_SKIP + yskip - 1) / yskip; - icd->width_min = (MT9T031_MIN_WIDTH + xskip - 1) / xskip; - icd->height_min = (MT9T031_MIN_HEIGHT + yskip - 1) / yskip; - icd->rect_max.width = MT9T031_MAX_WIDTH / xskip; - icd->rect_max.height = MT9T031_MAX_HEIGHT / yskip; + unsigned int skip; + + if (*source < target + target / 2) { + *source = target; + return 1; + } + + skip = min(max, *source + target / 2) / target; + if (skip > 8) + skip = 8; + *source = target * skip; + + return skip; } +/* rect is the sensor rectangle, the caller guarantees parameter validity */ static int mt9t031_set_params(struct soc_camera_device *icd, struct v4l2_rect *rect, u16 xskip, u16 yskip) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct mt9t031 *mt9t031 = to_mt9t031(client); int ret; - u16 xbin, ybin, width, height, left, top; + u16 xbin, ybin; const u16 hblank = MT9T031_HORIZONTAL_BLANK, vblank = MT9T031_VERTICAL_BLANK; - width = rect->width * xskip; - height = rect->height * yskip; - left = rect->left * xskip; - top = rect->top * yskip; - xbin = min(xskip, (u16)3); ybin = min(yskip, (u16)3); - dev_dbg(&client->dev, "xskip %u, width %u/%u, yskip %u, height %u/%u\n", - xskip, width, rect->width, yskip, height, rect->height); - - /* Could just do roundup(rect->left, [xy]bin * 2); but this is cheaper */ + /* + * Could just do roundup(rect->left, [xy]bin * 2); but this is cheaper. + * There is always a valid suitably aligned value. The worst case is + * xbin = 3, width = 2048. Then we will start at 36, the last read out + * pixel will be 2083, which is < 2085 - first black pixel. + * + * MT9T031 datasheet imposes window left border alignment, depending on + * the selected xskip. Failing to conform to this requirement produces + * dark horizontal stripes in the image. However, even obeying to this + * requirement doesn't eliminate the stripes in all configurations. They + * appear "locally reproducibly," but can differ between tests under + * different lighting conditions. + */ switch (xbin) { - case 2: - left = (left + 3) & ~3; + case 1: + rect->left &= ~1; break; - case 3: - left = roundup(left, 6); - } - - switch (ybin) { case 2: - top = (top + 3) & ~3; + rect->left &= ~3; break; case 3: - top = roundup(top, 6); + rect->left = rect->left > roundup(MT9T031_COLUMN_SKIP, 6) ? 
+ (rect->left / 6) * 6 : roundup(MT9T031_COLUMN_SKIP, 6); } + rect->top &= ~1; + + dev_dbg(&client->dev, "skip %u:%u, rect %ux%u@%u:%u\n", + xskip, yskip, rect->width, rect->height, rect->left, rect->top); + /* Disable register update, reconfigure atomically */ ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 1); if (ret < 0) @@ -287,27 +300,29 @@ static int mt9t031_set_params(struct soc_camera_device *icd, ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE, ((ybin - 1) << 4) | (yskip - 1)); } - dev_dbg(&client->dev, "new physical left %u, top %u\n", left, top); + dev_dbg(&client->dev, "new physical left %u, top %u\n", + rect->left, rect->top); /* The caller provides a supported format, as guaranteed by * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() */ if (ret >= 0) - ret = reg_write(client, MT9T031_COLUMN_START, left); + ret = reg_write(client, MT9T031_COLUMN_START, rect->left); if (ret >= 0) - ret = reg_write(client, MT9T031_ROW_START, top); + ret = reg_write(client, MT9T031_ROW_START, rect->top); if (ret >= 0) - ret = reg_write(client, MT9T031_WINDOW_WIDTH, width - 1); + ret = reg_write(client, MT9T031_WINDOW_WIDTH, rect->width - 1); if (ret >= 0) ret = reg_write(client, MT9T031_WINDOW_HEIGHT, - height + icd->y_skip_top - 1); + rect->height + icd->y_skip_top - 1); if (ret >= 0 && mt9t031->autoexposure) { - ret = set_shutter(client, height + icd->y_skip_top + vblank); + ret = set_shutter(client, + rect->height + icd->y_skip_top + vblank); if (ret >= 0) { const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; const struct v4l2_queryctrl *qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (shutter_max / 2 + (height + + icd->exposure = (shutter_max / 2 + (rect->height + icd->y_skip_top + vblank - 1) * (qctrl->maximum - qctrl->minimum)) / shutter_max + qctrl->minimum; @@ -318,27 +333,72 @@ static int mt9t031_set_params(struct soc_camera_device *icd, if (ret >= 0) ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 1); + if (ret >= 0) { + mt9t031->rect = *rect; + mt9t031->xskip = xskip; + mt9t031->yskip = yskip; + } + return ret < 0 ? 
ret : 0; } static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct v4l2_rect *rect = &a->c; + struct v4l2_rect rect = a->c; struct i2c_client *client = sd->priv; struct mt9t031 *mt9t031 = to_mt9t031(client); struct soc_camera_device *icd = client->dev.platform_data; - /* Make sure we don't exceed sensor limits */ - if (rect->left + rect->width > icd->rect_max.left + icd->rect_max.width) - rect->left = icd->rect_max.width + icd->rect_max.left - - rect->width; + rect.width = ALIGN(rect.width, 2); + rect.height = ALIGN(rect.height, 2); + + soc_camera_limit_side(&rect.left, &rect.width, + MT9T031_COLUMN_SKIP, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH); + + soc_camera_limit_side(&rect.top, &rect.height, + MT9T031_ROW_SKIP, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT); + + return mt9t031_set_params(icd, &rect, mt9t031->xskip, mt9t031->yskip); +} + +static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) +{ + struct i2c_client *client = sd->priv; + struct mt9t031 *mt9t031 = to_mt9t031(client); + + a->c = mt9t031->rect; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - if (rect->top + rect->height > icd->rect_max.height + icd->rect_max.top) - rect->top = icd->rect_max.height + icd->rect_max.top - - rect->height; + return 0; +} + +static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) +{ + a->bounds.left = MT9T031_COLUMN_SKIP; + a->bounds.top = MT9T031_ROW_SKIP; + a->bounds.width = MT9T031_MAX_WIDTH; + a->bounds.height = MT9T031_MAX_HEIGHT; + a->defrect = a->bounds; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + a->pixelaspect.numerator = 1; + a->pixelaspect.denominator = 1; - /* CROP - no change in scaling, or in limits */ - return mt9t031_set_params(icd, rect, mt9t031->xskip, mt9t031->yskip); + return 0; +} + +static int mt9t031_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +{ + struct i2c_client *client = sd->priv; + struct mt9t031 *mt9t031 = to_mt9t031(client); + struct v4l2_pix_format *pix = &f->fmt.pix; + + pix->width = mt9t031->rect.width / mt9t031->xskip; + pix->height = mt9t031->rect.height / mt9t031->yskip; + pix->pixelformat = V4L2_PIX_FMT_SGRBG10; + pix->field = V4L2_FIELD_NONE; + pix->colorspace = V4L2_COLORSPACE_SRGB; + + return 0; } static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) @@ -346,40 +406,25 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) struct i2c_client *client = sd->priv; struct mt9t031 *mt9t031 = to_mt9t031(client); struct soc_camera_device *icd = client->dev.platform_data; - int ret; + struct v4l2_pix_format *pix = &f->fmt.pix; u16 xskip, yskip; - struct v4l2_rect rect = { - .left = icd->rect_current.left, - .top = icd->rect_current.top, - .width = f->fmt.pix.width, - .height = f->fmt.pix.height, - }; + struct v4l2_rect rect = mt9t031->rect; /* - * try_fmt has put rectangle within limits. - * S_FMT - use binning and skipping for scaling, recalculate - * limits, used for cropping + * try_fmt has put width and height within limits. + * S_FMT: use binning and skipping for scaling */ - /* Is this more optimal than just a division? 
*/ - for (xskip = 8; xskip > 1; xskip--) - if (rect.width * xskip <= MT9T031_MAX_WIDTH) - break; - - for (yskip = 8; yskip > 1; yskip--) - if (rect.height * yskip <= MT9T031_MAX_HEIGHT) - break; - - recalculate_limits(icd, xskip, yskip); - - ret = mt9t031_set_params(icd, &rect, xskip, yskip); - if (!ret) { - mt9t031->xskip = xskip; - mt9t031->yskip = yskip; - } + xskip = mt9t031_skip(&rect.width, pix->width, MT9T031_MAX_WIDTH); + yskip = mt9t031_skip(&rect.height, pix->height, MT9T031_MAX_HEIGHT); - return ret; + /* mt9t031_set_params() doesn't change width and height */ + return mt9t031_set_params(icd, &rect, xskip, yskip); } +/* + * If a user window larger than sensor window is requested, we'll increase the + * sensor window. + */ static int mt9t031_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; @@ -620,12 +665,12 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) if (ctrl->value) { const u16 vblank = MT9T031_VERTICAL_BLANK; const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; - if (set_shutter(client, icd->rect_current.height + + if (set_shutter(client, mt9t031->rect.height + icd->y_skip_top + vblank) < 0) return -EIO; qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); icd->exposure = (shutter_max / 2 + - (icd->rect_current.height + + (mt9t031->rect.height + icd->y_skip_top + vblank - 1) * (qctrl->maximum - qctrl->minimum)) / shutter_max + qctrl->minimum; @@ -645,12 +690,6 @@ static int mt9t031_video_probe(struct i2c_client *client) struct mt9t031 *mt9t031 = to_mt9t031(client); s32 data; - /* We must have a parent by now. And it cannot be a wrong one. - * So this entire test is completely redundant. */ - if (!icd->dev.parent || - to_soc_camera_host(icd->dev.parent)->nr != icd->iface) - return -ENODEV; - /* Enable the chip */ data = reg_write(client, MT9T031_CHIP_ENABLE, 1); dev_dbg(&client->dev, "write: %d\n", data); @@ -688,8 +727,11 @@ static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { .s_stream = mt9t031_s_stream, .s_fmt = mt9t031_s_fmt, + .g_fmt = mt9t031_g_fmt, .try_fmt = mt9t031_try_fmt, .s_crop = mt9t031_s_crop, + .g_crop = mt9t031_g_crop, + .cropcap = mt9t031_cropcap, }; static struct v4l2_subdev_ops mt9t031_subdev_ops = { @@ -731,15 +773,13 @@ static int mt9t031_probe(struct i2c_client *client, /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9t031_ops; - icd->rect_max.left = MT9T031_COLUMN_SKIP; - icd->rect_max.top = MT9T031_ROW_SKIP; - icd->rect_current.left = icd->rect_max.left; - icd->rect_current.top = icd->rect_max.top; - icd->width_min = MT9T031_MIN_WIDTH; - icd->rect_max.width = MT9T031_MAX_WIDTH; - icd->height_min = MT9T031_MIN_HEIGHT; - icd->rect_max.height = MT9T031_MAX_HEIGHT; icd->y_skip_top = 0; + + mt9t031->rect.left = MT9T031_COLUMN_SKIP; + mt9t031->rect.top = MT9T031_ROW_SKIP; + mt9t031->rect.width = MT9T031_MAX_WIDTH; + mt9t031->rect.height = MT9T031_MAX_HEIGHT; + /* Simulated autoexposure. 
If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9t031->autoexposure = 1; diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index ab196542528..35ea0ddd071 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -55,6 +55,13 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\""); /* Progressive scan, master, defaults */ #define MT9V022_CHIP_CONTROL_DEFAULT 0x188 +#define MT9V022_MAX_WIDTH 752 +#define MT9V022_MAX_HEIGHT 480 +#define MT9V022_MIN_WIDTH 48 +#define MT9V022_MIN_HEIGHT 32 +#define MT9V022_COLUMN_SKIP 1 +#define MT9V022_ROW_SKIP 4 + static const struct soc_camera_data_format mt9v022_colour_formats[] = { /* Order important: first natively supported, * second supported with a GPIO extender */ @@ -86,6 +93,8 @@ static const struct soc_camera_data_format mt9v022_monochrome_formats[] = { struct mt9v022 { struct v4l2_subdev subdev; + struct v4l2_rect rect; /* Sensor window */ + __u32 fourcc; int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */ u16 chip_control; }; @@ -250,44 +259,101 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd) static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { - struct v4l2_rect *rect = &a->c; struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); + struct v4l2_rect rect = a->c; struct soc_camera_device *icd = client->dev.platform_data; int ret; + /* Bayer format - even size lengths */ + if (mt9v022->fourcc == V4L2_PIX_FMT_SBGGR8 || + mt9v022->fourcc == V4L2_PIX_FMT_SBGGR16) { + rect.width = ALIGN(rect.width, 2); + rect.height = ALIGN(rect.height, 2); + /* Let the user play with the starting pixel */ + } + + soc_camera_limit_side(&rect.left, &rect.width, + MT9V022_COLUMN_SKIP, MT9V022_MIN_WIDTH, MT9V022_MAX_WIDTH); + + soc_camera_limit_side(&rect.top, &rect.height, + MT9V022_ROW_SKIP, MT9V022_MIN_HEIGHT, MT9V022_MAX_HEIGHT); + /* Like in example app. Contradicts the datasheet though */ ret = reg_read(client, MT9V022_AEC_AGC_ENABLE); if (ret >= 0) { if (ret & 1) /* Autoexposure */ ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, - rect->height + icd->y_skip_top + 43); + rect.height + icd->y_skip_top + 43); else ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH, - rect->height + icd->y_skip_top + 43); + rect.height + icd->y_skip_top + 43); } /* Setup frame format: defaults apart from width and height */ if (!ret) - ret = reg_write(client, MT9V022_COLUMN_START, rect->left); + ret = reg_write(client, MT9V022_COLUMN_START, rect.left); if (!ret) - ret = reg_write(client, MT9V022_ROW_START, rect->top); + ret = reg_write(client, MT9V022_ROW_START, rect.top); if (!ret) /* Default 94, Phytec driver says: * "width + horizontal blank >= 660" */ ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING, - rect->width > 660 - 43 ? 43 : - 660 - rect->width); + rect.width > 660 - 43 ? 
43 : + 660 - rect.width); if (!ret) ret = reg_write(client, MT9V022_VERTICAL_BLANKING, 45); if (!ret) - ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect->width); + ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width); if (!ret) ret = reg_write(client, MT9V022_WINDOW_HEIGHT, - rect->height + icd->y_skip_top); + rect.height + icd->y_skip_top); if (ret < 0) return ret; - dev_dbg(&client->dev, "Frame %ux%u pixel\n", rect->width, rect->height); + dev_dbg(&client->dev, "Frame %ux%u pixel\n", rect.width, rect.height); + + mt9v022->rect = rect; + + return 0; +} + +static int mt9v022_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) +{ + struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); + + a->c = mt9v022->rect; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + return 0; +} + +static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) +{ + a->bounds.left = MT9V022_COLUMN_SKIP; + a->bounds.top = MT9V022_ROW_SKIP; + a->bounds.width = MT9V022_MAX_WIDTH; + a->bounds.height = MT9V022_MAX_HEIGHT; + a->defrect = a->bounds; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + a->pixelaspect.numerator = 1; + a->pixelaspect.denominator = 1; + + return 0; +} + +static int mt9v022_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +{ + struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); + struct v4l2_pix_format *pix = &f->fmt.pix; + + pix->width = mt9v022->rect.width; + pix->height = mt9v022->rect.height; + pix->pixelformat = mt9v022->fourcc; + pix->field = V4L2_FIELD_NONE; + pix->colorspace = V4L2_COLORSPACE_SRGB; return 0; } @@ -296,16 +362,16 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; struct mt9v022 *mt9v022 = to_mt9v022(client); - struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_crop a = { .c = { - .left = icd->rect_current.left, - .top = icd->rect_current.top, + .left = mt9v022->rect.left, + .top = mt9v022->rect.top, .width = pix->width, .height = pix->height, }, }; + int ret; /* The caller provides a supported format, as verified per call to * icd->try_fmt(), datawidth is from our supported format list */ @@ -328,7 +394,14 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) } /* No support for scaling on this camera, just crop. */ - return mt9v022_s_crop(sd, &a); + ret = mt9v022_s_crop(sd, &a); + if (!ret) { + pix->width = mt9v022->rect.width; + pix->height = mt9v022->rect.height; + mt9v022->fourcc = pix->pixelformat; + } + + return ret; } static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) @@ -336,10 +409,13 @@ static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) struct i2c_client *client = sd->priv; struct soc_camera_device *icd = client->dev.platform_data; struct v4l2_pix_format *pix = &f->fmt.pix; + int align = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || + pix->pixelformat == V4L2_PIX_FMT_SBGGR16; - v4l_bound_align_image(&pix->width, 48, 752, 2 /* ? 
*/, - &pix->height, 32 + icd->y_skip_top, - 480 + icd->y_skip_top, 0, 0); + v4l_bound_align_image(&pix->width, MT9V022_MIN_WIDTH, + MT9V022_MAX_WIDTH, align, + &pix->height, MT9V022_MIN_HEIGHT + icd->y_skip_top, + MT9V022_MAX_HEIGHT + icd->y_skip_top, align, 0); return 0; } @@ -669,6 +745,8 @@ static int mt9v022_video_probe(struct soc_camera_device *icd, if (flags & SOCAM_DATAWIDTH_8) icd->num_formats++; + mt9v022->fourcc = icd->formats->fourcc; + dev_info(&client->dev, "Detected a MT9V022 chip ID %x, %s sensor\n", data, mt9v022->model == V4L2_IDENT_MT9V022IX7ATM ? "monochrome" : "colour"); @@ -679,10 +757,9 @@ ei2c: static void mt9v022_video_remove(struct soc_camera_device *icd) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct soc_camera_link *icl = to_soc_camera_link(icd); - dev_dbg(&client->dev, "Video %x removed: %p, %p\n", client->addr, + dev_dbg(&icd->dev, "Video removed: %p, %p\n", icd->dev.parent, icd->vdev); if (icl->free_bus) icl->free_bus(icl); @@ -701,8 +778,11 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = { static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { .s_stream = mt9v022_s_stream, .s_fmt = mt9v022_s_fmt, + .g_fmt = mt9v022_g_fmt, .try_fmt = mt9v022_try_fmt, .s_crop = mt9v022_s_crop, + .g_crop = mt9v022_g_crop, + .cropcap = mt9v022_cropcap, }; static struct v4l2_subdev_ops mt9v022_subdev_ops = { @@ -745,16 +825,13 @@ static int mt9v022_probe(struct i2c_client *client, mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT; icd->ops = &mt9v022_ops; - icd->rect_max.left = 1; - icd->rect_max.top = 4; - icd->rect_max.width = 752; - icd->rect_max.height = 480; - icd->rect_current.left = 1; - icd->rect_current.top = 4; - icd->width_min = 48; - icd->height_min = 32; icd->y_skip_top = 1; + mt9v022->rect.left = MT9V022_COLUMN_SKIP; + mt9v022->rect.top = MT9V022_ROW_SKIP; + mt9v022->rect.width = MT9V022_MAX_WIDTH; + mt9v022->rect.height = MT9V022_MAX_HEIGHT; + ret = mt9v022_video_probe(icd, client); if (ret) { icd->ops = NULL; diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 1f1324a1d49..3875483ab9d 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -126,7 +126,7 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, { struct soc_camera_device *icd = vq->priv_data; - *size = icd->rect_current.width * icd->rect_current.height * + *size = icd->user_width * icd->user_height * ((icd->current_fmt->depth + 7) >> 3); if (!*count) @@ -178,12 +178,12 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq, buf->inwork = 1; if (buf->fmt != icd->current_fmt || - vb->width != icd->rect_current.width || - vb->height != icd->rect_current.height || + vb->width != icd->user_width || + vb->height != icd->user_height || vb->field != field) { buf->fmt = icd->current_fmt; - vb->width = icd->rect_current.width; - vb->height = icd->rect_current.height; + vb->width = icd->user_width; + vb->height = icd->user_height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index d5b51e9900b..dff2e5e2d8c 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -220,7 +220,7 @@ static int mx3_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, if (!mx3_cam->idmac_channel[0]) return -EINVAL; - *size = icd->rect_current.width * icd->rect_current.height * bpp; + *size = icd->user_width * icd->user_height * bpp; if (!*count) 
*count = 32; @@ -241,7 +241,7 @@ static int mx3_videobuf_prepare(struct videobuf_queue *vq, struct mx3_camera_buffer *buf = container_of(vb, struct mx3_camera_buffer, vb); /* current_fmt _must_ always be set */ - size_t new_size = icd->rect_current.width * icd->rect_current.height * + size_t new_size = icd->user_width * icd->user_height * ((icd->current_fmt->depth + 7) >> 3); int ret; @@ -251,12 +251,12 @@ static int mx3_videobuf_prepare(struct videobuf_queue *vq, */ if (buf->fmt != icd->current_fmt || - vb->width != icd->rect_current.width || - vb->height != icd->rect_current.height || + vb->width != icd->user_width || + vb->height != icd->user_height || vb->field != field) { buf->fmt = icd->current_fmt; - vb->width = icd->rect_current.width; - vb->height = icd->rect_current.height; + vb->width = icd->user_width; + vb->height = icd->user_height; vb->field = field; if (vb->state != VIDEOBUF_NEEDS_INIT) free_buffer(vq, buf); @@ -354,9 +354,9 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, /* This is the configuration of one sg-element */ video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc); - video->out_width = icd->rect_current.width; - video->out_height = icd->rect_current.height; - video->out_stride = icd->rect_current.width; + video->out_width = icd->user_width; + video->out_height = icd->user_height; + video->out_stride = icd->user_width; #ifdef DEBUG /* helps to see what DMA actually has written */ @@ -541,7 +541,7 @@ static bool channel_change_requested(struct soc_camera_device *icd, /* Do buffers have to be re-allocated or channel re-configured? */ return ichan && rect->width * rect->height > - icd->rect_current.width * icd->rect_current.height; + icd->user_width * icd->user_height; } static int test_platform_param(struct mx3_camera_dev *mx3_cam, @@ -589,8 +589,8 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam, *flags |= SOCAM_DATAWIDTH_4; break; default: - dev_info(mx3_cam->soc_host.v4l2_dev.dev, "Unsupported bus width %d\n", - buswidth); + dev_warn(mx3_cam->soc_host.v4l2_dev.dev, + "Unsupported bus width %d\n", buswidth); return -EINVAL; } @@ -605,8 +605,7 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd, unsigned long bus_flags, camera_flags; int ret = test_platform_param(mx3_cam, depth, &bus_flags); - dev_dbg(icd->dev.parent, "requested bus width %d bit: %d\n", - depth, ret); + dev_dbg(icd->dev.parent, "request bus width %d bit: %d\n", depth, ret); if (ret < 0) return ret; @@ -727,13 +726,13 @@ passthrough: } static void configure_geometry(struct mx3_camera_dev *mx3_cam, - struct v4l2_rect *rect) + unsigned int width, unsigned int height) { u32 ctrl, width_field, height_field; /* Setup frame size - this cannot be changed on-the-fly... 
*/ - width_field = rect->width - 1; - height_field = rect->height - 1; + width_field = width - 1; + height_field = height - 1; csi_reg_write(mx3_cam, width_field | (height_field << 16), CSI_SENS_FRM_SIZE); csi_reg_write(mx3_cam, width_field << 16, CSI_FLASH_STROBE_1); @@ -745,11 +744,6 @@ static void configure_geometry(struct mx3_camera_dev *mx3_cam, ctrl = csi_reg_read(mx3_cam, CSI_OUT_FRM_CTRL) & 0xffff0000; /* Sensor does the cropping */ csi_reg_write(mx3_cam, ctrl | 0 | (0 << 8), CSI_OUT_FRM_CTRL); - - /* - * No need to free resources here if we fail, we'll see if we need to - * do this next time we are called - */ } static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam) @@ -786,6 +780,22 @@ static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam) return 0; } +/* + * FIXME: learn to use stride != width, then we can keep stride properly aligned + * and support arbitrary (even) widths. + */ +static inline void stride_align(__s32 *width) +{ + if (((*width + 7) & ~7) < 4096) + *width = (*width + 7) & ~7; + else + *width = *width & ~7; +} + +/* + * As long as we don't implement host-side cropping and scaling, we can use + * default g_crop and cropcap from soc_camera.c + */ static int mx3_camera_set_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { @@ -793,20 +803,51 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE}; + struct v4l2_pix_format *pix = &f.fmt.pix; + int ret; - /* - * We now know pixel formats and can decide upon DMA-channel(s) - * So far only direct camera-to-memory is supported - */ - if (channel_change_requested(icd, rect)) { - int ret = acquire_dma_channel(mx3_cam); + soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096); + soc_camera_limit_side(&rect->top, &rect->height, 0, 2, 4096); + + ret = v4l2_subdev_call(sd, video, s_crop, a); + if (ret < 0) + return ret; + + /* The capture device might have changed its output */ + ret = v4l2_subdev_call(sd, video, g_fmt, &f); + if (ret < 0) + return ret; + + if (pix->width & 7) { + /* Ouch! We can only handle 8-byte aligned width... 
*/ + stride_align(&pix->width); + ret = v4l2_subdev_call(sd, video, s_fmt, &f); if (ret < 0) return ret; } - configure_geometry(mx3_cam, rect); + if (pix->width != icd->user_width || pix->height != icd->user_height) { + /* + * We now know pixel formats and can decide upon DMA-channel(s) + * So far only direct camera-to-memory is supported + */ + if (channel_change_requested(icd, rect)) { + int ret = acquire_dma_channel(mx3_cam); + if (ret < 0) + return ret; + } - return v4l2_subdev_call(sd, video, s_crop, a); + configure_geometry(mx3_cam, pix->width, pix->height); + } + + dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n", + pix->width, pix->height); + + icd->user_width = pix->width; + icd->user_height = pix->height; + + return ret; } static int mx3_camera_set_fmt(struct soc_camera_device *icd, @@ -817,12 +858,6 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; - struct v4l2_rect rect = { - .left = icd->rect_current.left, - .top = icd->rect_current.top, - .width = pix->width, - .height = pix->height, - }; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); @@ -832,6 +867,9 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, return -EINVAL; } + stride_align(&pix->width); + dev_dbg(icd->dev.parent, "Set format %dx%d\n", pix->width, pix->height); + ret = acquire_dma_channel(mx3_cam); if (ret < 0) return ret; @@ -842,7 +880,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, * mxc_v4l2_s_fmt() */ - configure_geometry(mx3_cam, &rect); + configure_geometry(mx3_cam, pix->width, pix->height); ret = v4l2_subdev_call(sd, video, s_fmt, f); if (!ret) { @@ -850,6 +888,8 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, icd->current_fmt = xlate->host_fmt; } + dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height); + return ret; } diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index bbf5331a2ea..776a91dcfbe 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -382,11 +382,10 @@ struct regval_list { }; struct ov772x_color_format { - char *name; - __u32 fourcc; - u8 dsp3; - u8 com3; - u8 com7; + const struct soc_camera_data_format *format; + u8 dsp3; + u8 com3; + u8 com7; }; struct ov772x_win_size { @@ -481,43 +480,43 @@ static const struct soc_camera_data_format ov772x_fmt_lists[] = { */ static const struct ov772x_color_format ov772x_cfmts[] = { { - SETFOURCC(YUYV), + .format = &ov772x_fmt_lists[0], .dsp3 = 0x0, .com3 = SWAP_YUV, .com7 = OFMT_YUV, }, { - SETFOURCC(YVYU), + .format = &ov772x_fmt_lists[1], .dsp3 = UV_ON, .com3 = SWAP_YUV, .com7 = OFMT_YUV, }, { - SETFOURCC(UYVY), + .format = &ov772x_fmt_lists[2], .dsp3 = 0x0, .com3 = 0x0, .com7 = OFMT_YUV, }, { - SETFOURCC(RGB555), + .format = &ov772x_fmt_lists[3], .dsp3 = 0x0, .com3 = SWAP_RGB, .com7 = FMT_RGB555 | OFMT_RGB, }, { - SETFOURCC(RGB555X), + .format = &ov772x_fmt_lists[4], .dsp3 = 0x0, .com3 = 0x0, .com7 = FMT_RGB555 | OFMT_RGB, }, { - SETFOURCC(RGB565), + .format = &ov772x_fmt_lists[5], .dsp3 = 0x0, .com3 = SWAP_RGB, .com7 = FMT_RGB565 | OFMT_RGB, }, { - SETFOURCC(RGB565X), + .format = &ov772x_fmt_lists[6], .dsp3 = 0x0, .com3 = 0x0, .com7 = FMT_RGB565 | OFMT_RGB, @@ -648,8 +647,8 @@ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable) ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0); - dev_dbg(&client->dev, - "format %s, win %s\n", priv->fmt->name, 
priv->win->name); + dev_dbg(&client->dev, "format %s, win %s\n", + priv->fmt->format->name, priv->win->name); return 0; } @@ -818,7 +817,7 @@ static int ov772x_set_params(struct i2c_client *client, */ priv->fmt = NULL; for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) { - if (pixfmt == ov772x_cfmts[i].fourcc) { + if (pixfmt == ov772x_cfmts[i].format->fourcc) { priv->fmt = ov772x_cfmts + i; break; } @@ -955,6 +954,56 @@ ov772x_set_fmt_error: return ret; } +static int ov772x_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) +{ + a->c.left = 0; + a->c.top = 0; + a->c.width = VGA_WIDTH; + a->c.height = VGA_HEIGHT; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + return 0; +} + +static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) +{ + a->bounds.left = 0; + a->bounds.top = 0; + a->bounds.width = VGA_WIDTH; + a->bounds.height = VGA_HEIGHT; + a->defrect = a->bounds; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + a->pixelaspect.numerator = 1; + a->pixelaspect.denominator = 1; + + return 0; +} + +static int ov772x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +{ + struct i2c_client *client = sd->priv; + struct ov772x_priv *priv = to_ov772x(client); + struct v4l2_pix_format *pix = &f->fmt.pix; + + if (!priv->win || !priv->fmt) { + u32 width = VGA_WIDTH, height = VGA_HEIGHT; + int ret = ov772x_set_params(client, &width, &height, + V4L2_PIX_FMT_YUYV); + if (ret < 0) + return ret; + } + + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + pix->width = priv->win->width; + pix->height = priv->win->height; + pix->pixelformat = priv->fmt->format->fourcc; + pix->colorspace = priv->fmt->format->colorspace; + pix->field = V4L2_FIELD_NONE; + + return 0; +} + static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; @@ -1060,8 +1109,11 @@ static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = { static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = { .s_stream = ov772x_s_stream, + .g_fmt = ov772x_g_fmt, .s_fmt = ov772x_s_fmt, .try_fmt = ov772x_try_fmt, + .cropcap = ov772x_cropcap, + .g_crop = ov772x_g_crop, }; static struct v4l2_subdev_ops ov772x_subdev_ops = { @@ -1110,8 +1162,6 @@ static int ov772x_probe(struct i2c_client *client, v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops); icd->ops = &ov772x_ops; - icd->rect_max.width = MAX_WIDTH; - icd->rect_max.height = MAX_HEIGHT; ret = ov772x_video_probe(icd, client); if (ret) { diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 1fd6ef392a5..a19bb76e175 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -225,6 +225,10 @@ struct pxa_camera_dev { u32 save_cicr[5]; }; +struct pxa_cam { + unsigned long flags; +}; + static const char *pxa_cam_driver_description = "PXA_Camera"; static unsigned int vid_limit = 16; /* Video memory limit, in Mb */ @@ -239,7 +243,7 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size); - *size = roundup(icd->rect_current.width * icd->rect_current.height * + *size = roundup(icd->user_width * icd->user_height * ((icd->current_fmt->depth + 7) >> 3), 8); if (0 == *count) @@ -443,12 +447,12 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, buf->inwork = 1; if (buf->fmt != icd->current_fmt || - vb->width != icd->rect_current.width || - vb->height != icd->rect_current.height || + vb->width != icd->user_width || + vb->height != icd->user_height || vb->field != field) { buf->fmt = 
icd->current_fmt; - vb->width = icd->rect_current.width; - vb->height = icd->rect_current.height; + vb->width = icd->user_width; + vb->height = icd->user_height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } @@ -839,7 +843,7 @@ static u32 mclk_get_divisor(struct platform_device *pdev, struct pxa_camera_dev *pcdev) { unsigned long mclk = pcdev->mclk; - struct device *dev = pcdev->soc_host.v4l2_dev.dev; + struct device *dev = &pdev->dev; u32 div; unsigned long lcdclk; @@ -1040,57 +1044,17 @@ static int test_platform_param(struct pxa_camera_dev *pcdev, return 0; } -static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) +static void pxa_camera_setup_cicr(struct soc_camera_device *icd, + unsigned long flags, __u32 pixfmt) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; - unsigned long dw, bpp, bus_flags, camera_flags, common_flags; + unsigned long dw, bpp; u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0; - int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags); - - if (ret < 0) - return ret; - - camera_flags = icd->ops->query_bus_param(icd); - - common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags); - if (!common_flags) - return -EINVAL; - - pcdev->channels = 1; - - /* Make choises, based on platform preferences */ - if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) && - (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) { - if (pcdev->platform_flags & PXA_CAMERA_HSP) - common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH; - else - common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW; - } - - if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) && - (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) { - if (pcdev->platform_flags & PXA_CAMERA_VSP) - common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH; - else - common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW; - } - - if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) && - (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) { - if (pcdev->platform_flags & PXA_CAMERA_PCP) - common_flags &= ~SOCAM_PCLK_SAMPLE_RISING; - else - common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING; - } - - ret = icd->ops->set_bus_param(icd, common_flags); - if (ret < 0) - return ret; /* Datawidth is now guaranteed to be equal to one of the three values. * We fix bit-per-pixel equal to data-width... 
*/ - switch (common_flags & SOCAM_DATAWIDTH_MASK) { + switch (flags & SOCAM_DATAWIDTH_MASK) { case SOCAM_DATAWIDTH_10: dw = 4; bpp = 0x40; @@ -1111,18 +1075,18 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) cicr4 |= CICR4_PCLK_EN; if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) cicr4 |= CICR4_MCLK_EN; - if (common_flags & SOCAM_PCLK_SAMPLE_FALLING) + if (flags & SOCAM_PCLK_SAMPLE_FALLING) cicr4 |= CICR4_PCP; - if (common_flags & SOCAM_HSYNC_ACTIVE_LOW) + if (flags & SOCAM_HSYNC_ACTIVE_LOW) cicr4 |= CICR4_HSP; - if (common_flags & SOCAM_VSYNC_ACTIVE_LOW) + if (flags & SOCAM_VSYNC_ACTIVE_LOW) cicr4 |= CICR4_VSP; cicr0 = __raw_readl(pcdev->base + CICR0); if (cicr0 & CICR0_ENB) __raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0); - cicr1 = CICR1_PPL_VAL(icd->rect_current.width - 1) | bpp | dw; + cicr1 = CICR1_PPL_VAL(icd->user_width - 1) | bpp | dw; switch (pixfmt) { case V4L2_PIX_FMT_YUV422P: @@ -1151,7 +1115,7 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) } cicr2 = 0; - cicr3 = CICR3_LPF_VAL(icd->rect_current.height - 1) | + cicr3 = CICR3_LPF_VAL(icd->user_height - 1) | CICR3_BFW_VAL(min((unsigned short)255, icd->y_skip_top)); cicr4 |= pcdev->mclk_divisor; @@ -1165,6 +1129,59 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP)); cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK; __raw_writel(cicr0, pcdev->base + CICR0); +} + +static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) +{ + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct pxa_camera_dev *pcdev = ici->priv; + unsigned long bus_flags, camera_flags, common_flags; + int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags); + struct pxa_cam *cam = icd->host_priv; + + if (ret < 0) + return ret; + + camera_flags = icd->ops->query_bus_param(icd); + + common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags); + if (!common_flags) + return -EINVAL; + + pcdev->channels = 1; + + /* Make choises, based on platform preferences */ + if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) && + (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) { + if (pcdev->platform_flags & PXA_CAMERA_HSP) + common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH; + else + common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW; + } + + if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) && + (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) { + if (pcdev->platform_flags & PXA_CAMERA_VSP) + common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH; + else + common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW; + } + + if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) && + (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) { + if (pcdev->platform_flags & PXA_CAMERA_PCP) + common_flags &= ~SOCAM_PCLK_SAMPLE_RISING; + else + common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING; + } + + cam->flags = common_flags; + + ret = icd->ops->set_bus_param(icd, common_flags); + if (ret < 0) + return ret; + + pxa_camera_setup_cicr(icd, common_flags, pixfmt); return 0; } @@ -1230,6 +1247,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, { struct device *dev = icd->dev.parent; int formats = 0, buswidth, ret; + struct pxa_cam *cam; buswidth = required_buswidth(icd->formats + idx); @@ -1240,6 +1258,16 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, if (ret < 0) return 0; + if (!icd->host_priv) { + cam = kzalloc(sizeof(*cam), GFP_KERNEL); + if (!cam) + return -ENOMEM; + + icd->host_priv = cam; + } else { + cam = 
icd->host_priv; + } + switch (icd->formats[idx].fourcc) { case V4L2_PIX_FMT_UYVY: formats++; @@ -1284,6 +1312,19 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, return formats; } +static void pxa_camera_put_formats(struct soc_camera_device *icd) +{ + kfree(icd->host_priv); + icd->host_priv = NULL; +} + +static int pxa_camera_check_frame(struct v4l2_pix_format *pix) +{ + /* limit to pxa hardware capabilities */ + return pix->height < 32 || pix->height > 2048 || pix->width < 48 || + pix->width > 2048 || (pix->width & 0x01); +} + static int pxa_camera_set_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { @@ -1296,6 +1337,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, }; + struct v4l2_format f; + struct v4l2_pix_format *pix = &f.fmt.pix, pix_tmp; + struct pxa_cam *cam = icd->host_priv; int ret; /* If PCLK is used to latch data from the sensor, check sense */ @@ -1309,7 +1353,37 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, if (ret < 0) { dev_warn(dev, "Failed to crop to %ux%u@%u:%u\n", rect->width, rect->height, rect->left, rect->top); - } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { + return ret; + } + + f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = v4l2_subdev_call(sd, video, g_fmt, &f); + if (ret < 0) + return ret; + + pix_tmp = *pix; + if (pxa_camera_check_frame(pix)) { + /* + * Camera cropping produced a frame beyond our capabilities. + * FIXME: just extract a subframe, that we can process. + */ + v4l_bound_align_image(&pix->width, 48, 2048, 1, + &pix->height, 32, 2048, 0, + icd->current_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? + 4 : 0); + ret = v4l2_subdev_call(sd, video, s_fmt, &f); + if (ret < 0) + return ret; + + if (pxa_camera_check_frame(pix)) { + dev_warn(icd->dev.parent, + "Inconsistent state. Use S_FMT to repair\n"); + return -EINVAL; + } + } + + if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { dev_err(dev, "pixel clock %lu set by the camera too high!", @@ -1319,6 +1393,11 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, recalculate_fifo_timeout(pcdev, sense.pixel_clock); } + icd->user_width = pix->width; + icd->user_height = pix->height; + + pxa_camera_setup_cicr(icd, cam->flags, icd->current_fmt->fourcc); + return ret; } @@ -1359,6 +1438,11 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, if (ret < 0) { dev_warn(dev, "Failed to configure for format %x\n", pix->pixelformat); + } else if (pxa_camera_check_frame(pix)) { + dev_warn(dev, + "Camera driver produced an unsupported frame %dx%d\n", + pix->width, pix->height); + ret = -EINVAL; } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { dev_err(dev, @@ -1402,7 +1486,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, */ v4l_bound_align_image(&pix->width, 48, 2048, 1, &pix->height, 32, 2048, 0, - xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0); + pixfmt == V4L2_PIX_FMT_YUV422P ? 
4 : 0); pix->bytesperline = pix->width * DIV_ROUND_UP(xlate->host_fmt->depth, 8); @@ -1412,7 +1496,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, pix->pixelformat = xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ ret = v4l2_subdev_call(sd, video, try_fmt, f); - pix->pixelformat = xlate->host_fmt->fourcc; + pix->pixelformat = pixfmt; field = pix->field; @@ -1525,6 +1609,7 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = { .resume = pxa_camera_resume, .set_crop = pxa_camera_set_crop, .get_formats = pxa_camera_get_formats, + .put_formats = pxa_camera_put_formats, .set_fmt = pxa_camera_set_fmt, .try_fmt = pxa_camera_try_fmt, .init_videobuf = pxa_camera_init_videobuf, diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 3457bababd3..5ab7c5aefd6 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -74,6 +74,13 @@ #define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ #define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ +#undef DEBUG_GEOMETRY +#ifdef DEBUG_GEOMETRY +#define dev_geo dev_info +#else +#define dev_geo dev_dbg +#endif + /* per video frame buffer */ struct sh_mobile_ceu_buffer { struct videobuf_buffer vb; /* v4l buffer must be first */ @@ -103,8 +110,9 @@ struct sh_mobile_ceu_dev { }; struct sh_mobile_ceu_cam { - struct v4l2_rect camera_rect; - struct v4l2_rect camera_max; + struct v4l2_rect ceu_rect; + unsigned int cam_width; + unsigned int cam_height; const struct soc_camera_data_format *extra_fmt; const struct soc_camera_data_format *camera_fmt; }; @@ -156,7 +164,7 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq, struct sh_mobile_ceu_dev *pcdev = ici->priv; int bytes_per_pixel = (icd->current_fmt->depth + 7) >> 3; - *size = PAGE_ALIGN(icd->rect_current.width * icd->rect_current.height * + *size = PAGE_ALIGN(icd->user_width * icd->user_height * bytes_per_pixel); if (0 == *count) @@ -176,8 +184,9 @@ static void free_buffer(struct videobuf_queue *vq, struct sh_mobile_ceu_buffer *buf) { struct soc_camera_device *icd = vq->priv_data; + struct device *dev = icd->dev.parent; - dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %zd\n", __func__, + dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %zd\n", __func__, &buf->vb, buf->vb.baddr, buf->vb.bsize); if (in_interrupt()) @@ -185,7 +194,7 @@ static void free_buffer(struct videobuf_queue *vq, videobuf_waiton(&buf->vb, 0, 0); videobuf_dma_contig_free(vq, &buf->vb); - dev_dbg(icd->dev.parent, "%s freed\n", __func__); + dev_dbg(dev, "%s freed\n", __func__); buf->vb.state = VIDEOBUF_NEEDS_INIT; } @@ -216,7 +225,7 @@ static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) phys_addr_top = videobuf_to_dma_contig(pcdev->active); ceu_write(pcdev, CDAYR, phys_addr_top); if (pcdev->is_interlaced) { - phys_addr_bottom = phys_addr_top + icd->rect_current.width; + phys_addr_bottom = phys_addr_top + icd->user_width; ceu_write(pcdev, CDBYR, phys_addr_bottom); } @@ -225,12 +234,12 @@ static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: - phys_addr_top += icd->rect_current.width * - icd->rect_current.height; + phys_addr_top += icd->user_width * + icd->user_height; ceu_write(pcdev, CDACR, phys_addr_top); if (pcdev->is_interlaced) { phys_addr_bottom = phys_addr_top + - icd->rect_current.width; + icd->user_width; ceu_write(pcdev, CDBCR, phys_addr_bottom); } } @@ -264,12 
+273,12 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq, BUG_ON(NULL == icd->current_fmt); if (buf->fmt != icd->current_fmt || - vb->width != icd->rect_current.width || - vb->height != icd->rect_current.height || + vb->width != icd->user_width || + vb->height != icd->user_height || vb->field != field) { buf->fmt = icd->current_fmt; - vb->width = icd->rect_current.width; - vb->height = icd->rect_current.height; + vb->width = icd->user_width; + vb->height = icd->user_height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } @@ -451,18 +460,6 @@ static unsigned int size_dst(unsigned int src, unsigned int scale) mant_pre * 4096 / scale + 1; } -static unsigned int size_src(unsigned int dst, unsigned int scale) -{ - unsigned int mant_pre = scale >> 12, tmp; - if (!dst || !scale) - return dst; - for (tmp = ((dst - 1) * scale + 2048 * mant_pre) / 4096 + 1; - size_dst(tmp, scale) < dst; - tmp++) - ; - return tmp; -} - static u16 calc_scale(unsigned int src, unsigned int *dst) { u16 scale; @@ -482,65 +479,46 @@ static u16 calc_scale(unsigned int src, unsigned int *dst) /* rect is guaranteed to not exceed the scaled camera rectangle */ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd, - struct v4l2_rect *rect) + unsigned int out_width, + unsigned int out_height) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_cam *cam = icd->host_priv; + struct v4l2_rect *rect = &cam->ceu_rect; struct sh_mobile_ceu_dev *pcdev = ici->priv; - int width, height, cfszr_width, cdwdr_width, in_width, in_height; - unsigned int left_offset, top_offset, left, top; - unsigned int hscale = pcdev->cflcr & 0xffff; - unsigned int vscale = (pcdev->cflcr >> 16) & 0xffff; + unsigned int height, width, cdwdr_width, in_width, in_height; + unsigned int left_offset, top_offset; u32 camor; - /* Switch to the camera scale */ - left = size_src(rect->left, hscale); - top = size_src(rect->top, vscale); - - dev_dbg(icd->dev.parent, "Left %u * 0x%x = %u, top %u * 0x%x = %u\n", - rect->left, hscale, left, rect->top, vscale, top); - - if (left > cam->camera_rect.left) { - left_offset = left - cam->camera_rect.left; - } else { - left_offset = 0; - left = cam->camera_rect.left; - } - - if (top > cam->camera_rect.top) { - top_offset = top - cam->camera_rect.top; - } else { - top_offset = 0; - top = cam->camera_rect.top; - } + dev_dbg(icd->dev.parent, "Crop %ux%u@%u:%u\n", + rect->width, rect->height, rect->left, rect->top); - dev_dbg(icd->dev.parent, "New left %u, top %u, offsets %u:%u\n", - rect->left, rect->top, left_offset, top_offset); + left_offset = rect->left; + top_offset = rect->top; if (pcdev->image_mode) { - width = rect->width; - in_width = cam->camera_rect.width; + in_width = rect->width; if (!pcdev->is_16bit) { - width *= 2; in_width *= 2; left_offset *= 2; } - cfszr_width = cdwdr_width = rect->width; + width = cdwdr_width = out_width; } else { unsigned int w_factor = (icd->current_fmt->depth + 7) >> 3; + + width = out_width * w_factor / 2; + if (!pcdev->is_16bit) w_factor *= 2; - width = rect->width * w_factor / 2; - in_width = cam->camera_rect.width * w_factor / 2; + in_width = rect->width * w_factor / 2; left_offset = left_offset * w_factor / 2; - cfszr_width = pcdev->is_16bit ? width : width / 2; - cdwdr_width = pcdev->is_16bit ? 
width * 2 : width; + cdwdr_width = width * 2; } - height = rect->height; - in_height = cam->camera_rect.height; + height = out_height; + in_height = rect->height; if (pcdev->is_interlaced) { height /= 2; in_height /= 2; @@ -548,10 +526,17 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd, cdwdr_width *= 2; } + /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ camor = left_offset | (top_offset << 16); + + dev_geo(icd->dev.parent, + "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor, + (in_height << 16) | in_width, (height << 16) | width, + cdwdr_width); + ceu_write(pcdev, CAMOR, camor); ceu_write(pcdev, CAPWR, (in_height << 16) | in_width); - ceu_write(pcdev, CFSZR, (height << 16) | cfszr_width); + ceu_write(pcdev, CFSZR, (height << 16) | width); ceu_write(pcdev, CDWDR, cdwdr_width); } @@ -663,8 +648,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, ceu_write(pcdev, CAPCR, 0x00300000); ceu_write(pcdev, CAIFR, pcdev->is_interlaced ? 0x101 : 0); + sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height); mdelay(1); - sh_mobile_ceu_set_rect(icd, &icd->rect_current); ceu_write(pcdev, CFLCR, pcdev->cflcr); @@ -687,11 +672,10 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, ceu_write(pcdev, CDOCR, value); ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */ - dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u@%u:%u\n", + dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u\n", pixfmt & 0xff, (pixfmt >> 8) & 0xff, (pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff, - icd->rect_current.width, icd->rect_current.height, - icd->rect_current.left, icd->rect_current.top); + icd->user_width, icd->user_height); capture_restore(pcdev, capsr); @@ -744,6 +728,7 @@ static const struct soc_camera_data_format sh_mobile_ceu_formats[] = { static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, struct soc_camera_format_xlate *xlate) { + struct device *dev = icd->dev.parent; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; @@ -758,7 +743,6 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, return -ENOMEM; icd->host_priv = cam; - cam->camera_max = icd->rect_max; } else { cam = icd->host_priv; } @@ -793,8 +777,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, xlate->cam_fmt = icd->formats + idx; xlate->buswidth = icd->formats[idx].depth; xlate++; - dev_dbg(icd->dev.parent, - "Providing format %s using %s\n", + dev_dbg(dev, "Providing format %s using %s\n", sh_mobile_ceu_formats[k].name, icd->formats[idx].name); } @@ -807,7 +790,7 @@ add_single_format: xlate->cam_fmt = icd->formats + idx; xlate->buswidth = icd->formats[idx].depth; xlate++; - dev_dbg(icd->dev.parent, + dev_dbg(dev, "Providing format %s in pass-through mode\n", icd->formats[idx].name); } @@ -836,176 +819,487 @@ static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2) r1->top + r1->height < r2->top + r2->height; } +static unsigned int scale_down(unsigned int size, unsigned int scale) +{ + return (size * 4096 + scale / 2) / scale; +} + +static unsigned int scale_up(unsigned int size, unsigned int scale) +{ + return (size * scale + 2048) / 4096; +} + +static unsigned int calc_generic_scale(unsigned int input, unsigned int output) +{ + return (input * 4096 + output / 2) / output; +} + +static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect) +{ + struct v4l2_crop crop; + struct v4l2_cropcap cap; + int ret; + + crop.type = 
V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = v4l2_subdev_call(sd, video, g_crop, &crop); + if (!ret) { + *rect = crop.c; + return ret; + } + + /* Camera driver doesn't support .g_crop(), assume default rectangle */ + cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = v4l2_subdev_call(sd, video, cropcap, &cap); + if (ret < 0) + return ret; + + *rect = cap.defrect; + + return ret; +} + /* - * CEU can scale and crop, but we don't want to waste bandwidth and kill the - * framerate by always requesting the maximum image from the client. For - * cropping we also have to take care of the current scale. The common for both - * scaling and cropping approach is: + * The common for both scaling and cropping iterative approach is: * 1. try if the client can produce exactly what requested by the user * 2. if (1) failed, try to double the client image until we get one big enough * 3. if (2) failed, try to request the maximum image */ -static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, - struct v4l2_crop *a) +static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop, + struct v4l2_crop *cam_crop) { - struct v4l2_rect *rect = &a->c; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct v4l2_crop cam_crop; - struct v4l2_rect *cam_rect = &cam_crop.c, target, cam_max; - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - unsigned int hscale = pcdev->cflcr & 0xffff; - unsigned int vscale = (pcdev->cflcr >> 16) & 0xffff; - unsigned short width, height; - u32 capsr; + struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c; + struct device *dev = sd->v4l2_dev->dev; + struct v4l2_cropcap cap; int ret; + unsigned int width, height; - /* Scale back up into client units */ - cam_rect->left = size_src(rect->left, hscale); - cam_rect->width = size_src(rect->width, hscale); - cam_rect->top = size_src(rect->top, vscale); - cam_rect->height = size_src(rect->height, vscale); - - target = *cam_rect; + v4l2_subdev_call(sd, video, s_crop, crop); + ret = client_g_rect(sd, cam_rect); + if (ret < 0) + return ret; - capsr = capture_save_reset(pcdev); - dev_dbg(icd->dev.parent, "CAPSR 0x%x, CFLCR 0x%x\n", - capsr, pcdev->cflcr); - - /* First attempt - see if the client can deliver a perfect result */ - ret = v4l2_subdev_call(sd, video, s_crop, &cam_crop); - if (!ret && !memcmp(&target, &cam_rect, sizeof(target))) { - dev_dbg(icd->dev.parent, - "Camera S_CROP successful for %ux%u@%u:%u\n", - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top); - goto ceu_set_rect; + /* + * Now cam_crop contains the current camera input rectangle, and it must + * be within camera cropcap bounds + */ + if (!memcmp(rect, cam_rect, sizeof(*rect))) { + /* Even if camera S_CROP failed, but camera rectangle matches */ + dev_dbg(dev, "Camera S_CROP successful for %ux%u@%u:%u\n", + rect->width, rect->height, rect->left, rect->top); + return 0; } - /* Try to fix cropping, that camera hasn't managed to do */ - dev_dbg(icd->dev.parent, "Fix camera S_CROP %d for %ux%u@%u:%u" - " to %ux%u@%u:%u\n", - ret, cam_rect->width, cam_rect->height, + /* Try to fix cropping, that camera hasn't managed to set */ + dev_geo(dev, "Fix camera S_CROP for %ux%u@%u:%u to %ux%u@%u:%u\n", + cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top, - target.width, target.height, target.left, target.top); + rect->width, rect->height, rect->left, rect->top); + + /* We need sensor maximum rectangle */ + ret = 
v4l2_subdev_call(sd, video, cropcap, &cap); + if (ret < 0) + return ret; + + soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2, + cap.bounds.width); + soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4, + cap.bounds.height); /* * Popular special case - some cameras can only handle fixed sizes like * QVGA, VGA,... Take care to avoid infinite loop. */ - width = max(cam_rect->width, 1); - height = max(cam_rect->height, 1); - cam_max.width = size_src(icd->rect_max.width, hscale); - cam_max.left = size_src(icd->rect_max.left, hscale); - cam_max.height = size_src(icd->rect_max.height, vscale); - cam_max.top = size_src(icd->rect_max.top, vscale); - while (!ret && (is_smaller(cam_rect, &target) || - is_inside(cam_rect, &target)) && - cam_max.width >= width && cam_max.height >= height) { + width = max(cam_rect->width, 2); + height = max(cam_rect->height, 2); + + while (!ret && (is_smaller(cam_rect, rect) || + is_inside(cam_rect, rect)) && + (cap.bounds.width > width || cap.bounds.height > height)) { width *= 2; height *= 2; + cam_rect->width = width; cam_rect->height = height; - /* We do not know what the camera is capable of, play safe */ - if (cam_rect->left > target.left) - cam_rect->left = cam_max.left; + /* + * We do not know what capabilities the camera has to set up + * left and top borders. We could try to be smarter in iterating + * them, e.g., if camera current left is to the right of the + * target left, set it to the middle point between the current + * left and minimum left. But that would add too much + * complexity: we would have to iterate each border separately. + */ + if (cam_rect->left > rect->left) + cam_rect->left = cap.bounds.left; - if (cam_rect->left + cam_rect->width < target.left + target.width) - cam_rect->width = target.left + target.width - + if (cam_rect->left + cam_rect->width < rect->left + rect->width) + cam_rect->width = rect->left + rect->width - cam_rect->left; - if (cam_rect->top > target.top) - cam_rect->top = cam_max.top; + if (cam_rect->top > rect->top) + cam_rect->top = cap.bounds.top; - if (cam_rect->top + cam_rect->height < target.top + target.height) - cam_rect->height = target.top + target.height - + if (cam_rect->top + cam_rect->height < rect->top + rect->height) + cam_rect->height = rect->top + rect->height - cam_rect->top; - if (cam_rect->width + cam_rect->left > - cam_max.width + cam_max.left) - cam_rect->left = max(cam_max.width + cam_max.left - - cam_rect->width, cam_max.left); - - if (cam_rect->height + cam_rect->top > - cam_max.height + cam_max.top) - cam_rect->top = max(cam_max.height + cam_max.top - - cam_rect->height, cam_max.top); - - ret = v4l2_subdev_call(sd, video, s_crop, &cam_crop); - dev_dbg(icd->dev.parent, "Camera S_CROP %d for %ux%u@%u:%u\n", - ret, cam_rect->width, cam_rect->height, + v4l2_subdev_call(sd, video, s_crop, cam_crop); + ret = client_g_rect(sd, cam_rect); + dev_geo(dev, "Camera S_CROP %d for %ux%u@%u:%u\n", ret, + cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); } - /* - * If the camera failed to configure cropping, it should not modify the - * rectangle - */ - if ((ret < 0 && (is_smaller(&icd->rect_current, rect) || - is_inside(&icd->rect_current, rect))) || - is_smaller(cam_rect, &target) || is_inside(cam_rect, &target)) { + /* S_CROP must not modify the rectangle */ + if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) { /* * The camera failed to configure a suitable cropping, * we cannot use the current rectangle, set to max */ - *cam_rect = cam_max; - ret 
= v4l2_subdev_call(sd, video, s_crop, &cam_crop); - dev_dbg(icd->dev.parent, - "Camera S_CROP %d for max %ux%u@%u:%u\n", - ret, cam_rect->width, cam_rect->height, + *cam_rect = cap.bounds; + v4l2_subdev_call(sd, video, s_crop, cam_crop); + ret = client_g_rect(sd, cam_rect); + dev_geo(dev, "Camera S_CROP %d for max %ux%u@%u:%u\n", ret, + cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); - if (ret < 0 && ret != -ENOIOCTLCMD) - /* All failed, hopefully resume current capture */ - goto resume_capture; - - /* Finally, adjust the target rectangle */ - if (target.width > cam_rect->width) - target.width = cam_rect->width; - if (target.height > cam_rect->height) - target.height = cam_rect->height; - if (target.left + target.width > cam_rect->left + cam_rect->width) - target.left = cam_rect->left + cam_rect->width - - target.width; - if (target.top + target.height > cam_rect->top + cam_rect->height) - target.top = cam_rect->top + cam_rect->height - - target.height; } - /* We now have a rectangle, larger than requested, let's crop */ + return ret; +} + +static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect, + unsigned int *scale_h, unsigned int *scale_v) +{ + struct v4l2_format f; + int ret; + + f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = v4l2_subdev_call(sd, video, g_fmt, &f); + if (ret < 0) + return ret; + + *scale_h = calc_generic_scale(rect->width, f.fmt.pix.width); + *scale_v = calc_generic_scale(rect->height, f.fmt.pix.height); + + return 0; +} + +static int get_camera_subwin(struct soc_camera_device *icd, + struct v4l2_rect *cam_subrect, + unsigned int cam_hscale, unsigned int cam_vscale) +{ + struct sh_mobile_ceu_cam *cam = icd->host_priv; + struct v4l2_rect *ceu_rect = &cam->ceu_rect; + + if (!ceu_rect->width) { + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct device *dev = icd->dev.parent; + struct v4l2_format f; + struct v4l2_pix_format *pix = &f.fmt.pix; + int ret; + /* First time */ + + f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = v4l2_subdev_call(sd, video, g_fmt, &f); + if (ret < 0) + return ret; + + dev_geo(dev, "camera fmt %ux%u\n", pix->width, pix->height); + + if (pix->width > 2560) { + ceu_rect->width = 2560; + ceu_rect->left = (pix->width - 2560) / 2; + } else { + ceu_rect->width = pix->width; + ceu_rect->left = 0; + } + + if (pix->height > 1920) { + ceu_rect->height = 1920; + ceu_rect->top = (pix->height - 1920) / 2; + } else { + ceu_rect->height = pix->height; + ceu_rect->top = 0; + } + + dev_geo(dev, "initialised CEU rect %ux%u@%u:%u\n", + ceu_rect->width, ceu_rect->height, + ceu_rect->left, ceu_rect->top); + } + + cam_subrect->width = scale_up(ceu_rect->width, cam_hscale); + cam_subrect->left = scale_up(ceu_rect->left, cam_hscale); + cam_subrect->height = scale_up(ceu_rect->height, cam_vscale); + cam_subrect->top = scale_up(ceu_rect->top, cam_vscale); + + return 0; +} + +static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f, + bool ceu_can_scale) +{ + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct device *dev = icd->dev.parent; + struct v4l2_pix_format *pix = &f->fmt.pix; + unsigned int width = pix->width, height = pix->height, tmp_w, tmp_h; + unsigned int max_width, max_height; + struct v4l2_cropcap cap; + int ret; + + cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = v4l2_subdev_call(sd, video, cropcap, &cap); + if (ret < 0) + return ret; + + max_width = min(cap.bounds.width, 2560); + max_height = min(cap.bounds.height, 1920); + + ret = v4l2_subdev_call(sd, video, s_fmt, f); + 
if (ret < 0) + return ret; + + dev_geo(dev, "camera scaled to %ux%u\n", pix->width, pix->height); + + if ((width == pix->width && height == pix->height) || !ceu_can_scale) + return 0; + + /* Camera set a format, but geometry is not precise, try to improve */ + tmp_w = pix->width; + tmp_h = pix->height; + + /* width <= max_width && height <= max_height - guaranteed by try_fmt */ + while ((width > tmp_w || height > tmp_h) && + tmp_w < max_width && tmp_h < max_height) { + tmp_w = min(2 * tmp_w, max_width); + tmp_h = min(2 * tmp_h, max_height); + pix->width = tmp_w; + pix->height = tmp_h; + ret = v4l2_subdev_call(sd, video, s_fmt, f); + dev_geo(dev, "Camera scaled to %ux%u\n", + pix->width, pix->height); + if (ret < 0) { + /* This shouldn't happen */ + dev_err(dev, "Client failed to set format: %d\n", ret); + return ret; + } + } + + return 0; +} + +/** + * @rect - camera cropped rectangle + * @sub_rect - CEU cropped rectangle, mapped back to camera input area + * @ceu_rect - on output calculated CEU crop rectangle + */ +static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect, + struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect, + struct v4l2_format *f, bool ceu_can_scale) +{ + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct sh_mobile_ceu_cam *cam = icd->host_priv; + struct device *dev = icd->dev.parent; + struct v4l2_format f_tmp = *f; + struct v4l2_pix_format *pix_tmp = &f_tmp.fmt.pix; + unsigned int scale_h, scale_v; + int ret; + + /* 5. Apply iterative camera S_FMT for camera user window. */ + ret = client_s_fmt(icd, &f_tmp, ceu_can_scale); + if (ret < 0) + return ret; + + dev_geo(dev, "5: camera scaled to %ux%u\n", + pix_tmp->width, pix_tmp->height); + + /* 6. Retrieve camera output window (g_fmt) */ + + /* unneeded - it is already in "f_tmp" */ + + /* 7. Calculate new camera scales. */ + ret = get_camera_scales(sd, rect, &scale_h, &scale_v); + if (ret < 0) + return ret; + + dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v); + + cam->cam_width = pix_tmp->width; + cam->cam_height = pix_tmp->height; + f->fmt.pix.width = pix_tmp->width; + f->fmt.pix.height = pix_tmp->height; /* - * We have to preserve camera rectangle between close() / open(), - * because soc-camera core calls .set_fmt() on each first open() with - * last before last close() _user_ rectangle, which can be different - * from camera rectangle. + * 8. Calculate new CEU crop - apply camera scales to previously + * calculated "effective" crop. 
*/ - dev_dbg(icd->dev.parent, - "SH S_CROP from %ux%u@%u:%u to %ux%u@%u:%u, scale to %ux%u@%u:%u\n", - cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top, - target.width, target.height, target.left, target.top, - rect->width, rect->height, rect->left, rect->top); + ceu_rect->left = scale_down(sub_rect->left, scale_h); + ceu_rect->width = scale_down(sub_rect->width, scale_h); + ceu_rect->top = scale_down(sub_rect->top, scale_v); + ceu_rect->height = scale_down(sub_rect->height, scale_v); + + dev_geo(dev, "8: new CEU rect %ux%u@%u:%u\n", + ceu_rect->width, ceu_rect->height, + ceu_rect->left, ceu_rect->top); + + return 0; +} + +/* Get combined scales */ +static int get_scales(struct soc_camera_device *icd, + unsigned int *scale_h, unsigned int *scale_v) +{ + struct sh_mobile_ceu_cam *cam = icd->host_priv; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct v4l2_crop cam_crop; + unsigned int width_in, height_in; + int ret; - ret = 0; + cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -ceu_set_rect: - cam->camera_rect = *cam_rect; + ret = client_g_rect(sd, &cam_crop.c); + if (ret < 0) + return ret; - rect->width = size_dst(target.width, hscale); - rect->left = size_dst(target.left, hscale); - rect->height = size_dst(target.height, vscale); - rect->top = size_dst(target.top, vscale); + ret = get_camera_scales(sd, &cam_crop.c, scale_h, scale_v); + if (ret < 0) + return ret; - sh_mobile_ceu_set_rect(icd, rect); + width_in = scale_up(cam->ceu_rect.width, *scale_h); + height_in = scale_up(cam->ceu_rect.height, *scale_v); -resume_capture: - /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ + *scale_h = calc_generic_scale(cam->ceu_rect.width, icd->user_width); + *scale_v = calc_generic_scale(cam->ceu_rect.height, icd->user_height); + + return 0; +} + +/* + * CEU can scale and crop, but we don't want to waste bandwidth and kill the + * framerate by always requesting the maximum image from the client. See + * Documentation/video4linux/sh_mobile_camera_ceu.txt for a description of + * scaling and cropping algorithms and for the meaning of referenced here steps. + */ +static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, + struct v4l2_crop *a) +{ + struct v4l2_rect *rect = &a->c; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct sh_mobile_ceu_dev *pcdev = ici->priv; + struct v4l2_crop cam_crop; + struct sh_mobile_ceu_cam *cam = icd->host_priv; + struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct device *dev = icd->dev.parent; + struct v4l2_format f; + struct v4l2_pix_format *pix = &f.fmt.pix; + unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v, + out_width, out_height; + u32 capsr, cflcr; + int ret; + + /* 1. Calculate current combined scales. */ + ret = get_scales(icd, &scale_comb_h, &scale_comb_v); + if (ret < 0) + return ret; + + dev_geo(dev, "1: combined scales %u:%u\n", scale_comb_h, scale_comb_v); + + /* 2. Apply iterative camera S_CROP for new input window. */ + ret = client_s_crop(sd, a, &cam_crop); + if (ret < 0) + return ret; + + dev_geo(dev, "2: camera cropped to %ux%u@%u:%u\n", + cam_rect->width, cam_rect->height, + cam_rect->left, cam_rect->top); + + /* On success cam_crop contains current camera crop */ + + /* + * 3. If old combined scales applied to new crop produce an impossible + * user window, adjust scales to produce nearest possible window. 
+ */ + out_width = scale_down(rect->width, scale_comb_h); + out_height = scale_down(rect->height, scale_comb_v); + + if (out_width > 2560) + out_width = 2560; + else if (out_width < 2) + out_width = 2; + + if (out_height > 1920) + out_height = 1920; + else if (out_height < 4) + out_height = 4; + + dev_geo(dev, "3: Adjusted output %ux%u\n", out_width, out_height); + + /* 4. Use G_CROP to retrieve actual input window: already in cam_crop */ + + /* + * 5. Using actual input window and calculated combined scales calculate + * camera target output window. + */ + pix->width = scale_down(cam_rect->width, scale_comb_h); + pix->height = scale_down(cam_rect->height, scale_comb_v); + + dev_geo(dev, "5: camera target %ux%u\n", pix->width, pix->height); + + /* 6. - 9. */ + pix->pixelformat = cam->camera_fmt->fourcc; + pix->colorspace = cam->camera_fmt->colorspace; + + capsr = capture_save_reset(pcdev); + dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); + + /* Make relative to camera rectangle */ + rect->left -= cam_rect->left; + rect->top -= cam_rect->top; + + f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = client_scale(icd, cam_rect, rect, ceu_rect, &f, + pcdev->image_mode && !pcdev->is_interlaced); + + dev_geo(dev, "6-9: %d\n", ret); + + /* 10. Use CEU cropping to crop to the new window. */ + sh_mobile_ceu_set_rect(icd, out_width, out_height); + + dev_geo(dev, "10: CEU cropped to %ux%u@%u:%u\n", + ceu_rect->width, ceu_rect->height, + ceu_rect->left, ceu_rect->top); + + /* + * 11. Calculate CEU scales from camera scales from results of (10) and + * user window from (3) + */ + scale_ceu_h = calc_scale(ceu_rect->width, &out_width); + scale_ceu_v = calc_scale(ceu_rect->height, &out_height); + + dev_geo(dev, "11: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v); + + /* 12. Apply CEU scales. 
*/ + cflcr = scale_ceu_h | (scale_ceu_v << 16); + if (cflcr != pcdev->cflcr) { + pcdev->cflcr = cflcr; + ceu_write(pcdev, CFLCR, cflcr); + } + + /* Restore capture */ if (pcdev->active) capsr |= 1; capture_restore(pcdev, capsr); + icd->user_width = out_width; + icd->user_height = out_height; + /* Even if only camera cropping succeeded */ return ret; } @@ -1018,121 +1312,137 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_pix_format *pix = &f->fmt.pix; + struct v4l2_format cam_f = *f; + struct v4l2_pix_format *cam_pix = &cam_f.fmt.pix; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct device *dev = icd->dev.parent; __u32 pixfmt = pix->pixelformat; const struct soc_camera_format_xlate *xlate; - unsigned int width = pix->width, height = pix->height, tmp_w, tmp_h; - u16 vscale, hscale; - int ret, is_interlaced; + struct v4l2_crop cam_crop; + struct v4l2_rect *cam_rect = &cam_crop.c, cam_subrect, ceu_rect; + unsigned int scale_cam_h, scale_cam_v; + u16 scale_v, scale_h; + int ret; + bool is_interlaced, image_mode; switch (pix->field) { case V4L2_FIELD_INTERLACED: - is_interlaced = 1; + is_interlaced = true; break; case V4L2_FIELD_ANY: default: pix->field = V4L2_FIELD_NONE; /* fall-through */ case V4L2_FIELD_NONE: - is_interlaced = 0; + is_interlaced = false; break; } xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { - dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); + dev_warn(dev, "Format %x not found\n", pixfmt); return -EINVAL; } - pix->pixelformat = xlate->cam_fmt->fourcc; - ret = v4l2_subdev_call(sd, video, s_fmt, f); - pix->pixelformat = pixfmt; - dev_dbg(icd->dev.parent, - "Camera %d fmt %ux%u, requested %ux%u, max %ux%u\n", - ret, pix->width, pix->height, width, height, - icd->rect_max.width, icd->rect_max.height); + /* 1. Calculate current camera scales. */ + cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + ret = client_g_rect(sd, cam_rect); + if (ret < 0) + return ret; + + ret = get_camera_scales(sd, cam_rect, &scale_cam_h, &scale_cam_v); + if (ret < 0) + return ret; + + dev_geo(dev, "1: camera scales %u:%u\n", scale_cam_h, scale_cam_v); + + /* + * 2. Calculate "effective" input crop (sensor subwindow) - CEU crop + * scaled back at current camera scales onto input window. + */ + ret = get_camera_subwin(icd, &cam_subrect, scale_cam_h, scale_cam_v); if (ret < 0) return ret; + dev_geo(dev, "2: subwin %ux%u@%u:%u\n", + cam_subrect.width, cam_subrect.height, + cam_subrect.left, cam_subrect.top); + + /* + * 3. Calculate new combined scales from "effective" input window to + * requested user window. + */ + scale_h = calc_generic_scale(cam_subrect.width, pix->width); + scale_v = calc_generic_scale(cam_subrect.height, pix->height); + + dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v); + + /* + * 4. Calculate camera output window by applying combined scales to real + * input window. 
+ */ + cam_pix->width = scale_down(cam_rect->width, scale_h); + cam_pix->height = scale_down(cam_rect->height, scale_v); + cam_pix->pixelformat = xlate->cam_fmt->fourcc; + switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: - pcdev->image_mode = 1; + image_mode = true; break; default: - pcdev->image_mode = 0; + image_mode = false; } - if ((abs(width - pix->width) < 4 && abs(height - pix->height) < 4) || - !pcdev->image_mode || is_interlaced) { - hscale = 0; - vscale = 0; - goto out; - } + dev_geo(dev, "4: camera output %ux%u\n", + cam_pix->width, cam_pix->height); - /* Camera set a format, but geometry is not precise, try to improve */ - /* - * FIXME: when soc-camera is converted to implement traditional S_FMT - * and S_CROP semantics, replace CEU limits with camera maxima - */ - tmp_w = pix->width; - tmp_h = pix->height; - while ((width > tmp_w || height > tmp_h) && - tmp_w < 2560 && tmp_h < 1920) { - tmp_w = min(2 * tmp_w, (__u32)2560); - tmp_h = min(2 * tmp_h, (__u32)1920); - pix->width = tmp_w; - pix->height = tmp_h; - pix->pixelformat = xlate->cam_fmt->fourcc; - ret = v4l2_subdev_call(sd, video, s_fmt, f); - pix->pixelformat = pixfmt; - dev_dbg(icd->dev.parent, "Camera scaled to %ux%u\n", - pix->width, pix->height); - if (ret < 0) { - /* This shouldn't happen */ - dev_err(icd->dev.parent, - "Client failed to set format: %d\n", ret); - return ret; - } - } + /* 5. - 9. */ + ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &cam_f, + image_mode && !is_interlaced); + + dev_geo(dev, "5-9: client scale %d\n", ret); + + /* Done with the camera. Now see if we can improve the result */ + + dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n", + ret, cam_pix->width, cam_pix->height, pix->width, pix->height); + if (ret < 0) + return ret; + + /* 10. Use CEU scaling to scale to the requested user window. 
*/ /* We cannot scale up */ - if (width > pix->width) - width = pix->width; + if (pix->width > cam_pix->width) + pix->width = cam_pix->width; + if (pix->width > ceu_rect.width) + pix->width = ceu_rect.width; - if (height > pix->height) - height = pix->height; + if (pix->height > cam_pix->height) + pix->height = cam_pix->height; + if (pix->height > ceu_rect.height) + pix->height = ceu_rect.height; /* Let's rock: scale pix->{width x height} down to width x height */ - hscale = calc_scale(pix->width, &width); - vscale = calc_scale(pix->height, &height); + scale_h = calc_scale(ceu_rect.width, &pix->width); + scale_v = calc_scale(ceu_rect.height, &pix->height); - dev_dbg(icd->dev.parent, "W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", - pix->width, hscale, width, pix->height, vscale, height); + dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", + ceu_rect.width, scale_h, pix->width, + ceu_rect.height, scale_v, pix->height); -out: - pcdev->cflcr = hscale | (vscale << 16); + pcdev->cflcr = scale_h | (scale_v << 16); icd->buswidth = xlate->buswidth; icd->current_fmt = xlate->host_fmt; cam->camera_fmt = xlate->cam_fmt; - cam->camera_rect.width = pix->width; - cam->camera_rect.height = pix->height; - - icd->rect_max.left = size_dst(cam->camera_max.left, hscale); - icd->rect_max.width = size_dst(cam->camera_max.width, hscale); - icd->rect_max.top = size_dst(cam->camera_max.top, vscale); - icd->rect_max.height = size_dst(cam->camera_max.height, vscale); - - icd->rect_current.left = icd->rect_max.left; - icd->rect_current.top = icd->rect_max.top; + cam->ceu_rect = ceu_rect; pcdev->is_interlaced = is_interlaced; - - pix->width = width; - pix->height = height; + pcdev->image_mode = image_mode; return 0; } diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index c6cccdf8daf..86e0648f65a 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -278,6 +278,9 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd) icd->user_formats = NULL; } +#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ + ((x) >> 24) & 0xff + /* Called with .vb_lock held */ static int soc_camera_set_fmt(struct soc_camera_file *icf, struct v4l2_format *f) @@ -287,6 +290,9 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf, struct v4l2_pix_format *pix = &f->fmt.pix; int ret; + dev_dbg(&icd->dev, "S_FMT(%c%c%c%c, %ux%u)\n", + pixfmtstr(pix->pixelformat), pix->width, pix->height); + /* We always call try_fmt() before set_fmt() or set_crop() */ ret = ici->ops->try_fmt(icd, f); if (ret < 0) @@ -302,17 +308,17 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf, return -EINVAL; } - icd->rect_current.width = pix->width; - icd->rect_current.height = pix->height; - icf->vb_vidq.field = - icd->field = pix->field; + icd->user_width = pix->width; + icd->user_height = pix->height; + icf->vb_vidq.field = + icd->field = pix->field; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) dev_warn(&icd->dev, "Attention! 
Wrong buf-type %d\n", f->type); dev_dbg(&icd->dev, "set width: %d height: %d\n", - icd->rect_current.width, icd->rect_current.height); + icd->user_width, icd->user_height); /* set physical bus parameters */ return ici->ops->set_bus_param(icd, pix->pixelformat); @@ -355,8 +361,8 @@ static int soc_camera_open(struct file *file) struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { - .width = icd->rect_current.width, - .height = icd->rect_current.height, + .width = icd->user_width, + .height = icd->user_height, .field = icd->field, .pixelformat = icd->current_fmt->fourcc, .colorspace = icd->current_fmt->colorspace, @@ -557,8 +563,8 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv, WARN_ON(priv != file->private_data); - pix->width = icd->rect_current.width; - pix->height = icd->rect_current.height; + pix->width = icd->user_width; + pix->height = icd->user_height; pix->field = icf->vb_vidq.field; pix->pixelformat = icd->current_fmt->fourcc; pix->bytesperline = pix->width * @@ -722,17 +728,9 @@ static int soc_camera_cropcap(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - a->bounds = icd->rect_max; - a->defrect.left = icd->rect_max.left; - a->defrect.top = icd->rect_max.top; - a->defrect.width = DEFAULT_WIDTH; - a->defrect.height = DEFAULT_HEIGHT; - a->pixelaspect.numerator = 1; - a->pixelaspect.denominator = 1; - - return 0; + return ici->ops->cropcap(icd, a); } static int soc_camera_g_crop(struct file *file, void *fh, @@ -740,11 +738,14 @@ static int soc_camera_g_crop(struct file *file, void *fh, { struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + int ret; - a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - a->c = icd->rect_current; + mutex_lock(&icf->vb_vidq.vb_lock); + ret = ici->ops->get_crop(icd, a); + mutex_unlock(&icf->vb_vidq.vb_lock); - return 0; + return ret; } /* @@ -759,49 +760,33 @@ static int soc_camera_s_crop(struct file *file, void *fh, struct soc_camera_file *icf = file->private_data; struct soc_camera_device *icd = icf->icd; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - struct v4l2_rect rect = a->c; + struct v4l2_rect *rect = &a->c; + struct v4l2_crop current_crop; int ret; if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; + dev_dbg(&icd->dev, "S_CROP(%ux%u@%u:%u)\n", + rect->width, rect->height, rect->left, rect->top); + /* Cropping is allowed during a running capture, guard consistency */ mutex_lock(&icf->vb_vidq.vb_lock); + /* If get_crop fails, we'll let host and / or client drivers decide */ + ret = ici->ops->get_crop(icd, ¤t_crop); + /* Prohibit window size change with initialised buffers */ - if (icf->vb_vidq.bufs[0] && (rect.width != icd->rect_current.width || - rect.height != icd->rect_current.height)) { + if (icf->vb_vidq.bufs[0] && !ret && + (a->c.width != current_crop.c.width || + a->c.height != current_crop.c.height)) { dev_err(&icd->dev, "S_CROP denied: queue initialised and sizes differ\n"); ret = -EBUSY; - goto unlock; + } else { + ret = ici->ops->set_crop(icd, a); } - if (rect.width > icd->rect_max.width) - rect.width = icd->rect_max.width; - - if (rect.width < icd->width_min) - rect.width = icd->width_min; - - if (rect.height > icd->rect_max.height) - rect.height = icd->rect_max.height; - - if 
(rect.height < icd->height_min) - rect.height = icd->height_min; - - if (rect.width + rect.left > icd->rect_max.width + icd->rect_max.left) - rect.left = icd->rect_max.width + icd->rect_max.left - - rect.width; - - if (rect.height + rect.top > icd->rect_max.height + icd->rect_max.top) - rect.top = icd->rect_max.height + icd->rect_max.top - - rect.height; - - ret = ici->ops->set_crop(icd, a); - if (!ret) - icd->rect_current = rect; - -unlock: mutex_unlock(&icf->vb_vidq.vb_lock); return ret; @@ -926,6 +911,8 @@ static int soc_camera_probe(struct device *dev) struct soc_camera_host *ici = to_soc_camera_host(dev->parent); struct soc_camera_link *icl = to_soc_camera_link(icd); struct device *control = NULL; + struct v4l2_subdev *sd; + struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE}; int ret; dev_info(dev, "Probing %s\n", dev_name(dev)); @@ -982,7 +969,6 @@ static int soc_camera_probe(struct device *dev) if (ret < 0) goto eiufmt; - icd->rect_current = icd->rect_max; icd->field = V4L2_FIELD_ANY; /* ..._video_start() will create a device node, so we have to protect */ @@ -992,9 +978,15 @@ static int soc_camera_probe(struct device *dev) if (ret < 0) goto evidstart; + /* Try to improve our guess of a reasonable window format */ + sd = soc_camera_to_subdev(icd); + if (!v4l2_subdev_call(sd, video, g_fmt, &f)) { + icd->user_width = f.fmt.pix.width; + icd->user_height = f.fmt.pix.height; + } + /* Do we have to sysfs_remove_link() before device_unregister()? */ - if (to_soc_camera_control(icd) && - sysfs_create_link(&icd->dev.kobj, &to_soc_camera_control(icd)->kobj, + if (sysfs_create_link(&icd->dev.kobj, &to_soc_camera_control(icd)->kobj, "control")) dev_warn(&icd->dev, "Failed creating the control symlink\n"); @@ -1103,6 +1095,25 @@ static void dummy_release(struct device *dev) { } +static int default_cropcap(struct soc_camera_device *icd, + struct v4l2_cropcap *a) +{ + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + return v4l2_subdev_call(sd, video, cropcap, a); +} + +static int default_g_crop(struct soc_camera_device *icd, struct v4l2_crop *a) +{ + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + return v4l2_subdev_call(sd, video, g_crop, a); +} + +static int default_s_crop(struct soc_camera_device *icd, struct v4l2_crop *a) +{ + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + return v4l2_subdev_call(sd, video, s_crop, a); +} + int soc_camera_host_register(struct soc_camera_host *ici) { struct soc_camera_host *ix; @@ -1111,7 +1122,6 @@ int soc_camera_host_register(struct soc_camera_host *ici) if (!ici || !ici->ops || !ici->ops->try_fmt || !ici->ops->set_fmt || - !ici->ops->set_crop || !ici->ops->set_bus_param || !ici->ops->querycap || !ici->ops->init_videobuf || @@ -1122,6 +1132,13 @@ int soc_camera_host_register(struct soc_camera_host *ici) !ici->v4l2_dev.dev) return -EINVAL; + if (!ici->ops->set_crop) + ici->ops->set_crop = default_s_crop; + if (!ici->ops->get_crop) + ici->ops->get_crop = default_g_crop; + if (!ici->ops->cropcap) + ici->ops->cropcap = default_cropcap; + mutex_lock(&list_lock); list_for_each_entry(ix, &hosts, list) { if (ix->nr == ici->nr) { @@ -1321,6 +1338,9 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev) if (ret < 0) goto escdevreg; + icd->user_width = DEFAULT_WIDTH; + icd->user_height = DEFAULT_HEIGHT; + return 0; escdevreg: diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index aec2cadbd2e..3825c358172 100644 --- a/drivers/media/video/soc_camera_platform.c +++ 
b/drivers/media/video/soc_camera_platform.c @@ -127,10 +127,6 @@ static int soc_camera_platform_probe(struct platform_device *pdev) /* Set the control device reference */ dev_set_drvdata(&icd->dev, &pdev->dev); - icd->width_min = 0; - icd->rect_max.width = p->format.width; - icd->height_min = 0; - icd->rect_max.height = p->format.height; icd->y_skip_top = 0; icd->ops = &soc_camera_platform_ops; diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index 94bd5b09f05..fbf4130dfc5 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -715,8 +715,88 @@ tw9910_set_fmt_error: return ret; } +static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) +{ + struct i2c_client *client = sd->priv; + struct tw9910_priv *priv = to_tw9910(client); + + if (!priv->scale) { + int ret; + struct v4l2_crop crop = { + .c = { + .left = 0, + .top = 0, + .width = 640, + .height = 480, + }, + }; + ret = tw9910_s_crop(sd, &crop); + if (ret < 0) + return ret; + } + + a->c.left = 0; + a->c.top = 0; + a->c.width = priv->scale->width; + a->c.height = priv->scale->height; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + return 0; +} + +static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) +{ + a->bounds.left = 0; + a->bounds.top = 0; + a->bounds.width = 768; + a->bounds.height = 576; + a->defrect.left = 0; + a->defrect.top = 0; + a->defrect.width = 640; + a->defrect.height = 480; + a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + a->pixelaspect.numerator = 1; + a->pixelaspect.denominator = 1; + + return 0; +} + +static int tw9910_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +{ + struct i2c_client *client = sd->priv; + struct tw9910_priv *priv = to_tw9910(client); + struct v4l2_pix_format *pix = &f->fmt.pix; + + if (!priv->scale) { + int ret; + struct v4l2_crop crop = { + .c = { + .left = 0, + .top = 0, + .width = 640, + .height = 480, + }, + }; + ret = tw9910_s_crop(sd, &crop); + if (ret < 0) + return ret; + } + + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + pix->width = priv->scale->width; + pix->height = priv->scale->height; + pix->pixelformat = V4L2_PIX_FMT_VYUY; + pix->colorspace = V4L2_COLORSPACE_SMPTE170M; + pix->field = V4L2_FIELD_INTERLACED; + + return 0; +} + static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { + struct i2c_client *client = sd->priv; + struct tw9910_priv *priv = to_tw9910(client); struct v4l2_pix_format *pix = &f->fmt.pix; /* See tw9910_s_crop() - no proper cropping support */ struct v4l2_crop a = { @@ -741,8 +821,8 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) ret = tw9910_s_crop(sd, &a); if (!ret) { - pix->width = a.c.width; - pix->height = a.c.height; + pix->width = priv->scale->width; + pix->height = priv->scale->height; } return ret; } @@ -838,8 +918,11 @@ static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = { static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = { .s_stream = tw9910_s_stream, + .g_fmt = tw9910_g_fmt, .s_fmt = tw9910_s_fmt, .try_fmt = tw9910_try_fmt, + .cropcap = tw9910_cropcap, + .g_crop = tw9910_g_crop, .s_crop = tw9910_s_crop, }; @@ -852,20 +935,6 @@ static struct v4l2_subdev_ops tw9910_subdev_ops = { * i2c_driver function */ -/* This is called during probe, so, setting rect_max is Ok here: scale == 1 */ -static void limit_to_scale(struct soc_camera_device *icd, - const struct tw9910_scale_ctrl *scale) -{ - if (scale->width > icd->rect_max.width) - icd->rect_max.width = scale->width; - if (scale->width < icd->width_min) - icd->width_min = 
scale->width; - if (scale->height > icd->rect_max.height) - icd->rect_max.height = scale->height; - if (scale->height < icd->height_min) - icd->height_min = scale->height; -} - static int tw9910_probe(struct i2c_client *client, const struct i2c_device_id *did) @@ -876,8 +945,7 @@ static int tw9910_probe(struct i2c_client *client, struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct soc_camera_link *icl; - const struct tw9910_scale_ctrl *scale; - int i, ret; + int ret; if (!icd) { dev_err(&client->dev, "TW9910: missing soc-camera data!\n"); @@ -908,22 +976,6 @@ static int tw9910_probe(struct i2c_client *client, icd->ops = &tw9910_ops; icd->iface = info->link.bus_id; - /* - * set width and height - */ - icd->rect_max.width = tw9910_ntsc_scales[0].width; /* set default */ - icd->width_min = tw9910_ntsc_scales[0].width; - icd->rect_max.height = tw9910_ntsc_scales[0].height; - icd->height_min = tw9910_ntsc_scales[0].height; - - scale = tw9910_ntsc_scales; - for (i = 0; i < ARRAY_SIZE(tw9910_ntsc_scales); i++) - limit_to_scale(icd, scale + i); - - scale = tw9910_pal_scales; - for (i = 0; i < ARRAY_SIZE(tw9910_pal_scales); i++) - limit_to_scale(icd, scale + i); - ret = tw9910_video_probe(icd, client); if (ret) { icd->ops = NULL; diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 344d8990477..3185e8daaa0 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -22,8 +22,8 @@ struct soc_camera_device { struct list_head list; struct device dev; struct device *pdev; /* Platform device */ - struct v4l2_rect rect_current; /* Current window */ - struct v4l2_rect rect_max; /* Maximum window */ + s32 user_width; + s32 user_height; unsigned short width_min; unsigned short height_min; unsigned short y_skip_top; /* Lines to skip at the top */ @@ -76,6 +76,8 @@ struct soc_camera_host_ops { int (*get_formats)(struct soc_camera_device *, int, struct soc_camera_format_xlate *); void (*put_formats)(struct soc_camera_device *); + int (*cropcap)(struct soc_camera_device *, struct v4l2_cropcap *); + int (*get_crop)(struct soc_camera_device *, struct v4l2_crop *); int (*set_crop)(struct soc_camera_device *, struct v4l2_crop *); int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *); int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *); @@ -277,6 +279,21 @@ static inline unsigned long soc_camera_bus_param_compatible( common_flags; } +static inline void soc_camera_limit_side(unsigned int *start, + unsigned int *length, unsigned int start_min, + unsigned int length_min, unsigned int length_max) +{ + if (*length < length_min) + *length = length_min; + else if (*length > length_max) + *length = length_max; + + if (*start < start_min) + *start = start_min; + else if (*start > start_min + length_max - *length) + *start = start_min + length_max - *length; +} + extern unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, unsigned long flags); -- cgit v1.2.3-70-g09d2 From a4c56fd8892e51d675f7665ddee4fd9d7e5c2cc3 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:53:23 -0300 Subject: V4L/DVB (12535): soc-camera: remove .init() and .release() methods from struct soc_camera_ops Remove unneeded soc-camera operations, this also makes the soc-camera API to v4l2 subdevices thinner. 
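To illustrate the resulting pattern (a sketch only, not part of the hunks below; the example_* names and the EXAMPLE_RESET register are hypothetical), a sensor driver now brings its chip up from its own video-probe path instead of waiting for the host to call icd->ops->init():

	static int example_sensor_init(struct i2c_client *client)
	{
		/* chip soft reset; returns a negative errno on I2C failure */
		return example_reg_write(client, EXAMPLE_RESET, 1);
	}

	static int example_video_probe(struct soc_camera_device *icd,
				       struct i2c_client *client)
	{
		int ret = example_sensor_init(client);

		if (ret < 0)
			dev_err(&client->dev, "Failed to initialise the camera\n");

		return ret;
	}

With that in place the .init() and .release() hooks, and their callers in soc_camera_open() / soc_camera_close(), can be dropped, which is what the hunks below do.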
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 22 +++++++--------------- drivers/media/video/mt9m111.c | 40 ++++++++-------------------------------- drivers/media/video/mt9t031.c | 23 ++++++----------------- drivers/media/video/mt9v022.c | 8 +++++--- drivers/media/video/soc_camera.c | 11 ----------- include/media/soc_camera.h | 4 ---- 6 files changed, 26 insertions(+), 82 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index e8cf56189ef..4b394798a29 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -122,9 +122,8 @@ static int reg_clear(struct i2c_client *client, const u8 reg, return reg_write(client, reg, ret & ~data); } -static int mt9m001_init(struct soc_camera_device *icd) +static int mt9m001_init(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); int ret; dev_dbg(&client->dev, "%s\n", __func__); @@ -144,16 +143,6 @@ static int mt9m001_init(struct soc_camera_device *icd) return ret; } -static int mt9m001_release(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - - /* Disable the chip */ - reg_write(client, MT9M001_OUTPUT_CONTROL, 0); - - return 0; -} - static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = sd->priv; @@ -446,8 +435,6 @@ static const struct v4l2_queryctrl mt9m001_controls[] = { }; static struct soc_camera_ops mt9m001_ops = { - .init = mt9m001_init, - .release = mt9m001_release, .set_bus_param = mt9m001_set_bus_param, .query_bus_param = mt9m001_query_bus_param, .controls = mt9m001_controls, @@ -581,6 +568,7 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, struct soc_camera_link *icl = to_soc_camera_link(icd); s32 data; unsigned long flags; + int ret; /* We must have a parent by now. And it cannot be a wrong one. * So this entire test is completely redundant. */ @@ -637,7 +625,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data, data == 0x8431 ? 
"C12STM" : "C12ST"); - return 0; + ret = mt9m001_init(client); + if (ret < 0) + dev_err(&client->dev, "Failed to initialise the camera\n"); + + return ret; } static void mt9m001_video_remove(struct soc_camera_device *icd) diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 920dd53c4cf..186902f9be2 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -672,13 +672,9 @@ static const struct v4l2_queryctrl mt9m111_controls[] = { }; static int mt9m111_resume(struct soc_camera_device *icd); -static int mt9m111_init(struct soc_camera_device *icd); -static int mt9m111_release(struct soc_camera_device *icd); static struct soc_camera_ops mt9m111_ops = { - .init = mt9m111_init, .resume = mt9m111_resume, - .release = mt9m111_release, .query_bus_param = mt9m111_query_bus_param, .set_bus_param = mt9m111_set_bus_param, .controls = mt9m111_controls, @@ -880,9 +876,8 @@ static int mt9m111_resume(struct soc_camera_device *icd) return ret; } -static int mt9m111_init(struct soc_camera_device *icd) +static int mt9m111_init(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; @@ -899,22 +894,6 @@ static int mt9m111_init(struct soc_camera_device *icd) return ret; } -static int mt9m111_release(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct mt9m111 *mt9m111 = to_mt9m111(client); - int ret; - - ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE); - if (!ret) - mt9m111->powered = 0; - - if (ret < 0) - dev_err(&client->dev, "mt9m11x release failed: %d\n", ret); - - return ret; -} - /* * Interface active, can use i2c. If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one @@ -934,10 +913,13 @@ static int mt9m111_video_probe(struct soc_camera_device *icd, to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; - ret = mt9m111_enable(client); - if (ret) - goto ei2c; - ret = mt9m111_reset(client); + mt9m111->autoexposure = 1; + mt9m111->autowhitebalance = 1; + + mt9m111->swap_rgb_even_odd = 1; + mt9m111->swap_rgb_red_blue = 1; + + ret = mt9m111_init(client); if (ret) goto ei2c; @@ -962,12 +944,6 @@ static int mt9m111_video_probe(struct soc_camera_device *icd, dev_info(&client->dev, "Detected a MT9M11x chip ID %x\n", data); - mt9m111->autoexposure = 1; - mt9m111->autowhitebalance = 1; - - mt9m111->swap_rgb_even_odd = 1; - mt9m111->swap_rgb_red_blue = 1; - ei2c: return ret; } diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index f234ba60204..9a648968938 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -163,20 +163,6 @@ static int mt9t031_disable(struct i2c_client *client) return 0; } -static int mt9t031_init(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - - return mt9t031_idle(client); -} - -static int mt9t031_release(struct soc_camera_device *icd) -{ - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - - return mt9t031_disable(client); -} - static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = sd->priv; @@ -539,8 +525,6 @@ static const struct v4l2_queryctrl mt9t031_controls[] = { }; static struct soc_camera_ops mt9t031_ops = { - .init = mt9t031_init, - .release = mt9t031_release, .set_bus_param = mt9t031_set_bus_param, .query_bus_param = 
mt9t031_query_bus_param, .controls = mt9t031_controls, @@ -689,6 +673,7 @@ static int mt9t031_video_probe(struct i2c_client *client) struct soc_camera_device *icd = client->dev.platform_data; struct mt9t031 *mt9t031 = to_mt9t031(client); s32 data; + int ret; /* Enable the chip */ data = reg_write(client, MT9T031_CHIP_ENABLE, 1); @@ -711,7 +696,11 @@ static int mt9t031_video_probe(struct i2c_client *client) dev_info(&client->dev, "Detected a MT9T031 chip ID %x\n", data); - return 0; + ret = mt9t031_idle(client); + if (ret < 0) + dev_err(&client->dev, "Failed to initialise the camera\n"); + + return ret; } static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index 35ea0ddd071..5c47b55823c 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -138,9 +138,8 @@ static int reg_clear(struct i2c_client *client, const u8 reg, return reg_write(client, reg, ret & ~data); } -static int mt9v022_init(struct soc_camera_device *icd) +static int mt9v022_init(struct i2c_client *client) { - struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct mt9v022 *mt9v022 = to_mt9v022(client); int ret; @@ -532,7 +531,6 @@ static const struct v4l2_queryctrl mt9v022_controls[] = { }; static struct soc_camera_ops mt9v022_ops = { - .init = mt9v022_init, .set_bus_param = mt9v022_set_bus_param, .query_bus_param = mt9v022_query_bus_param, .controls = mt9v022_controls, @@ -751,6 +749,10 @@ static int mt9v022_video_probe(struct soc_camera_device *icd, data, mt9v022->model == V4L2_IDENT_MT9V022IX7ATM ? "monochrome" : "colour"); + ret = mt9v022_init(client); + if (ret < 0) + dev_err(&client->dev, "Failed to initialise the camera\n"); + ei2c: return ret; } diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 86e0648f65a..27921162514 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -385,12 +385,6 @@ static int soc_camera_open(struct file *file) goto eiciadd; } - if (icd->ops->init) { - ret = icd->ops->init(icd); - if (ret < 0) - goto einit; - } - /* Try to configure with default parameters */ ret = soc_camera_set_fmt(icf, &f); if (ret < 0) @@ -411,9 +405,6 @@ static int soc_camera_open(struct file *file) * and use_count == 1 */ esfmt: - if (icd->ops->release) - icd->ops->release(icd); -einit: ici->ops->remove(icd); eiciadd: if (icl->power) @@ -438,8 +429,6 @@ static int soc_camera_close(struct file *file) if (!icd->use_count) { struct soc_camera_link *icl = to_soc_camera_link(icd); - if (icd->ops->release) - icd->ops->release(icd); ici->ops->remove(icd); if (icl->power) icl->power(icd->pdev, 0); diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 3185e8daaa0..f95cc4a2d9a 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -191,12 +191,8 @@ struct soc_camera_format_xlate { struct soc_camera_ops { int (*suspend)(struct soc_camera_device *, pm_message_t state); int (*resume)(struct soc_camera_device *); - int (*init)(struct soc_camera_device *); - int (*release)(struct soc_camera_device *); unsigned long (*query_bus_param)(struct soc_camera_device *); int (*set_bus_param)(struct soc_camera_device *, unsigned long); - int (*get_chip_id)(struct soc_camera_device *, - struct v4l2_dbg_chip_ident *); int (*enum_input)(struct soc_camera_device *, struct v4l2_input *); const struct v4l2_queryctrl *controls; int num_controls; -- cgit v1.2.3-70-g09d2 From 
96c75399544838e1752001c8abdde36dd459cf8f Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 25 Aug 2009 11:53:23 -0300 Subject: V4L/DVB (12536): soc-camera: remove .gain and .exposure struct soc_camera_device members This makes the soc-camera interface for V4L2 subdevices thinner yet. Handle gain and exposure internally in each driver just like all other controls. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 43 +++++++++++++++++++---------- drivers/media/video/mt9m111.c | 29 +++++++++++++------ drivers/media/video/mt9t031.c | 37 ++++++++++++++++--------- drivers/media/video/mt9v022.c | 46 +++++++++++++++++++++++-------- drivers/media/video/mx1_camera.c | 3 +- drivers/media/video/ov772x.c | 6 ++-- drivers/media/video/pxa_camera.c | 3 +- drivers/media/video/soc_camera.c | 33 ++++++++-------------- drivers/media/video/soc_camera_platform.c | 3 +- drivers/media/video/tw9910.c | 3 +- include/media/soc_camera.h | 17 +++++++----- 11 files changed, 141 insertions(+), 82 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 4b394798a29..45388d2ce2f 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -80,6 +80,8 @@ struct mt9m001 { struct v4l2_rect rect; /* Sensor window */ __u32 fourcc; int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ + unsigned int gain; + unsigned int exposure; unsigned char autoexposure; }; @@ -129,8 +131,8 @@ static int mt9m001_init(struct i2c_client *client) dev_dbg(&client->dev, "%s\n", __func__); /* - * We don't know, whether platform provides reset, - * issue a soft reset too + * We don't know, whether platform provides reset, issue a soft reset + * too. This returns all registers to their default values. */ ret = reg_write(client, MT9M001_RESET, 1); if (!ret) @@ -200,6 +202,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) struct soc_camera_device *icd = client->dev.platform_data; int ret; const u16 hblank = 9, vblank = 25; + unsigned int total_h; if (mt9m001->fourcc == V4L2_PIX_FMT_SBGGR8 || mt9m001->fourcc == V4L2_PIX_FMT_SBGGR16) @@ -219,6 +222,8 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) soc_camera_limit_side(&rect.top, &rect.height, MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT); + total_h = rect.height + icd->y_skip_top + vblank; + /* Blanking and start values - default... 
*/ ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank); if (!ret) @@ -236,15 +241,13 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) ret = reg_write(client, MT9M001_WINDOW_HEIGHT, rect.height + icd->y_skip_top - 1); if (!ret && mt9m001->autoexposure) { - ret = reg_write(client, MT9M001_SHUTTER_WIDTH, - rect.height + icd->y_skip_top + vblank); + ret = reg_write(client, MT9M001_SHUTTER_WIDTH, total_h); if (!ret) { const struct v4l2_queryctrl *qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (524 + (rect.height + icd->y_skip_top + - vblank - 1) * - (qctrl->maximum - qctrl->minimum)) / + mt9m001->exposure = (524 + (total_h - 1) * + (qctrl->maximum - qctrl->minimum)) / 1048 + qctrl->minimum; } } @@ -457,6 +460,12 @@ static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) case V4L2_CID_EXPOSURE_AUTO: ctrl->value = mt9m001->autoexposure; break; + case V4L2_CID_GAIN: + ctrl->value = mt9m001->gain; + break; + case V4L2_CID_EXPOSURE: + ctrl->value = mt9m001->exposure; + break; } return 0; } @@ -518,7 +527,7 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) } /* Success */ - icd->gain = ctrl->value; + mt9m001->gain = ctrl->value; break; case V4L2_CID_EXPOSURE: /* mt9m001 has maximum == default */ @@ -535,21 +544,21 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) shutter); if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0) return -EIO; - icd->exposure = ctrl->value; + mt9m001->exposure = ctrl->value; mt9m001->autoexposure = 0; } break; case V4L2_CID_EXPOSURE_AUTO: if (ctrl->value) { const u16 vblank = 25; + unsigned int total_h = mt9m001->rect.height + + icd->y_skip_top + vblank; if (reg_write(client, MT9M001_SHUTTER_WIDTH, - mt9m001->rect.height + - icd->y_skip_top + vblank) < 0) + total_h) < 0) return -EIO; qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (524 + (mt9m001->rect.height + - icd->y_skip_top + vblank - 1) * - (qctrl->maximum - qctrl->minimum)) / + mt9m001->exposure = (524 + (total_h - 1) * + (qctrl->maximum - qctrl->minimum)) / 1048 + qctrl->minimum; mt9m001->autoexposure = 1; } else @@ -629,6 +638,10 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, if (ret < 0) dev_err(&client->dev, "Failed to initialise the camera\n"); + /* mt9m001_init() has reset the chip, returning registers to defaults */ + mt9m001->gain = 64; + mt9m001->exposure = 255; + return ret; } @@ -701,7 +714,7 @@ static int mt9m001_probe(struct i2c_client *client, /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m001_ops; - icd->y_skip_top = 1; + icd->y_skip_top = 0; mt9m001->rect.left = MT9M001_COLUMN_SKIP; mt9m001->rect.top = MT9M001_ROW_SKIP; diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 186902f9be2..90da699601e 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -153,6 +153,7 @@ struct mt9m111 { enum mt9m111_context context; struct v4l2_rect rect; u32 pixfmt; + unsigned int gain; unsigned char autoexposure; unsigned char datawidth; unsigned int powered:1; @@ -513,7 +514,8 @@ static int mt9m111_set_pixfmt(struct i2c_client *client, u32 pixfmt) ret = mt9m111_setfmt_yuv(client); break; default: - dev_err(&client->dev, "Pixel format not handled : %x\n", pixfmt); + dev_err(&client->dev, "Pixel format not handled : %x\n", + pixfmt); ret = -EINVAL; } @@ -536,9 +538,9 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 
}; int ret; - dev_dbg(&client->dev, "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", - __func__, pix->pixelformat, rect.left, rect.top, rect.width, - rect.height); + dev_dbg(&client->dev, + "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", __func__, + pix->pixelformat, rect.left, rect.top, rect.width, rect.height); ret = mt9m111_make_rect(client, &rect); if (!ret) @@ -672,8 +674,10 @@ static const struct v4l2_queryctrl mt9m111_controls[] = { }; static int mt9m111_resume(struct soc_camera_device *icd); +static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state); static struct soc_camera_ops mt9m111_ops = { + .suspend = mt9m111_suspend, .resume = mt9m111_resume, .query_bus_param = mt9m111_query_bus_param, .set_bus_param = mt9m111_set_bus_param, @@ -714,13 +718,13 @@ static int mt9m111_get_global_gain(struct i2c_client *client) static int mt9m111_set_global_gain(struct i2c_client *client, int gain) { - struct soc_camera_device *icd = client->dev.platform_data; + struct mt9m111 *mt9m111 = to_mt9m111(client); u16 val; if (gain > 63 * 2 * 2) return -EINVAL; - icd->gain = gain; + mt9m111->gain = gain; if ((gain >= 64 * 2) && (gain < 63 * 2 * 2)) val = (1 << 10) | (1 << 9) | (gain / 4); else if ((gain >= 64) && (gain < 64 * 2)) @@ -844,17 +848,26 @@ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) return ret; } +static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state) +{ + struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct mt9m111 *mt9m111 = to_mt9m111(client); + + mt9m111->gain = mt9m111_get_global_gain(client); + + return 0; +} + static int mt9m111_restore_state(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); - struct soc_camera_device *icd = client->dev.platform_data; mt9m111_set_context(client, mt9m111->context); mt9m111_set_pixfmt(client, mt9m111->pixfmt); mt9m111_setup_rect(client, &mt9m111->rect); mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS); mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); - mt9m111_set_global_gain(client, icd->gain); + mt9m111_set_global_gain(client, mt9m111->gain); mt9m111_set_autoexposure(client, mt9m111->autoexposure); mt9m111_set_autowhitebalance(client, mt9m111->autowhitebalance); return 0; diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index 9a648968938..6966f644977 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -73,6 +73,8 @@ struct mt9t031 { int model; /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */ u16 xskip; u16 yskip; + unsigned int gain; + unsigned int exposure; unsigned char autoexposure; }; @@ -301,16 +303,15 @@ static int mt9t031_set_params(struct soc_camera_device *icd, ret = reg_write(client, MT9T031_WINDOW_HEIGHT, rect->height + icd->y_skip_top - 1); if (ret >= 0 && mt9t031->autoexposure) { - ret = set_shutter(client, - rect->height + icd->y_skip_top + vblank); + unsigned int total_h = rect->height + icd->y_skip_top + vblank; + ret = set_shutter(client, total_h); if (ret >= 0) { const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; const struct v4l2_queryctrl *qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (shutter_max / 2 + (rect->height + - icd->y_skip_top + vblank - 1) * - (qctrl->maximum - qctrl->minimum)) / + mt9t031->exposure = (shutter_max / 2 + (total_h - 1) * + (qctrl->maximum - qctrl->minimum)) / shutter_max + qctrl->minimum; } } @@ -553,6 +554,12 @@ static int 
mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) case V4L2_CID_EXPOSURE_AUTO: ctrl->value = mt9t031->autoexposure; break; + case V4L2_CID_GAIN: + ctrl->value = mt9t031->gain; + break; + case V4L2_CID_EXPOSURE: + ctrl->value = mt9t031->exposure; + break; } return 0; } @@ -624,7 +631,7 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) } /* Success */ - icd->gain = ctrl->value; + mt9t031->gain = ctrl->value; break; case V4L2_CID_EXPOSURE: /* mt9t031 has maximum == default */ @@ -641,7 +648,7 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) old, shutter); if (set_shutter(client, shutter) < 0) return -EIO; - icd->exposure = ctrl->value; + mt9t031->exposure = ctrl->value; mt9t031->autoexposure = 0; } break; @@ -649,14 +656,14 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) if (ctrl->value) { const u16 vblank = MT9T031_VERTICAL_BLANK; const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; - if (set_shutter(client, mt9t031->rect.height + - icd->y_skip_top + vblank) < 0) + unsigned int total_h = mt9t031->rect.height + + icd->y_skip_top + vblank; + + if (set_shutter(client, total_h) < 0) return -EIO; qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = (shutter_max / 2 + - (mt9t031->rect.height + - icd->y_skip_top + vblank - 1) * - (qctrl->maximum - qctrl->minimum)) / + mt9t031->exposure = (shutter_max / 2 + (total_h - 1) * + (qctrl->maximum - qctrl->minimum)) / shutter_max + qctrl->minimum; mt9t031->autoexposure = 1; } else @@ -700,6 +707,10 @@ static int mt9t031_video_probe(struct i2c_client *client) if (ret < 0) dev_err(&client->dev, "Failed to initialise the camera\n"); + /* mt9t031_idle() has reset the chip to default. */ + mt9t031->exposure = 255; + mt9t031->gain = 64; + return ret; } diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index 5c47b55823c..995607f9d3b 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -45,7 +45,7 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\""); #define MT9V022_PIXEL_OPERATION_MODE 0x0f #define MT9V022_LED_OUT_CONTROL 0x1b #define MT9V022_ADC_MODE_CONTROL 0x1c -#define MT9V022_ANALOG_GAIN 0x34 +#define MT9V022_ANALOG_GAIN 0x35 #define MT9V022_BLACK_LEVEL_CALIB_CTRL 0x47 #define MT9V022_PIXCLK_FV_LV 0x74 #define MT9V022_DIGITAL_TEST_PATTERN 0x7f @@ -155,6 +155,10 @@ static int mt9v022_init(struct i2c_client *client) if (!ret) /* AEC, AGC on */ ret = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x3); + if (!ret) + ret = reg_write(client, MT9V022_ANALOG_GAIN, 16); + if (!ret) + ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH, 480); if (!ret) ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, 480); if (!ret) @@ -540,8 +544,12 @@ static struct soc_camera_ops mt9v022_ops = { static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct i2c_client *client = sd->priv; + const struct v4l2_queryctrl *qctrl; + unsigned long range; int data; + qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id); + switch (ctrl->id) { case V4L2_CID_VFLIP: data = reg_read(client, MT9V022_READ_MODE); @@ -566,6 +574,24 @@ static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) if (data < 0) return -EIO; ctrl->value = !!(data & 0x2); + break; + case V4L2_CID_GAIN: + data = reg_read(client, MT9V022_ANALOG_GAIN); + if (data < 0) + return -EIO; + + range = qctrl->maximum - qctrl->minimum; + ctrl->value = ((data - 16) * range + 24) / 48 
+ qctrl->minimum; + + break; + case V4L2_CID_EXPOSURE: + data = reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH); + if (data < 0) + return -EIO; + + range = qctrl->maximum - qctrl->minimum; + ctrl->value = ((data - 1) * range + 239) / 479 + qctrl->minimum; + break; } return 0; @@ -575,7 +601,6 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int data; struct i2c_client *client = sd->priv; - struct soc_camera_device *icd = client->dev.platform_data; const struct v4l2_queryctrl *qctrl; qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id); @@ -605,12 +630,9 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) return -EINVAL; else { unsigned long range = qctrl->maximum - qctrl->minimum; - /* Datasheet says 16 to 64. autogain only works properly - * after setting gain to maximum 14. Larger values - * produce "white fly" noise effect. On the whole, - * manually setting analog gain does no good. */ + /* Valid values 16 to 64, 32 to 64 must be even. */ unsigned long gain = ((ctrl->value - qctrl->minimum) * - 10 + range / 2) / range + 4; + 48 + range / 2) / range + 16; if (gain >= 32) gain &= ~1; /* The user wants to set gain manually, hope, she @@ -619,11 +641,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0) return -EIO; - dev_info(&client->dev, "Setting gain from %d to %lu\n", - reg_read(client, MT9V022_ANALOG_GAIN), gain); + dev_dbg(&client->dev, "Setting gain from %d to %lu\n", + reg_read(client, MT9V022_ANALOG_GAIN), gain); if (reg_write(client, MT9V022_ANALOG_GAIN, gain) < 0) return -EIO; - icd->gain = ctrl->value; } break; case V4L2_CID_EXPOSURE: @@ -646,7 +667,6 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) if (reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH, shutter) < 0) return -EIO; - icd->exposure = ctrl->value; } break; case V4L2_CID_AUTOGAIN: @@ -827,6 +847,10 @@ static int mt9v022_probe(struct i2c_client *client, mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT; icd->ops = &mt9v022_ops; + /* + * MT9V022 _really_ corrupts the first read out line. + * TODO: verify on i.MX31 + */ icd->y_skip_top = 1; mt9v022->rect.left = MT9V022_COLUMN_SKIP; diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 3875483ab9d..5f37952c75c 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -548,7 +548,8 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd, xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { - dev_warn(icd->dev.parent, "Format %x not found\n", pix->pixelformat); + dev_warn(icd->dev.parent, "Format %x not found\n", + pix->pixelformat); return -EINVAL; } diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 776a91dcfbe..eccb40ab7fe 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -404,7 +404,8 @@ struct ov772x_priv { int model; unsigned short flag_vflip:1; unsigned short flag_hflip:1; - unsigned short band_filter; /* 256 - BDBASE, 0 if (!COM8[5]) */ + /* band_filter = COM8[5] ? 
256 - BDBASE : 0 */ + unsigned short band_filter; }; #define ENDMARKER { 0xff, 0xff } @@ -587,7 +588,8 @@ static const struct v4l2_queryctrl ov772x_controls[] = { static struct ov772x_priv *to_ov772x(const struct i2c_client *client) { - return container_of(i2c_get_clientdata(client), struct ov772x_priv, subdev); + return container_of(i2c_get_clientdata(client), struct ov772x_priv, + subdev); } static int ov772x_write_array(struct i2c_client *client, diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index a19bb76e175..6952e9602d5 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -274,7 +274,8 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) { if (buf->dmas[i].sg_cpu) - dma_free_coherent(ici->v4l2_dev.dev, buf->dmas[i].sg_size, + dma_free_coherent(ici->v4l2_dev.dev, + buf->dmas[i].sg_size, buf->dmas[i].sg_cpu, buf->dmas[i].sg_dma); buf->dmas[i].sg_cpu = NULL; diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 27921162514..e8248ba0c03 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -327,7 +327,9 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf, static int soc_camera_open(struct file *file) { struct video_device *vdev = video_devdata(file); - struct soc_camera_device *icd = container_of(vdev->parent, struct soc_camera_device, dev); + struct soc_camera_device *icd = container_of(vdev->parent, + struct soc_camera_device, + dev); struct soc_camera_link *icl = to_soc_camera_link(icd); struct soc_camera_host *ici; struct soc_camera_file *icf; @@ -349,7 +351,10 @@ static int soc_camera_open(struct file *file) goto emgi; } - /* Protect against icd->ops->remove() until we module_get() both drivers. */ + /* + * Protect against icd->ops->remove() until we module_get() both + * drivers. + */ mutex_lock(&icd->video_lock); icf->icd = icd; @@ -670,19 +675,6 @@ static int soc_camera_g_ctrl(struct file *file, void *priv, WARN_ON(priv != file->private_data); - switch (ctrl->id) { - case V4L2_CID_GAIN: - if (icd->gain == (unsigned short)~0) - return -EINVAL; - ctrl->value = icd->gain; - return 0; - case V4L2_CID_EXPOSURE: - if (icd->exposure == (unsigned short)~0) - return -EINVAL; - ctrl->value = icd->exposure; - return 0; - } - if (ici->ops->get_ctrl) { ret = ici->ops->get_ctrl(icd, ctrl); if (ret != -ENOIOCTLCMD) @@ -944,7 +936,10 @@ static int soc_camera_probe(struct device *dev) if (ret < 0) goto eadddev; - /* FIXME: this is racy, have to use driver-binding notification */ + /* + * FIXME: this is racy, have to use driver-binding notification, + * when it is available + */ control = to_soc_camera_control(icd); if (!control || !control->driver || !dev_get_drvdata(control) || !try_module_get(control->driver->owner)) { @@ -1279,7 +1274,6 @@ static int video_dev_create(struct soc_camera_device *icd) */ static int soc_camera_video_start(struct soc_camera_device *icd) { - const struct v4l2_queryctrl *qctrl; int ret; if (!icd->dev.parent) @@ -1297,11 +1291,6 @@ static int soc_camera_video_start(struct soc_camera_device *icd) return ret; } - qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_GAIN); - icd->gain = qctrl ? qctrl->default_value : (unsigned short)~0; - qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); - icd->exposure = qctrl ? 
qctrl->default_value : (unsigned short)~0; - return 0; } diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index 3825c358172..1b6dd02a801 100644 --- a/drivers/media/video/soc_camera_platform.c +++ b/drivers/media/video/soc_camera_platform.c @@ -33,7 +33,8 @@ static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev) static struct soc_camera_platform_info *get_info(struct soc_camera_device *icd) { - struct platform_device *pdev = to_platform_device(to_soc_camera_control(icd)); + struct platform_device *pdev = + to_platform_device(to_soc_camera_control(icd)); return pdev->dev.platform_data; } diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index fbf4130dfc5..269ab044072 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -357,7 +357,8 @@ static const struct tw9910_hsync_ctrl tw9910_hsync_ctrl = { */ static struct tw9910_priv *to_tw9910(const struct i2c_client *client) { - return container_of(i2c_get_clientdata(client), struct tw9910_priv, subdev); + return container_of(i2c_get_clientdata(client), struct tw9910_priv, + subdev); } static int tw9910_set_scale(struct i2c_client *client, diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index f95cc4a2d9a..3d74e60032d 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -27,8 +27,6 @@ struct soc_camera_device { unsigned short width_min; unsigned short height_min; unsigned short y_skip_top; /* Lines to skip at the top */ - unsigned short gain; - unsigned short exposure; unsigned char iface; /* Host number */ unsigned char devnum; /* Device number per host */ unsigned char buswidth; /* See comment in .c */ @@ -128,29 +126,34 @@ struct soc_camera_link { void (*free_bus)(struct soc_camera_link *); }; -static inline struct soc_camera_device *to_soc_camera_dev(const struct device *dev) +static inline struct soc_camera_device *to_soc_camera_dev( + const struct device *dev) { return container_of(dev, struct soc_camera_device, dev); } -static inline struct soc_camera_host *to_soc_camera_host(const struct device *dev) +static inline struct soc_camera_host *to_soc_camera_host( + const struct device *dev) { struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); return container_of(v4l2_dev, struct soc_camera_host, v4l2_dev); } -static inline struct soc_camera_link *to_soc_camera_link(const struct soc_camera_device *icd) +static inline struct soc_camera_link *to_soc_camera_link( + const struct soc_camera_device *icd) { return icd->dev.platform_data; } -static inline struct device *to_soc_camera_control(const struct soc_camera_device *icd) +static inline struct device *to_soc_camera_control( + const struct soc_camera_device *icd) { return dev_get_drvdata(&icd->dev); } -static inline struct v4l2_subdev *soc_camera_to_subdev(const struct soc_camera_device *icd) +static inline struct v4l2_subdev *soc_camera_to_subdev( + const struct soc_camera_device *icd) { struct device *control = to_soc_camera_control(icd); return dev_get_drvdata(control); -- cgit v1.2.3-70-g09d2 From 53dacb15705901e14b03dcba27e40364fedd9d09 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Mon, 10 Aug 2009 02:49:08 -0300 Subject: V4L/DVB (12540): v4l: simplify v4l2_i2c_new_subdev and friends Rewrite v4l2_i2c_new_subdev as a simplified version of v4l2_i2c_new_subdev_cfg and remove v4l2_i2c_new_probed_subdev and v4l2_i2c_new_probed_subdev_addr. This simplifies this API substantially. 
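A minimal sketch of the call conversion this implies (the module name, chip id and address are placeholders taken from the framework documentation, not from any particular driver): a known slave address is passed directly with a NULL probe list, while an unknown address is expressed as 0 plus a probe list, replacing the removed v4l2_i2c_new_probed_subdev*() variants:

	/* exact address known: no probing takes place */
	struct v4l2_subdev *sd = v4l2_i2c_new_subdev(v4l2_dev, adapter,
			"module_foo", "chipid", 0x36, NULL);

	/* address not known: pass 0 and a list of addresses to probe */
	sd = v4l2_i2c_new_subdev(v4l2_dev, adapter,
			"module_foo", "chipid", 0, I2C_ADDRS(0x36));

The probe list is only consulted when the explicit address argument is 0, as the v4l2-framework.txt hunk below spells out.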
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/v4l2-framework.txt | 16 ++-- drivers/media/video/au0828/au0828-cards.c | 4 +- drivers/media/video/bt8xx/bttv-cards.c | 44 ++++----- drivers/media/video/cafe_ccic.c | 2 +- drivers/media/video/cx18/cx18-i2c.c | 14 +-- drivers/media/video/cx231xx/cx231xx-cards.c | 4 +- drivers/media/video/cx23885/cx23885-cards.c | 2 +- drivers/media/video/cx23885/cx23885-video.c | 6 +- drivers/media/video/cx88/cx88-cards.c | 14 +-- drivers/media/video/cx88/cx88-video.c | 6 +- drivers/media/video/davinci/vpif_display.c | 4 +- drivers/media/video/em28xx/em28xx-cards.c | 30 +++--- drivers/media/video/ivtv/ivtv-i2c.c | 18 ++-- drivers/media/video/mxb.c | 14 +-- drivers/media/video/pvrusb2/pvrusb2-hdw.c | 10 +- drivers/media/video/saa7134/saa7134-cards.c | 12 +-- drivers/media/video/saa7134/saa7134-core.c | 6 +- drivers/media/video/usbvision/usbvision-i2c.c | 12 +-- drivers/media/video/v4l2-common.c | 133 -------------------------- drivers/media/video/vino.c | 8 +- drivers/media/video/w9968cf.c | 4 +- drivers/media/video/zoran/zoran_card.c | 8 +- include/media/v4l2-common.h | 24 ++--- 23 files changed, 126 insertions(+), 269 deletions(-) (limited to 'include') diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt index ba4706afc5f..e395a9cdc53 100644 --- a/Documentation/video4linux/v4l2-framework.txt +++ b/Documentation/video4linux/v4l2-framework.txt @@ -370,19 +370,20 @@ from the remove() callback ensures that this is always done correctly. The bridge driver also has some helper functions it can use: struct v4l2_subdev *sd = v4l2_i2c_new_subdev(v4l2_dev, adapter, - "module_foo", "chipid", 0x36); + "module_foo", "chipid", 0x36, NULL); This loads the given module (can be NULL if no module needs to be loaded) and calls i2c_new_device() with the given i2c_adapter and chip/address arguments. If all goes well, then it registers the subdev with the v4l2_device. -You can also use v4l2_i2c_new_probed_subdev() which is very similar to -v4l2_i2c_new_subdev(), except that it has an array of possible I2C addresses -that it should probe. Internally it calls i2c_new_probed_device(). +You can also use the last argument of v4l2_i2c_new_subdev() to pass an array +of possible I2C addresses that it should probe. These probe addresses are +only used if the previous argument is 0. A non-zero argument means that you +know the exact i2c address so in that case no probing will take place. Both functions return NULL if something went wrong. -Note that the chipid you pass to v4l2_i2c_new_(probed_)subdev() is usually +Note that the chipid you pass to v4l2_i2c_new_subdev() is usually the same as the module name. It allows you to specify a chip variant, e.g. "saa7114" or "saa7115". In general though the i2c driver autodetects this. The use of chipid is something that needs to be looked at more closely at a @@ -410,11 +411,6 @@ the irq and platform_data arguments after the subdev was setup. The older v4l2_i2c_new_(probed_)subdev functions will call s_config as well, but with irq set to 0 and platform_data set to NULL. -Note that in the next kernel release the functions v4l2_i2c_new_subdev, -v4l2_i2c_new_probed_subdev and v4l2_i2c_new_probed_subdev_addr will all be -replaced by a single v4l2_i2c_new_subdev that is identical to -v4l2_i2c_new_subdev_cfg but without the irq and platform_data arguments. 
- struct video_device ------------------- diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c index 830c4a933f6..57dd9195daf 100644 --- a/drivers/media/video/au0828/au0828-cards.c +++ b/drivers/media/video/au0828/au0828-cards.c @@ -212,7 +212,7 @@ void au0828_card_setup(struct au0828_dev *dev) be abstracted out if we ever need to support a different demod) */ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "au8522", "au8522", 0x8e >> 1); + "au8522", "au8522", 0x8e >> 1, NULL); if (sd == NULL) printk(KERN_ERR "analog subdev registration failed\n"); } @@ -221,7 +221,7 @@ void au0828_card_setup(struct au0828_dev *dev) if (dev->board.tuner_type != TUNER_ABSENT) { /* Load the tuner module, which does the attach */ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "tuner", "tuner", dev->board.tuner_addr); + "tuner", "tuner", dev->board.tuner_addr, NULL); if (sd == NULL) printk(KERN_ERR "tuner subdev registration fail\n"); diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c index b42251fa96b..12279f6d9bc 100644 --- a/drivers/media/video/bt8xx/bttv-cards.c +++ b/drivers/media/video/bt8xx/bttv-cards.c @@ -3524,8 +3524,8 @@ void __devinit bttv_init_card2(struct bttv *btv) }; struct v4l2_subdev *sd; - sd = v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "saa6588", "saa6588", addrs); + sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "saa6588", "saa6588", 0, addrs); btv->has_saa6588 = (sd != NULL); } @@ -3549,8 +3549,8 @@ void __devinit bttv_init_card2(struct bttv *btv) I2C_CLIENT_END }; - btv->sd_msp34xx = v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "msp3400", "msp3400", addrs); + btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "msp3400", "msp3400", 0, addrs); if (btv->sd_msp34xx) return; goto no_audio; @@ -3563,16 +3563,16 @@ void __devinit bttv_init_card2(struct bttv *btv) I2C_CLIENT_END }; - if (v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tda7432", "tda7432", addrs)) + if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tda7432", "tda7432", 0, addrs)) return; goto no_audio; } case 3: { /* The user specified that we should probe for tvaudio */ - btv->sd_tvaudio = v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tvaudio", "tvaudio", tvaudio_addrs()); + btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tvaudio", "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; goto no_audio; @@ -3591,13 +3591,13 @@ void __devinit bttv_init_card2(struct bttv *btv) it really is a msp3400, so it will return NULL when the device found is really something else (e.g. a tea6300). */ if (!bttv_tvcards[btv->c.type].no_msp34xx) { - btv->sd_msp34xx = v4l2_i2c_new_probed_subdev_addr(&btv->c.v4l2_dev, + btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "msp3400", "msp3400", - I2C_ADDR_MSP3400 >> 1); + 0, I2C_ADDRS(I2C_ADDR_MSP3400 >> 1)); } else if (bttv_tvcards[btv->c.type].msp34xx_alt) { - btv->sd_msp34xx = v4l2_i2c_new_probed_subdev_addr(&btv->c.v4l2_dev, + btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "msp3400", "msp3400", - I2C_ADDR_MSP3400_ALT >> 1); + 0, I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1)); } /* If we found a msp34xx, then we're done. 
*/ @@ -3611,14 +3611,14 @@ void __devinit bttv_init_card2(struct bttv *btv) I2C_CLIENT_END }; - if (v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tda7432", "tda7432", addrs)) + if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tda7432", "tda7432", 0, addrs)) return; } /* Now see if we can find one of the tvaudio devices. */ - btv->sd_tvaudio = v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tvaudio", "tvaudio", tvaudio_addrs()); + btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tvaudio", "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; @@ -3641,15 +3641,15 @@ void __devinit bttv_init_tuner(struct bttv *btv) /* Load tuner module before issuing tuner config call! */ if (bttv_tvcards[btv->c.type].has_radio) - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_RADIO)); - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); + v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); + v4l2_i2c_new_subdev(&btv->c.v4l2_dev, &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); + 0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; tun_setup.type = btv->tuner_type; diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index 9c149a78129..657c481d255 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c @@ -1955,7 +1955,7 @@ static int cafe_pci_probe(struct pci_dev *pdev, cam->sensor_addr = 0x42; cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, &cam->i2c_adapter, - "ov7670", "ov7670", cam->sensor_addr); + "ov7670", "ov7670", cam->sensor_addr, NULL); if (cam->sensor == NULL) { ret = -ENODEV; goto out_smbus; diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c index dbbf93d2eee..2477461e84d 100644 --- a/drivers/media/video/cx18/cx18-i2c.c +++ b/drivers/media/video/cx18/cx18-i2c.c @@ -139,16 +139,16 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx) if (hw == CX18_HW_TUNER) { /* special tuner group handling */ - sd = v4l2_i2c_new_probed_subdev(&cx->v4l2_dev, - adap, mod, type, cx->card_i2c->radio); + sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, + adap, mod, type, 0, cx->card_i2c->radio); if (sd != NULL) sd->grp_id = hw; - sd = v4l2_i2c_new_probed_subdev(&cx->v4l2_dev, - adap, mod, type, cx->card_i2c->demod); + sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, + adap, mod, type, 0, cx->card_i2c->demod); if (sd != NULL) sd->grp_id = hw; - sd = v4l2_i2c_new_probed_subdev(&cx->v4l2_dev, - adap, mod, type, cx->card_i2c->tv); + sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, + adap, mod, type, 0, cx->card_i2c->tv); if (sd != NULL) sd->grp_id = hw; return sd != NULL ? 0 : -1; @@ -162,7 +162,7 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx) return -1; /* It's an I2C device other than an analog tuner or IR chip */ - sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, mod, type, hw_addrs[idx]); + sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, mod, type, hw_addrs[idx], NULL); if (sd != NULL) sd->grp_id = hw; return sd != NULL ? 
0 : -1; diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c index 63d2239fd32..319c459459e 100644 --- a/drivers/media/video/cx231xx/cx231xx-cards.c +++ b/drivers/media/video/cx231xx/cx231xx-cards.c @@ -313,7 +313,7 @@ void cx231xx_card_setup(struct cx231xx *dev) if (dev->board.decoder == CX231XX_AVDECODER) { dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[0].i2c_adap, - "cx25840", "cx25840", 0x88 >> 1); + "cx25840", "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840 == NULL) cx231xx_info("cx25840 subdev registration failure\n"); cx25840_call(dev, core, load_fw); @@ -323,7 +323,7 @@ void cx231xx_card_setup(struct cx231xx *dev) if (dev->board.tuner_type != TUNER_ABSENT) { dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[1].i2c_adap, - "tuner", "tuner", 0xc2 >> 1); + "tuner", "tuner", 0xc2 >> 1, NULL); if (dev->sd_tuner == NULL) cx231xx_info("tuner subdev registration failure\n"); diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c index 3143d85ef31..02ba4aec7d9 100644 --- a/drivers/media/video/cx23885/cx23885-cards.c +++ b/drivers/media/video/cx23885/cx23885-cards.c @@ -929,7 +929,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[2].i2c_adap, - "cx25840", "cx25840", 0x88 >> 1); + "cx25840", "cx25840", 0x88 >> 1, NULL); v4l2_subdev_call(dev->sd_cx25840, core, load_fw); break; } diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c index 5d609333630..654cc253cd5 100644 --- a/drivers/media/video/cx23885/cx23885-video.c +++ b/drivers/media/video/cx23885/cx23885-video.c @@ -1521,11 +1521,11 @@ int cx23885_video_register(struct cx23885_dev *dev) if (dev->tuner_addr) sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[1].i2c_adap, - "tuner", "tuner", dev->tuner_addr); + "tuner", "tuner", dev->tuner_addr, NULL); else - sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, + sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[1].i2c_adap, - "tuner", "tuner", v4l2_i2c_tuner_addrs(ADDRS_TV)); + "tuner", "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV)); if (sd) { struct tuner_setup tun_setup; diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c index e5f07fbd5a3..33be6369871 100644 --- a/drivers/media/video/cx88/cx88-cards.c +++ b/drivers/media/video/cx88/cx88-cards.c @@ -3439,20 +3439,20 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr) The radio_type is sometimes missing, or set to UNSET but later code configures a tea5767. */ - v4l2_i2c_new_probed_subdev(&core->v4l2_dev, &core->i2c_adap, + v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_RADIO)); + 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); if (has_demod) - v4l2_i2c_new_probed_subdev(&core->v4l2_dev, + v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); + 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (core->board.tuner_addr == ADDR_UNSET) { - v4l2_i2c_new_probed_subdev(&core->v4l2_dev, + v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, "tuner", "tuner", - has_demod ? tv_addrs + 4 : tv_addrs); + 0, has_demod ? 
tv_addrs + 4 : tv_addrs); } else { v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - "tuner", "tuner", core->board.tuner_addr); + "tuner", "tuner", core->board.tuner_addr, NULL); } } diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index 2bb54c3ef5c..81d2b5dea18 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c @@ -1881,14 +1881,14 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev, if (core->board.audio_chip == V4L2_IDENT_WM8775) v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - "wm8775", "wm8775", 0x36 >> 1); + "wm8775", "wm8775", 0x36 >> 1, NULL); if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) { /* This probes for a tda9874 as is used on some Pixelview Ultra boards. */ - v4l2_i2c_new_probed_subdev_addr(&core->v4l2_dev, + v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - "tvaudio", "tvaudio", 0xb0 >> 1); + "tvaudio", "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); } switch (core->boardnr) { diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c index 8ea65d794db..a125a452d24 100644 --- a/drivers/media/video/davinci/vpif_display.c +++ b/drivers/media/video/davinci/vpif_display.c @@ -1566,10 +1566,10 @@ static __init int vpif_probe(struct platform_device *pdev) } for (i = 0; i < subdev_count; i++) { - vpif_obj.sd[i] = v4l2_i2c_new_probed_subdev(&vpif_obj.v4l2_dev, + vpif_obj.sd[i] = v4l2_i2c_new_subdev(&vpif_obj.v4l2_dev, i2c_adap, subdevdata[i].name, subdevdata[i].name, - &subdevdata[i].addr); + 0, I2C_ADDRS(subdevdata[i].addr)); if (!vpif_obj.sd[i]) { vpif_err("Error registering v4l2 subdevice\n"); goto probe_subdev_out; diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 8a5ce818170..bdb249bd9d5 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c @@ -2372,55 +2372,55 @@ void em28xx_card_setup(struct em28xx *dev) /* request some modules */ if (dev->board.has_msp34xx) - v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "msp3400", "msp3400", msp3400_addrs); + v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, + "msp3400", "msp3400", 0, msp3400_addrs); if (dev->board.decoder == EM28XX_SAA711X) - v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "saa7115", "saa7115_auto", saa711x_addrs); + v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, + "saa7115", "saa7115_auto", 0, saa711x_addrs); if (dev->board.decoder == EM28XX_TVP5150) - v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "tvp5150", "tvp5150", tvp5150_addrs); + v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, + "tvp5150", "tvp5150", 0, tvp5150_addrs); if (dev->em28xx_sensor == EM28XX_MT9V011) { struct v4l2_subdev *sd; - sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, - &dev->i2c_adap, "mt9v011", "mt9v011", mt9v011_addrs); + sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, + &dev->i2c_adap, "mt9v011", "mt9v011", 0, mt9v011_addrs); v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal); } if (dev->board.adecoder == EM28XX_TVAUDIO) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "tvaudio", "tvaudio", dev->board.tvaudio_addr); + "tvaudio", "tvaudio", dev->board.tvaudio_addr, NULL); if (dev->board.tuner_type != TUNER_ABSENT) { int has_demod = (dev->tda9887_conf & TDA9887_PRESENT); if (dev->board.radio.type) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "tuner", "tuner", dev->board.radio_addr); + "tuner", "tuner", dev->board.radio_addr, 
NULL); if (has_demod) - v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, + v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); + 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (dev->tuner_addr == 0) { enum v4l2_i2c_tuner_type type = has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV; struct v4l2_subdev *sd; - sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, + sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(type)); + 0, v4l2_i2c_tuner_addrs(type)); if (sd) dev->tuner_addr = v4l2_i2c_subdev_addr(sd); } else { v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "tuner", "tuner", dev->tuner_addr); + "tuner", "tuner", dev->tuner_addr, NULL); } } diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c index 8f15a31d3f6..b9c71e61f7d 100644 --- a/drivers/media/video/ivtv/ivtv-i2c.c +++ b/drivers/media/video/ivtv/ivtv-i2c.c @@ -161,19 +161,19 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx) return -1; if (hw == IVTV_HW_TUNER) { /* special tuner handling */ - sd = v4l2_i2c_new_probed_subdev(&itv->v4l2_dev, + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, mod, type, - itv->card_i2c->radio); + 0, itv->card_i2c->radio); if (sd) sd->grp_id = 1 << idx; - sd = v4l2_i2c_new_probed_subdev(&itv->v4l2_dev, + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, mod, type, - itv->card_i2c->demod); + 0, itv->card_i2c->demod); if (sd) sd->grp_id = 1 << idx; - sd = v4l2_i2c_new_probed_subdev(&itv->v4l2_dev, + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, mod, type, - itv->card_i2c->tv); + 0, itv->card_i2c->tv); if (sd) sd->grp_id = 1 << idx; return sd ? 0 : -1; @@ -181,11 +181,11 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx) if (!hw_addrs[idx]) return -1; if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) { - sd = v4l2_i2c_new_probed_subdev_addr(&itv->v4l2_dev, - adap, mod, type, hw_addrs[idx]); + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, + adap, mod, type, 0, I2C_ADDRS(hw_addrs[idx])); } else { sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, mod, type, hw_addrs[idx]); + adap, mod, type, hw_addrs[idx], NULL); } if (sd) sd->grp_id = 1 << idx; diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c index 35890e8b243..3454070e63f 100644 --- a/drivers/media/video/mxb.c +++ b/drivers/media/video/mxb.c @@ -186,19 +186,19 @@ static int mxb_probe(struct saa7146_dev *dev) } mxb->saa7111a = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - "saa7115", "saa7111", I2C_SAA7111A); + "saa7115", "saa7111", I2C_SAA7111A, NULL); mxb->tea6420_1 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - "tea6420", "tea6420", I2C_TEA6420_1); + "tea6420", "tea6420", I2C_TEA6420_1, NULL); mxb->tea6420_2 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - "tea6420", "tea6420", I2C_TEA6420_2); + "tea6420", "tea6420", I2C_TEA6420_2, NULL); mxb->tea6415c = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - "tea6415c", "tea6415c", I2C_TEA6415C); + "tea6415c", "tea6415c", I2C_TEA6415C, NULL); mxb->tda9840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - "tda9840", "tda9840", I2C_TDA9840); + "tda9840", "tda9840", I2C_TDA9840, NULL); mxb->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - "tuner", "tuner", I2C_TUNER); + "tuner", "tuner", I2C_TUNER, NULL); if (v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - "saa5246a", "saa5246a", I2C_SAA5246A)) { + "saa5246a", "saa5246a", I2C_SAA5246A, NULL)) { printk(KERN_INFO "mxb: found teletext decoder\n"); } 
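All of the call-site conversions in this patch follow one of two patterns, sketched below with made-up names (the "foodec" module/chip name, the 0x42/0x43 addresses and the dev->... fields are hypothetical, not taken from this patch): a caller with a single known address passes it directly together with a NULL probe list, while a caller that previously probed passes 0 as the address and supplies a zero-terminated probe list, for example one built with I2C_ADDRS().

	/* Hypothetical fragment; "foodec", 0x42/0x43 and the dev->... fields
	 * are made-up names used only to illustrate the new call signature. */
	struct v4l2_subdev *sd;

	/* Known, fixed 7-bit address: pass it directly, no probe list. */
	sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
				 "foodec", "foodec", 0x42, NULL);

	/* Address not known up front: pass 0 plus a probe list instead. */
	sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
				 "foodec", "foodec", 0,
				 I2C_ADDRS(0x42, 0x43));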
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c index cbc388729d7..13639b30270 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c @@ -2063,8 +2063,8 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, return -EINVAL; } - /* Note how the 2nd and 3rd arguments are the same for both - * v4l2_i2c_new_subdev() and v4l2_i2c_new_probed_subdev(). Why? + /* Note how the 2nd and 3rd arguments are the same for + * v4l2_i2c_new_subdev(). Why? * Well the 2nd argument is the module name to load, while the 3rd * argument is documented in the framework as being the "chipid" - * and every other place where I can find examples of this, the @@ -2077,15 +2077,15 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, mid, i2caddr[0]); sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap, fname, fname, - i2caddr[0]); + i2caddr[0], NULL); } else { pvr2_trace(PVR2_TRACE_INIT, "Module ID %u:" " Setting up with address probe list", mid); - sd = v4l2_i2c_new_probed_subdev(&hdw->v4l2_dev, &hdw->i2c_adap, + sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap, fname, fname, - i2caddr); + 0, i2caddr); } if (!sd) { diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 1b29487fd25..14b9ba4579b 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -7208,22 +7208,22 @@ int saa7134_board_init2(struct saa7134_dev *dev) if (dev->radio_type != UNSET) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", "tuner", - dev->radio_addr); + dev->radio_addr, NULL); if (has_demod) - v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, + v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); + 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (dev->tuner_addr == ADDR_UNSET) { enum v4l2_i2c_tuner_type type = has_demod ? 
ADDRS_TV_WITH_DEMOD : ADDRS_TV; - v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, + v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(type)); + 0, v4l2_i2c_tuner_addrs(type)); } else { v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", "tuner", - dev->tuner_addr); + dev->tuner_addr, NULL); } } diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c index cb78c956d81..f87757fccc7 100644 --- a/drivers/media/video/saa7134/saa7134-core.c +++ b/drivers/media/video/saa7134/saa7134-core.c @@ -1000,7 +1000,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev, struct v4l2_subdev *sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "saa6752hs", "saa6752hs", - saa7134_boards[dev->board].empress_addr); + saa7134_boards[dev->board].empress_addr, NULL); if (sd) sd->grp_id = GRP_EMPRESS; @@ -1009,9 +1009,9 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev, if (saa7134_boards[dev->board].rds_addr) { struct v4l2_subdev *sd; - sd = v4l2_i2c_new_probed_subdev_addr(&dev->v4l2_dev, + sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "saa6588", "saa6588", - saa7134_boards[dev->board].rds_addr); + 0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr)); if (sd) { printk(KERN_INFO "%s: found RDS decoder\n", dev->name); dev->has_rds = 1; diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c index 1fe5befbbf8..f97fd06d594 100644 --- a/drivers/media/video/usbvision/usbvision-i2c.c +++ b/drivers/media/video/usbvision/usbvision-i2c.c @@ -246,9 +246,9 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision) switch (usbvision_device_data[usbvision->DevModel].Codec) { case CODEC_SAA7113: case CODEC_SAA7111: - v4l2_i2c_new_probed_subdev(&usbvision->v4l2_dev, + v4l2_i2c_new_subdev(&usbvision->v4l2_dev, &usbvision->i2c_adap, "saa7115", - "saa7115_auto", saa711x_addrs); + "saa7115_auto", 0, saa711x_addrs); break; } if (usbvision_device_data[usbvision->DevModel].Tuner == 1) { @@ -256,16 +256,16 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision) enum v4l2_i2c_tuner_type type; struct tuner_setup tun_setup; - sd = v4l2_i2c_new_probed_subdev(&usbvision->v4l2_dev, + sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, &usbvision->i2c_adap, "tuner", - "tuner", v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); + "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); /* depending on whether we found a demod or not, select the tuner type. */ type = sd ? ADDRS_TV_WITH_DEMOD : ADDRS_TV; - sd = v4l2_i2c_new_probed_subdev(&usbvision->v4l2_dev, + sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, &usbvision->i2c_adap, "tuner", - "tuner", v4l2_i2c_tuner_addrs(type)); + "tuner", 0, v4l2_i2c_tuner_addrs(type)); if (usbvision->tuner_type != -1) { tun_setup.mode_mask = T_ANALOG_TV | T_RADIO; diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 3a0c64935b0..f5a93ae3cdf 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -813,139 +813,6 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); -/* Load an i2c sub-device. 
*/ -struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, - const char *module_name, const char *client_type, u8 addr) -{ - struct v4l2_subdev *sd = NULL; - struct i2c_client *client; - struct i2c_board_info info; - - BUG_ON(!v4l2_dev); - - if (module_name) - request_module(module_name); - - /* Setup the i2c board info with the device type and - the device address. */ - memset(&info, 0, sizeof(info)); - strlcpy(info.type, client_type, sizeof(info.type)); - info.addr = addr; - - /* Create the i2c client */ - client = i2c_new_device(adapter, &info); - /* Note: it is possible in the future that - c->driver is NULL if the driver is still being loaded. - We need better support from the kernel so that we - can easily wait for the load to finish. */ - if (client == NULL || client->driver == NULL) - goto error; - - /* Lock the module so we can safely get the v4l2_subdev pointer */ - if (!try_module_get(client->driver->driver.owner)) - goto error; - sd = i2c_get_clientdata(client); - - /* Register with the v4l2_device which increases the module's - use count as well. */ - if (v4l2_device_register_subdev(v4l2_dev, sd)) - sd = NULL; - /* Decrease the module use count to match the first try_module_get. */ - module_put(client->driver->driver.owner); - - if (sd) { - /* We return errors from v4l2_subdev_call only if we have the - callback as the .s_config is not mandatory */ - int err = v4l2_subdev_call(sd, core, s_config, 0, NULL); - - if (err && err != -ENOIOCTLCMD) { - v4l2_device_unregister_subdev(sd); - sd = NULL; - } - } - -error: - /* If we have a client but no subdev, then something went wrong and - we must unregister the client. */ - if (client && sd == NULL) - i2c_unregister_device(client); - return sd; -} -EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev); - -/* Probe and load an i2c sub-device. */ -struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, - const char *module_name, const char *client_type, - const unsigned short *addrs) -{ - struct v4l2_subdev *sd = NULL; - struct i2c_client *client = NULL; - struct i2c_board_info info; - - BUG_ON(!v4l2_dev); - - if (module_name) - request_module(module_name); - - /* Setup the i2c board info with the device type and - the device address. */ - memset(&info, 0, sizeof(info)); - strlcpy(info.type, client_type, sizeof(info.type)); - - /* Probe and create the i2c client */ - client = i2c_new_probed_device(adapter, &info, addrs); - /* Note: it is possible in the future that - c->driver is NULL if the driver is still being loaded. - We need better support from the kernel so that we - can easily wait for the load to finish. */ - if (client == NULL || client->driver == NULL) - goto error; - - /* Lock the module so we can safely get the v4l2_subdev pointer */ - if (!try_module_get(client->driver->driver.owner)) - goto error; - sd = i2c_get_clientdata(client); - - /* Register with the v4l2_device which increases the module's - use count as well. */ - if (v4l2_device_register_subdev(v4l2_dev, sd)) - sd = NULL; - /* Decrease the module use count to match the first try_module_get. 
*/ - module_put(client->driver->driver.owner); - - if (sd) { - /* We return errors from v4l2_subdev_call only if we have the - callback as the .s_config is not mandatory */ - int err = v4l2_subdev_call(sd, core, s_config, 0, NULL); - - if (err && err != -ENOIOCTLCMD) { - v4l2_device_unregister_subdev(sd); - sd = NULL; - } - } - -error: - /* If we have a client but no subdev, then something went wrong and - we must unregister the client. */ - if (client && sd == NULL) - i2c_unregister_device(client); - return sd; -} -EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev); - -struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, - const char *module_name, const char *client_type, u8 addr) -{ - unsigned short addrs[2] = { addr, I2C_CLIENT_END }; - - return v4l2_i2c_new_probed_subdev(v4l2_dev, adapter, - module_name, client_type, addrs); -} -EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr); - /* Load an i2c sub-device. */ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, struct i2c_adapter *adapter, const char *module_name, diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c index f3b6e15d91f..cd6a3446ab7 100644 --- a/drivers/media/video/vino.c +++ b/drivers/media/video/vino.c @@ -4333,11 +4333,11 @@ static int __init vino_module_init(void) vino_init_stage++; vino_drvdata->decoder = - v4l2_i2c_new_probed_subdev_addr(&vino_drvdata->v4l2_dev, - &vino_i2c_adapter, "saa7191", "saa7191", 0x45); + v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, + "saa7191", "saa7191", 0, I2C_ADDRS(0x45)); vino_drvdata->camera = - v4l2_i2c_new_probed_subdev_addr(&vino_drvdata->v4l2_dev, - &vino_i2c_adapter, "indycam", "indycam", 0x2b); + v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, + "indycam", "indycam", 0, I2C_ADDRS(0x2b)); dprintk("init complete!\n"); diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c index 602484dd3da..37fcdc447db 100644 --- a/drivers/media/video/w9968cf.c +++ b/drivers/media/video/w9968cf.c @@ -3515,9 +3515,9 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id) w9968cf_turn_on_led(cam); w9968cf_i2c_init(cam); - cam->sensor_sd = v4l2_i2c_new_probed_subdev(&cam->v4l2_dev, + cam->sensor_sd = v4l2_i2c_new_subdev(&cam->v4l2_dev, &cam->i2c_adapter, - "ovcamchip", "ovcamchip", addrs); + "ovcamchip", "ovcamchip", 0, addrs); usb_set_intfdata(intf, cam); mutex_unlock(&cam->dev_mutex); diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c index 0c4d9b1f8e6..be70574870d 100644 --- a/drivers/media/video/zoran/zoran_card.c +++ b/drivers/media/video/zoran/zoran_card.c @@ -1357,15 +1357,15 @@ static int __devinit zoran_probe(struct pci_dev *pdev, goto zr_free_irq; } - zr->decoder = v4l2_i2c_new_probed_subdev(&zr->v4l2_dev, + zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter, zr->card.mod_decoder, zr->card.i2c_decoder, - zr->card.addrs_decoder); + 0, zr->card.addrs_decoder); if (zr->card.mod_encoder) - zr->encoder = v4l2_i2c_new_probed_subdev(&zr->v4l2_dev, + zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter, zr->card.mod_encoder, zr->card.i2c_encoder, - zr->card.addrs_encoder); + 0, zr->card.addrs_encoder); dprintk(2, KERN_INFO "%s: Initializing videocodec bus...\n", diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 33a18426ab9..1c25b10da34 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -139,29 +139,23 @@ 
struct v4l2_subdev_ops; /* Load an i2c module and return an initialized v4l2_subdev struct. Only call request_module if module_name != NULL. The client_type argument is the name of the chip that's on the adapter. */ -struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, - const char *module_name, const char *client_type, u8 addr); -/* Probe and load an i2c module and return an initialized v4l2_subdev struct. - Only call request_module if module_name != NULL. - The client_type argument is the name of the chip that's on the adapter. */ -struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev, +struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, struct i2c_adapter *adapter, const char *module_name, const char *client_type, - const unsigned short *addrs); -/* Like v4l2_i2c_new_probed_subdev, except probe for a single address. */ -struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, - const char *module_name, const char *client_type, u8 addr); + int irq, void *platform_data, + u8 addr, const unsigned short *probe_addrs); /* Load an i2c module and return an initialized v4l2_subdev struct. Only call request_module if module_name != NULL. The client_type argument is the name of the chip that's on the adapter. */ -struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, +static inline struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, struct i2c_adapter *adapter, const char *module_name, const char *client_type, - int irq, void *platform_data, - u8 addr, const unsigned short *probe_addrs); + u8 addr, const unsigned short *probe_addrs) +{ + return v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, module_name, + client_type, 0, NULL, addr, probe_addrs); +} struct i2c_board_info; -- cgit v1.2.3-70-g09d2 From 7ae0cd9bc793e16d8d68df3c17c601732cc1d3c7 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 19 Jun 2009 11:32:56 -0300 Subject: V4L/DVB (12541): v4l: remove video_register_device_index video_register_device_index is never actually called, instead the stream index number is always calculated automatically. This patch removes this function and simplifies the internal get_index function since that can now always just return the first free index. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/v4l2-framework.txt | 17 +++------ drivers/media/video/v4l2-dev.c | 55 ++++++++-------------------- include/media/v4l2-dev.h | 2 - 3 files changed, 20 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt index e395a9cdc53..cb6c7eb5147 100644 --- a/Documentation/video4linux/v4l2-framework.txt +++ b/Documentation/video4linux/v4l2-framework.txt @@ -500,17 +500,11 @@ first free number. Whenever a device node is created some attributes are also created for you. If you look in /sys/class/video4linux you see the devices. Go into e.g. video0 and you will see 'name' and 'index' attributes. The 'name' attribute -is the 'name' field of the video_device struct. The 'index' attribute is -a device node index that can be assigned by the driver, or that is calculated -for you. - -If you call video_register_device(), then the index is just increased by -1 for each device node you register. The first video device node you register -always starts off with 0. 
+is the 'name' field of the video_device struct. -Alternatively you can call video_register_device_index() which is identical -to video_register_device(), but with an extra index argument. Here you can -pass a specific index value (between 0 and 31) that should be used. +The 'index' attribute is the index of the device node: for each call to +video_register_device() the index is just increased by 1. The first video +device node you register always starts with index 0. Users can setup udev rules that utilize the index attribute to make fancy device names (e.g. 'mpegX' for MPEG video capture device nodes). @@ -520,8 +514,7 @@ After the device was successfully registered, then you can use these fields: - vfl_type: the device type passed to video_register_device. - minor: the assigned device minor number. - num: the device kernel number (i.e. the X in videoX). -- index: the device index number (calculated or set explicitly using - video_register_device_index). +- index: the device index number. If the registration failed, then you need to call video_device_release() to free the allocated video_device struct, or free your own struct if the diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index a7f1b69a7da..1219721894a 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -299,32 +299,28 @@ static const struct file_operations v4l2_fops = { }; /** - * get_index - assign stream number based on parent device + * get_index - assign stream index number based on parent device * @vdev: video_device to assign index number to, vdev->parent should be assigned - * @num: -1 if auto assign, requested number otherwise * * Note that when this is called the new device has not yet been registered - * in the video_device array. + * in the video_device array, but it was able to obtain a minor number. * - * Returns -ENFILE if num is already in use, a free index number if - * successful. + * This means that we can always obtain a free stream index number since + * the worst case scenario is that there are VIDEO_NUM_DEVICES - 1 slots in + * use of the video_device array. + * + * Returns a free index number. */ -static int get_index(struct video_device *vdev, int num) +static int get_index(struct video_device *vdev) { /* This can be static since this function is called with the global videodev_lock held. */ static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES); int i; - if (num >= VIDEO_NUM_DEVICES) { - printk(KERN_ERR "videodev: %s num is too large\n", __func__); - return -EINVAL; - } - - /* Some drivers do not set the parent. In that case always return - num or 0. */ + /* Some drivers do not set the parent. In that case always return 0. */ if (vdev->parent == NULL) - return num >= 0 ? num : 0; + return 0; bitmap_zero(used, VIDEO_NUM_DEVICES); @@ -335,30 +331,15 @@ static int get_index(struct video_device *vdev, int num) } } - if (num >= 0) { - if (test_bit(num, used)) - return -ENFILE; - return num; - } - - i = find_first_zero_bit(used, VIDEO_NUM_DEVICES); - return i == VIDEO_NUM_DEVICES ? 
-ENFILE : i; + return find_first_zero_bit(used, VIDEO_NUM_DEVICES); } -int video_register_device(struct video_device *vdev, int type, int nr) -{ - return video_register_device_index(vdev, type, nr, -1); -} -EXPORT_SYMBOL(video_register_device); - /** - * video_register_device_index - register video4linux devices + * video_register_device - register video4linux devices * @vdev: video device structure we want to register * @type: type of device to register * @nr: which device number (0 == /dev/video0, 1 == /dev/video1, ... * -1 == first free) - * @index: stream number based on parent device; - * -1 if auto assign, requested number otherwise * * The registration code assigns minor numbers based on the type * requested. -ENFILE is returned in all the device slots for this @@ -377,8 +358,7 @@ EXPORT_SYMBOL(video_register_device); * * %VFL_TYPE_RADIO - A radio card */ -int video_register_device_index(struct video_device *vdev, int type, int nr, - int index) +int video_register_device(struct video_device *vdev, int type, int nr) { int i = 0; int ret; @@ -481,14 +461,9 @@ int video_register_device_index(struct video_device *vdev, int type, int nr, set_bit(nr, video_nums[type]); /* Should not happen since we thought this minor was free */ WARN_ON(video_device[vdev->minor] != NULL); - ret = vdev->index = get_index(vdev, index); + vdev->index = get_index(vdev); mutex_unlock(&videodev_lock); - if (ret < 0) { - printk(KERN_ERR "%s: get_index failed\n", __func__); - goto cleanup; - } - /* Part 3: Initialize the character device */ vdev->cdev = cdev_alloc(); if (vdev->cdev == NULL) { @@ -543,7 +518,7 @@ cleanup: vdev->minor = -1; return ret; } -EXPORT_SYMBOL(video_register_device_index); +EXPORT_SYMBOL(video_register_device); /** * video_unregister_device - unregister a video4linux device diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 2058dd45e91..255f6442b63 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -100,8 +100,6 @@ struct video_device Also note that vdev->minor is set to -1 if the registration failed. */ int __must_check video_register_device(struct video_device *vdev, int type, int nr); -int __must_check video_register_device_index(struct video_device *vdev, - int type, int nr, int index); /* Unregister video devices. Will do nothing if vdev == NULL or vdev->minor < 0. */ -- cgit v1.2.3-70-g09d2 From 6b5270d21202fcf6ae16a6266fed83a30ccece7a Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sun, 6 Sep 2009 07:54:00 -0300 Subject: V4L/DVB (12725): v4l: warn when desired devnodenr is in use & add _no_warn function Warn when the desired device node number is already in use, except when the new video_register_device_no_warn function is called since in some use-cases that warning is not relevant. 
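For illustration, a driver that lets its user request a specific device node through a module option, but does not want a warning when that node is already taken, could use the new helper roughly as follows (the "video_nr" option and the "mydrv" names are hypothetical, not part of this patch):

	/* Hypothetical driver fragment illustrating the new helper. */
	static int video_nr = -1;	/* -1 == pick the first free device node */
	module_param(video_nr, int, 0444);

	static int mydrv_register_node(struct video_device *vdev)
	{
		/* Try the requested node; silently fall back to any free one. */
		return video_register_device_no_warn(vdev, VFL_TYPE_GRABBER,
						     video_nr);
	}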
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/v4l2-framework.txt | 22 ++++++++++++++++------ drivers/media/video/cx18/cx18-streams.c | 2 +- drivers/media/video/ivtv/ivtv-streams.c | 2 +- drivers/media/video/v4l2-dev.c | 20 +++++++++++++++++++- include/media/v4l2-dev.h | 4 ++++ 5 files changed, 41 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt index 38b3716d864..b806edaf3e7 100644 --- a/Documentation/video4linux/v4l2-framework.txt +++ b/Documentation/video4linux/v4l2-framework.txt @@ -486,17 +486,27 @@ VFL_TYPE_RADIO: radioX for radio tuners VFL_TYPE_VTX: vtxX for teletext devices (deprecated, don't use) The last argument gives you a certain amount of control over the device -device node number used (i.e. the X in videoX). Normally you will pass -1 to -let the v4l2 framework pick the first free number. But if a driver creates -many devices, then it can be useful to have different video devices in -separate ranges. For example, video capture devices start at 0, video -output devices start at 16. - +device node number used (i.e. the X in videoX). Normally you will pass -1 +to let the v4l2 framework pick the first free number. But sometimes users +want to select a specific node number. It is common that drivers allow +the user to select a specific device node number through a driver module +option. That number is then passed to this function and video_register_device +will attempt to select that device node number. If that number was already +in use, then the next free device node number will be selected and it +will send a warning to the kernel log. + +Another use-case is if a driver creates many devices. In that case it can +be useful to place different video devices in separate ranges. For example, +video capture devices start at 0, video output devices start at 16. So you can use the last argument to specify a minimum device node number and the v4l2 framework will try to pick the first free number that is equal or higher to what you passed. If that fails, then it will just pick the first free number. +Since in this case you do not care about a warning about not being able +to select the specified device node number, you can call the function +video_register_device_no_warn() instead. + Whenever a device node is created some attributes are also created for you. If you look in /sys/class/video4linux you see the devices. Go into e.g. video0 and you will see 'name' and 'index' attributes. The 'name' attribute diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c index 6c988b95adc..7df513a2dba 100644 --- a/drivers/media/video/cx18/cx18-streams.c +++ b/drivers/media/video/cx18/cx18-streams.c @@ -245,7 +245,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type) video_set_drvdata(s->video_dev, s); /* Register device. First try the desired minor, then any free one. 
*/ - ret = video_register_device(s->video_dev, vfl_type, num); + ret = video_register_device_no_warn(s->video_dev, vfl_type, num); if (ret < 0) { CX18_ERR("Couldn't register v4l2 device for %s (device node number %d)\n", s->name, num); diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c index 23400035240..67699e3f2aa 100644 --- a/drivers/media/video/ivtv/ivtv-streams.c +++ b/drivers/media/video/ivtv/ivtv-streams.c @@ -261,7 +261,7 @@ static int ivtv_reg_dev(struct ivtv *itv, int type) video_set_drvdata(s->vdev, s); /* Register device. First try the desired minor, then any free one. */ - if (video_register_device(s->vdev, vfl_type, num)) { + if (video_register_device_no_warn(s->vdev, vfl_type, num)) { IVTV_ERR("Couldn't register v4l2 device for %s (device node number %d)\n", s->name, num); video_device_release(s->vdev); diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 4715f08157b..500cbe9891a 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -382,6 +382,8 @@ static int get_index(struct video_device *vdev) * @type: type of device to register * @nr: which device node number (0 == /dev/video0, 1 == /dev/video1, ... * -1 == first free) + * @warn_if_nr_in_use: warn if the desired device node number + * was already in use and another number was chosen instead. * * The registration code assigns minor numbers and device node numbers * based on the requested type and registers the new device node with @@ -401,7 +403,8 @@ static int get_index(struct video_device *vdev) * * %VFL_TYPE_RADIO - A radio card */ -int video_register_device(struct video_device *vdev, int type, int nr) +static int __video_register_device(struct video_device *vdev, int type, int nr, + int warn_if_nr_in_use) { int i = 0; int ret; @@ -547,6 +550,10 @@ int video_register_device(struct video_device *vdev, int type, int nr) reference to the device goes away. */ vdev->dev.release = v4l2_device_release; + if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) + printk(KERN_WARNING "%s: requested %s%d, got %s%d\n", + __func__, name_base, nr, name_base, vdev->num); + /* Part 5: Activate this minor. The char device can now be used. */ mutex_lock(&videodev_lock); video_device[vdev->minor] = vdev; @@ -563,8 +570,19 @@ cleanup: vdev->minor = -1; return ret; } + +int video_register_device(struct video_device *vdev, int type, int nr) +{ + return __video_register_device(vdev, type, nr, 1); +} EXPORT_SYMBOL(video_register_device); +int video_register_device_no_warn(struct video_device *vdev, int type, int nr) +{ + return __video_register_device(vdev, type, nr, 0); +} +EXPORT_SYMBOL(video_register_device_no_warn); + /** * video_unregister_device - unregister a video4linux device * @vdev: the device to unregister diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 255f6442b63..73c9867d744 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -101,6 +101,10 @@ struct video_device Also note that vdev->minor is set to -1 if the registration failed. */ int __must_check video_register_device(struct video_device *vdev, int type, int nr); +/* Same as video_register_device, but no warning is issued if the desired + device node number was already in use. */ +int __must_check video_register_device_no_warn(struct video_device *vdev, int type, int nr); + /* Unregister video devices. Will do nothing if vdev == NULL or vdev->minor < 0. 
*/ void video_unregister_device(struct video_device *vdev); -- cgit v1.2.3-70-g09d2 From 98293ef3e54f9f2175f11b4d14c119a2ff753d61 Mon Sep 17 00:00:00 2001 From: HIRANO Takahito Date: Fri, 18 Sep 2009 11:17:54 -0300 Subject: V4L/DVB (12997): Add the DTV_ISDB_TS_ID property for ISDB_S In ISDB-S, time-devision duplex is used to multiplexing several waves in the same frequency. Each wave is identified by its own transport stream ID, or TS ID. We need to provide some way to specify this ID from user applications to handle ISDB-S frontends. This code has been tested with the Earthsoft PT1 driver. [mchehab@infradead.org: Fix merge conflicts with isdbt and rename the new parameter to DTV_ISDBS_TS_ID] Signed-off-by: HIRANO Takahito Signed-off-by: Mauro Carvalho Chehab --- drivers/media/dvb/dvb-core/dvb_frontend.c | 8 ++++++++ drivers/media/dvb/dvb-core/dvb_frontend.h | 3 +++ include/linux/dvb/frontend.h | 4 +++- 3 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index 3c9482660ea..ddf639ed2fd 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -1031,6 +1031,8 @@ static struct dtv_cmds_h dtv_cmds[] = { _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0), _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0), + _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0), + /* Get */ [DTV_DISEQC_SLAVE_REPLY] = { .name = "DTV_DISEQC_SLAVE_REPLY", @@ -1420,6 +1422,9 @@ static int dtv_property_process_get(struct dvb_frontend *fe, case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: tvp->u.data = fe->dtv_property_cache.layer[2].interleaving; break; + case DTV_ISDBS_TS_ID: + tvp->u.data = fe->dtv_property_cache.isdbs_ts_id; + break; default: r = -1; } @@ -1571,6 +1576,9 @@ static int dtv_property_process_set(struct dvb_frontend *fe, case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: fe->dtv_property_cache.layer[2].interleaving = tvp->u.data; break; + case DTV_ISDBS_TS_ID: + fe->dtv_property_cache.isdbs_ts_id = tvp->u.data; + break; default: r = -1; } diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h index 9e46f1772c5..810f07d6324 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.h +++ b/drivers/media/dvb/dvb-core/dvb_frontend.h @@ -355,6 +355,9 @@ struct dtv_frontend_properties { fe_modulation_t modulation; u8 interleaving; } layer[3]; + + /* ISDB-T specifics */ + u32 isdbs_ts_id; }; struct dvb_frontend { diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h index 25b01c14727..b6cb5425cde 100644 --- a/include/linux/dvb/frontend.h +++ b/include/linux/dvb/frontend.h @@ -302,7 +302,9 @@ struct dvb_frontend_event { #define DTV_ISDBT_LAYER_ENABLED 41 -#define DTV_MAX_COMMAND DTV_ISDBT_LAYER_ENABLED +#define DTV_ISDBS_TS_ID 42 + +#define DTV_MAX_COMMAND DTV_ISDBS_TS_ID typedef enum fe_pilot { PILOT_ON, -- cgit v1.2.3-70-g09d2 From 6789cb5230f8b06271b6a89ace20449af14be303 Mon Sep 17 00:00:00 2001 From: Richard Röjfors Date: Fri, 18 Sep 2009 21:17:20 -0300 Subject: V4L/DVB (13019): video: initial support for ADV7180 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an initial driver for Analog Devices ADV7180 Video Decoder. So far it only supports query standard. 
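As a usage sketch only (no bridge driver in this patch attaches the decoder yet; the 0x21 slave address and the dev->... field names are assumptions), a board driver could load the new decoder through the consolidated sub-device helper:

	/* Hypothetical bridge-driver fragment; the 0x21 address and the
	 * surrounding structure fields are assumptions, not from this patch. */
	struct v4l2_subdev *decoder;

	decoder = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
				      "adv7180", "adv7180", 0x21, NULL);
	if (decoder == NULL)
		printk(KERN_ERR "adv7180 subdev registration failed\n");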
[akpm@linux-foundation.org: remove unneeded cast] Cc: Hans Verkuil Signed-off-by: Richard Röjfors Signed-off-by: Andrew Morton Signed-off-by: Douglas Schilling Landgraf Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/Kconfig | 9 ++ drivers/media/video/Makefile | 1 + drivers/media/video/adv7180.c | 202 ++++++++++++++++++++++++++++++++++++++++ include/media/v4l2-chip-ident.h | 3 + 4 files changed, 215 insertions(+) create mode 100644 drivers/media/video/adv7180.c (limited to 'include') diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index dc71eaea6af..e6186b338a1 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -265,6 +265,15 @@ config VIDEO_SAA6588 comment "Video decoders" +config VIDEO_ADV7180 + tristate "Analog Devices ADV7180 decoder" + depends on VIDEO_V4L2 && I2C + ---help--- + Support for the Analog Devices ADV7180 video decoder. + + To compile this driver as a module, choose M here: the + module will be called adv7180. + config VIDEO_BT819 tristate "BT819A VideoStream decoder" depends on VIDEO_V4L2 && I2C diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index 040bc049200..e541932a789 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o +obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o obj-$(CONFIG_VIDEO_BT819) += bt819.o diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c new file mode 100644 index 00000000000..1b3cbd02a7f --- /dev/null +++ b/drivers/media/video/adv7180.c @@ -0,0 +1,202 @@ +/* + * adv7180.c Analog Devices ADV7180 video decoder driver + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "adv7180" + +#define ADV7180_INPUT_CONTROL_REG 0x00 +#define ADV7180_INPUT_CONTROL_PAL_BG_NTSC_J_SECAM 0x00 +#define ADV7180_AUTODETECT_ENABLE_REG 0x07 +#define ADV7180_AUTODETECT_DEFAULT 0x7f + + +#define ADV7180_STATUS1_REG 0x10 +#define ADV7180_STATUS1_AUTOD_MASK 0x70 +#define ADV7180_STATUS1_AUTOD_NTSM_M_J 0x00 +#define ADV7180_STATUS1_AUTOD_NTSC_4_43 0x10 +#define ADV7180_STATUS1_AUTOD_PAL_M 0x20 +#define ADV7180_STATUS1_AUTOD_PAL_60 0x30 +#define ADV7180_STATUS1_AUTOD_PAL_B_G 0x40 +#define ADV7180_STATUS1_AUTOD_SECAM 0x50 +#define ADV7180_STATUS1_AUTOD_PAL_COMB 0x60 +#define ADV7180_STATUS1_AUTOD_SECAM_525 0x70 + +#define ADV7180_IDENT_REG 0x11 +#define ADV7180_ID_7180 0x18 + + +struct adv7180_state { + struct v4l2_subdev sd; +}; + +static v4l2_std_id determine_norm(struct i2c_client *client) +{ + u8 status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG); + + switch (status1 & ADV7180_STATUS1_AUTOD_MASK) { + case ADV7180_STATUS1_AUTOD_NTSM_M_J: + return V4L2_STD_NTSC_M_JP; + case ADV7180_STATUS1_AUTOD_NTSC_4_43: + return V4L2_STD_NTSC_443; + case ADV7180_STATUS1_AUTOD_PAL_M: + return V4L2_STD_PAL_M; + case ADV7180_STATUS1_AUTOD_PAL_60: + return V4L2_STD_PAL_60; + case ADV7180_STATUS1_AUTOD_PAL_B_G: + return V4L2_STD_PAL; + case ADV7180_STATUS1_AUTOD_SECAM: + return V4L2_STD_SECAM; + case ADV7180_STATUS1_AUTOD_PAL_COMB: + return V4L2_STD_PAL_Nc | V4L2_STD_PAL_N; + case ADV7180_STATUS1_AUTOD_SECAM_525: + return V4L2_STD_SECAM; + default: + return V4L2_STD_UNKNOWN; + } +} + +static inline struct adv7180_state *to_state(struct v4l2_subdev *sd) +{ + return container_of(sd, struct adv7180_state, sd); +} + +static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + *std = determine_norm(client); + return 0; +} + +static int adv7180_g_chip_ident(struct v4l2_subdev *sd, + struct v4l2_dbg_chip_ident *chip) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7180, 0); +} + +static const struct v4l2_subdev_video_ops adv7180_video_ops = { + .querystd = adv7180_querystd, +}; + +static const struct v4l2_subdev_core_ops adv7180_core_ops = { + .g_chip_ident = adv7180_g_chip_ident, +}; + +static const struct v4l2_subdev_ops adv7180_ops = { + .core = &adv7180_core_ops, + .video = &adv7180_video_ops, +}; + +/* + * Generic i2c probe + * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' + */ + +static int adv7180_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adv7180_state *state; + struct v4l2_subdev *sd; + int ret; + + /* Check if the adapter supports the needed features */ + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + v4l_info(client, "chip found @ 0x%02x (%s)\n", + client->addr << 1, client->adapter->name); + + state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL); + if (state == NULL) + return -ENOMEM; + sd = &state->sd; + v4l2_i2c_subdev_init(sd, client, &adv7180_ops); + + /* Initialize adv7180 */ + /* enable autodetection */ + ret = i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG, + ADV7180_INPUT_CONTROL_PAL_BG_NTSC_J_SECAM); + if (ret > 0) + ret = i2c_smbus_write_byte_data(client, + ADV7180_AUTODETECT_ENABLE_REG, + ADV7180_AUTODETECT_DEFAULT); + if (ret < 0) { + printk(KERN_ERR DRIVER_NAME + 
": Failed to communicate to chip: %d\n", ret); + return ret; + } + + return 0; +} + +static int adv7180_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + + v4l2_device_unregister_subdev(sd); + kfree(to_state(sd)); + return 0; +} + +static const struct i2c_device_id adv7180_id[] = { + {DRIVER_NAME, 0}, + {}, +}; + +MODULE_DEVICE_TABLE(i2c, adv7180_id); + +static struct i2c_driver adv7180_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + }, + .probe = adv7180_probe, + .remove = adv7180_remove, + .id_table = adv7180_id, +}; + +static __init int adv7180_init(void) +{ + return i2c_add_driver(&adv7180_driver); +} + +static __exit void adv7180_exit(void) +{ + i2c_del_driver(&adv7180_driver); +} + +module_init(adv7180_init); +module_exit(adv7180_exit); + +MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver"); +MODULE_AUTHOR("Mocean Laboratories"); +MODULE_LICENSE("GPL v2"); + diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h index 94e908c0d7a..cf16689adba 100644 --- a/include/media/v4l2-chip-ident.h +++ b/include/media/v4l2-chip-ident.h @@ -135,6 +135,9 @@ enum { /* module adv7175: just ident 7175 */ V4L2_IDENT_ADV7175 = 7175, + /* module adv7180: just ident 7180 */ + V4L2_IDENT_ADV7180 = 7180, + /* module saa7185: just ident 7185 */ V4L2_IDENT_SAA7185 = 7185, -- cgit v1.2.3-70-g09d2 From eb27cae8adaa658a0bf31631baa1ce29d8183759 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Mon, 6 Jul 2009 23:40:19 -0400 Subject: ACPI: linux/acpi.h should not include linux/dmi.h users of acpi.h that need dmi.h should include it directly. Signed-off-by: Len Brown --- drivers/acpi/bus.c | 1 + drivers/acpi/ec.c | 1 + drivers/acpi/pci_slot.c | 1 + drivers/acpi/video.c | 2 +- drivers/pci/dmar.c | 1 + include/linux/acpi.h | 2 -- 6 files changed, 5 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 2876fc70c3a..0fb6b2a8b10 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "internal.h" diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 391f331674c..c434f6571ab 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -42,6 +42,7 @@ #include #include #include +#include #define ACPI_EC_CLASS "embedded_controller" #define ACPI_EC_DEVICE_NAME "Embedded Controller" diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index 12158e0d009..7aa6802c0ee 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c @@ -31,6 +31,7 @@ #include #include #include +#include static int debug; static int check_sta_before_sun; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 60ea984c84a..055a455b6c7 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -40,7 +40,7 @@ #include #include #include - +#include #include #include diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 7b287cb38b7..4484ac08977 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -33,6 +33,7 @@ #include #include #include +#include #undef PREFIX #define PREFIX "DMAR:" diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 34321cfffea..7950c65c43a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -41,8 +41,6 @@ #include #include #include -#include - enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, -- cgit v1.2.3-70-g09d2 From e4f55966d02c5dfade2978c2aa05fb202a78a4d1 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Mon, 6 Jul 2009 23:42:10 -0400 Subject: 
ACPI: remove unnecessary #ifdef CONFIG_DMI acpi_osi_setup() does not depend on CONFIG_DMI acpi_dmi_osi_linux()'s definition doesn't depend on CONFIG_DMI either Signed-off-by: Len Brown --- include/linux/acpi.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7950c65c43a..9260408bf4e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -217,10 +217,8 @@ static inline int acpi_video_display_switch_support(void) #endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */ extern int acpi_blacklisted(void); -#ifdef CONFIG_DMI extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); extern int acpi_osi_setup(char *str); -#endif #ifdef CONFIG_ACPI_NUMA int acpi_get_pxm(acpi_handle handle); -- cgit v1.2.3-70-g09d2 From a71fca58b7f4abca551ae2256ac08dd9123a03f9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 18 Sep 2009 10:28:19 -0700 Subject: rcu: Fix whitespace inconsistencies Fix a number of whitespace ^Ierrors in the include/linux/rcu* and the kernel/rcu* files. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <20090918172819.GA24405@linux.vnet.ibm.com> [ did more checkpatch fixlets ] Signed-off-by: Ingo Molnar --- include/linux/rculist_nulls.h | 2 +- include/linux/rcupdate.h | 6 +++--- include/linux/rcutree.h | 2 +- kernel/rcupdate.c | 4 ++-- kernel/rcutorture.c | 28 +++++++++++++++------------- kernel/rcutree.c | 2 +- kernel/rcutree.h | 2 +- kernel/rcutree_plugin.h | 3 +-- kernel/rcutree_trace.c | 2 +- 9 files changed, 26 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index f9ddd03961a..589a40919f0 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -102,7 +102,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (pos = rcu_dereference((head)->first); \ - (!is_a_nulls(pos)) && \ + (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference(pos->next)) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 39dce83c486..6fe0363724e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1,5 +1,5 @@ /* - * Read-Copy Update mechanism for mutual exclusion + * Read-Copy Update mechanism for mutual exclusion * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -18,7 +18,7 @@ * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma - * + * * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
* Papers: @@ -26,7 +26,7 @@ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - - * http://lse.sourceforge.net/locking/rcupdate.html + * http://lse.sourceforge.net/locking/rcupdate.html * */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 00d08c0cbcc..37682770e9d 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -24,7 +24,7 @@ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU + * Documentation/RCU */ #ifndef __LINUX_RCUTREE_H diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 28d2f24e787..37ac4548308 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -19,7 +19,7 @@ * * Authors: Dipankar Sarma * Manfred Spraul - * + * * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: @@ -27,7 +27,7 @@ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - - * http://lse.sourceforge.net/locking/rcupdate.html + * http://lse.sourceforge.net/locking/rcupdate.html * */ #include diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 328a8257c88..233768f21f9 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -18,7 +18,7 @@ * Copyright (C) IBM Corporation, 2005, 2006 * * Authors: Paul E. McKenney - * Josh Triplett + * Josh Triplett * * See also: Documentation/RCU/torture.txt */ @@ -50,7 +50,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul E. McKenney and " - "Josh Triplett "); + "Josh Triplett "); static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ static int nfakewriters = 4; /* # fake writer threads */ @@ -110,8 +110,8 @@ struct rcu_torture { }; static LIST_HEAD(rcu_torture_freelist); -static struct rcu_torture *rcu_torture_current = NULL; -static long rcu_torture_current_version = 0; +static struct rcu_torture *rcu_torture_current; +static long rcu_torture_current_version; static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; static DEFINE_SPINLOCK(rcu_torture_lock); static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = @@ -124,11 +124,11 @@ static atomic_t n_rcu_torture_alloc_fail; static atomic_t n_rcu_torture_free; static atomic_t n_rcu_torture_mberror; static atomic_t n_rcu_torture_error; -static long n_rcu_torture_timers = 0; +static long n_rcu_torture_timers; static struct list_head rcu_torture_removed; static cpumask_var_t shuffle_tmp_mask; -static int stutter_pause_test = 0; +static int stutter_pause_test; #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) #define RCUTORTURE_RUNNABLE_INIT 1 @@ -267,7 +267,8 @@ struct rcu_torture_ops { int irq_capable; char *name; }; -static struct rcu_torture_ops *cur_ops = NULL; + +static struct rcu_torture_ops *cur_ops; /* * Definitions for rcu torture testing. 
@@ -342,8 +343,8 @@ static struct rcu_torture_ops rcu_ops = { .sync = synchronize_rcu, .cb_barrier = rcu_barrier, .stats = NULL, - .irq_capable = 1, - .name = "rcu" + .irq_capable = 1, + .name = "rcu" }; static void rcu_sync_torture_deferred_free(struct rcu_torture *p) @@ -641,7 +642,8 @@ rcu_torture_writer(void *arg) do { schedule_timeout_uninterruptible(1); - if ((rp = rcu_torture_alloc()) == NULL) + rp = rcu_torture_alloc(); + if (rp == NULL) continue; rp->rtort_pipe_count = 0; udelay(rcu_random(&rand) & 0x3ff); @@ -1113,7 +1115,7 @@ rcu_torture_init(void) printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", torture_type); mutex_unlock(&fullstop_mutex); - return (-EINVAL); + return -EINVAL; } if (cur_ops->init) cur_ops->init(); /* no "goto unwind" prior to this point!!! */ @@ -1164,7 +1166,7 @@ rcu_torture_init(void) goto unwind; } fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), - GFP_KERNEL); + GFP_KERNEL); if (fakewriter_tasks == NULL) { VERBOSE_PRINTK_ERRSTRING("out of memory"); firsterr = -ENOMEM; @@ -1173,7 +1175,7 @@ rcu_torture_init(void) for (i = 0; i < nfakewriters; i++) { VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, - "rcu_torture_fakewriter"); + "rcu_torture_fakewriter"); if (IS_ERR(fakewriter_tasks[i])) { firsterr = PTR_ERR(fakewriter_tasks[i]); VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 1b32cdd1b2e..52b06f6e158 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -25,7 +25,7 @@ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU + * Documentation/RCU */ #include #include diff --git a/kernel/rcutree.h b/kernel/rcutree.h index bf8a6f9f134..8e8287a983c 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -142,7 +142,7 @@ struct rcu_data { */ struct rcu_head *nxtlist; struct rcu_head **nxttail[RCU_NEXT_SIZE]; - long qlen; /* # of queued callbacks */ + long qlen; /* # of queued callbacks */ long blimit; /* Upper limit on a processed batch */ #ifdef CONFIG_NO_HZ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 09b7325baad..1cee04f627e 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -370,9 +370,8 @@ static void rcu_preempt_check_callbacks(int cpu) rcu_preempt_qs(cpu); return; } - if (per_cpu(rcu_preempt_data, cpu).qs_pending) { + if (per_cpu(rcu_preempt_data, cpu).qs_pending) t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; - } } /* diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 0ea1bff6972..c89f5e9fd17 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c @@ -20,7 +20,7 @@ * Papers: http://www.rdrop.com/users/paulmck/RCU * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU + * Documentation/RCU * */ #include -- cgit v1.2.3-70-g09d2 From 393b2ad8c757ba3ccd2a99ca5bbcd6db4d3fa53d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 12 Sep 2009 07:52:47 +0200 Subject: perf: Add a timestamp to fork events perf timechart needs to know when a process forked, in order to be able to visualize properly when tasks start. This patch adds a time field to the event structure, and fills it in appropriately. 
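For illustration, a minimal sketch of the record as a user-space consumer would see it after this change: the layout mirrors struct fork_event in tools/perf/util/event.h, while handle_fork() and the print format below are hypothetical.

/* Sketch only: handle_fork() and the output format are made up;
 * the struct layout follows tools/perf/util/event.h after this patch. */
#include <stdint.h>
#include <stdio.h>

struct perf_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

struct fork_event {
	struct perf_event_header header;
	uint32_t pid, ppid;
	uint32_t tid, ptid;
	uint64_t time;			/* new: filled from perf_clock() */
};

static void handle_fork(const struct fork_event *ev)
{
	/* timechart can now place the task start on its time axis */
	printf("fork: pid %u (parent %u) at %llu\n",
	       ev->pid, ev->ppid, (unsigned long long)ev->time);
}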
Signed-off-by: Arjan van de Ven Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <20090912130341.51ad2de2@infradead.org> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 ++ kernel/perf_counter.c | 11 +++++++++-- tools/perf/util/event.h | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index c7375f97aa1..bd341007c4f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -336,6 +336,7 @@ enum perf_event_type { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; + * u64 time; * }; */ PERF_EVENT_EXIT = 4, @@ -356,6 +357,7 @@ enum perf_event_type { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; + * { u64 time; } && PERF_SAMPLE_TIME * }; */ PERF_EVENT_FORK = 7, diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index d013f4e89e9..d5899b62b27 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -3083,6 +3083,7 @@ struct perf_task_event { u32 ppid; u32 tid; u32 ptid; + u64 time; } event; }; @@ -3090,9 +3091,12 @@ static void perf_counter_task_output(struct perf_counter *counter, struct perf_task_event *task_event) { struct perf_output_handle handle; - int size = task_event->event.header.size; + int size; struct task_struct *task = task_event->task; - int ret = perf_output_begin(&handle, counter, size, 0, 0); + int ret; + + size = task_event->event.header.size; + ret = perf_output_begin(&handle, counter, size, 0, 0); if (ret) return; @@ -3103,7 +3107,10 @@ static void perf_counter_task_output(struct perf_counter *counter, task_event->event.tid = perf_counter_tid(counter, task); task_event->event.ptid = perf_counter_tid(counter, current); + task_event->event.time = perf_clock(); + perf_output_put(&handle, task_event->event); + perf_output_end(&handle); } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2495529cae7..28a579f8fa8 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -39,6 +39,7 @@ struct fork_event { struct perf_event_header header; u32 pid, ppid; u32 tid, ptid; + u64 time; }; struct lost_event { -- cgit v1.2.3-70-g09d2 From 6161352142d5fed4cd753b32e5ccde66e705b14e Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Thu, 17 Sep 2009 16:11:28 +0200 Subject: tracing, perf: Convert the power tracer into an event tracer This patch converts the existing power tracer into an event tracer, so that power events (C states and frequency changes) can be tracked via "perf". This also removes the perl script that was used to demo the tracer; its functionality is being replaced entirely with timechart. 
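For illustration, a minimal sketch of how an idle loop emits the new tracepoints; it follows the poll_idle() conversion in the diff below, but my_poll_idle() itself is a made-up caller. A cpufreq transition would likewise call trace_power_frequency(POWER_PSTATE, freq).

/* Sketch only: my_poll_idle() is a hypothetical caller; the tracepoint
 * calls and POWER_CSTATE match what this patch adds to
 * arch/x86/kernel/process.c and include/trace/events/power.h. */
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <trace/events/power.h>

static void my_poll_idle(void)
{
	trace_power_start(POWER_CSTATE, 0);	/* entering the idle poll */
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end(0);			/* back to running */
}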
Signed-off-by: Arjan van de Ven Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <20090912130542.6d314860@infradead.org> Signed-off-by: Ingo Molnar --- Documentation/trace/power.txt | 17 --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 7 +- arch/x86/kernel/process.c | 28 ++-- include/trace/events/power.h | 81 +++++++++++ kernel/trace/Makefile | 2 +- kernel/trace/power-traces.c | 20 +++ kernel/trace/trace.h | 3 - kernel/trace/trace_entries.h | 17 --- kernel/trace/trace_power.c | 218 ----------------------------- scripts/tracing/power.pl | 108 -------------- 10 files changed, 113 insertions(+), 388 deletions(-) delete mode 100644 Documentation/trace/power.txt create mode 100644 include/trace/events/power.h create mode 100644 kernel/trace/power-traces.c delete mode 100644 kernel/trace/trace_power.c delete mode 100644 scripts/tracing/power.pl (limited to 'include') diff --git a/Documentation/trace/power.txt b/Documentation/trace/power.txt deleted file mode 100644 index cd805e16dc2..00000000000 --- a/Documentation/trace/power.txt +++ /dev/null @@ -1,17 +0,0 @@ -The power tracer collects detailed information about C-state and P-state -transitions, instead of just looking at the high-level "average" -information. - -There is a helper script found in scrips/tracing/power.pl in the kernel -sources which can be used to parse this information and create a -Scalable Vector Graphics (SVG) picture from the trace data. - -To use this tracer: - - echo 0 > /sys/kernel/debug/tracing/tracing_enabled - echo power > /sys/kernel/debug/tracing/current_tracer - echo 1 > /sys/kernel/debug/tracing/tracing_enabled - sleep 1 - echo 0 > /sys/kernel/debug/tracing/tracing_enabled - cat /sys/kernel/debug/tracing/trace | \ - perl scripts/tracing/power.pl > out.sv diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4109679863c..479cc8c418c 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include @@ -72,8 +72,6 @@ static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); static DEFINE_PER_CPU(struct aperfmperf, old_perf); -DEFINE_TRACE(power_mark); - /* acpi_perf_data is a pointer to percpu data. 
*/ static struct acpi_processor_performance *acpi_perf_data; @@ -332,7 +330,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int next_perf_state = 0; /* Index into perf table */ unsigned int i; int result = 0; - struct power_trace it; dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); @@ -364,7 +361,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } } - trace_power_mark(&it, POWER_PSTATE, next_perf_state); + trace_power_frequency(POWER_PSTATE, data->freq_table[next_state].frequency); switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 071166a4ba8..7b60e390688 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include @@ -25,9 +25,6 @@ EXPORT_SYMBOL(idle_nomwait); struct kmem_cache *task_xstate_cachep; -DEFINE_TRACE(power_start); -DEFINE_TRACE(power_end); - int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; @@ -299,9 +296,7 @@ static inline int hlt_use_halt(void) void default_idle(void) { if (hlt_use_halt()) { - struct power_trace it; - - trace_power_start(&it, POWER_CSTATE, 1); + trace_power_start(POWER_CSTATE, 1); current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we @@ -314,7 +309,7 @@ void default_idle(void) else local_irq_enable(); current_thread_info()->status |= TS_POLLING; - trace_power_end(&it); + trace_power_end(0); } else { local_irq_enable(); /* loop is done by the caller */ @@ -372,9 +367,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); */ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { - struct power_trace it; - - trace_power_start(&it, POWER_CSTATE, (ax>>4)+1); + trace_power_start(POWER_CSTATE, (ax>>4)+1); if (!need_resched()) { if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); @@ -384,15 +377,14 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) if (!need_resched()) __mwait(ax, cx); } - trace_power_end(&it); + trace_power_end(0); } /* Default MONITOR/MWAIT with no hints, used for default C1 state */ static void mwait_idle(void) { - struct power_trace it; if (!need_resched()) { - trace_power_start(&it, POWER_CSTATE, 1); + trace_power_start(POWER_CSTATE, 1); if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); @@ -402,7 +394,7 @@ static void mwait_idle(void) __sti_mwait(0, 0); else local_irq_enable(); - trace_power_end(&it); + trace_power_end(0); } else local_irq_enable(); } @@ -414,13 +406,11 @@ static void mwait_idle(void) */ static void poll_idle(void) { - struct power_trace it; - - trace_power_start(&it, POWER_CSTATE, 0); + trace_power_start(POWER_CSTATE, 0); local_irq_enable(); while (!need_resched()) cpu_relax(); - trace_power_end(&it); + trace_power_end(0); } /* diff --git a/include/trace/events/power.h b/include/trace/events/power.h new file mode 100644 index 00000000000..ea6d579261a --- /dev/null +++ b/include/trace/events/power.h @@ -0,0 +1,81 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM power + +#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_POWER_H + +#include +#include + +#ifndef _TRACE_POWER_ENUM_ +#define _TRACE_POWER_ENUM_ +enum { + POWER_NONE = 0, + POWER_CSTATE = 1, + POWER_PSTATE = 2, +}; +#endif + + + +TRACE_EVENT(power_start, + + TP_PROTO(unsigned int type, unsigned int state), + + 
TP_ARGS(type, state), + + TP_STRUCT__entry( + __field( u64, type ) + __field( u64, state ) + ), + + TP_fast_assign( + __entry->type = type; + __entry->state = state; + ), + + TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state) +); + +TRACE_EVENT(power_end, + + TP_PROTO(int dummy), + + TP_ARGS(dummy), + + TP_STRUCT__entry( + __field( u64, dummy ) + ), + + TP_fast_assign( + __entry->dummy = 0xffff; + ), + + TP_printk("dummy=%lu", (unsigned long)__entry->dummy) + +); + + +TRACE_EVENT(power_frequency, + + TP_PROTO(unsigned int type, unsigned int state), + + TP_ARGS(type, state), + + TP_STRUCT__entry( + __field( u64, type ) + __field( u64, state ) + ), + + TP_fast_assign( + __entry->type = type; + __entry->state = state; + ), + + TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state) +); + +#endif /* _TRACE_POWER_H */ + +/* This part must be outside protection */ +#include diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 844164dca90..26f03ac07c2 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -42,7 +42,6 @@ obj-$(CONFIG_BOOT_TRACER) += trace_boot.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o -obj-$(CONFIG_POWER_TRACER) += trace_power.o obj-$(CONFIG_KMEMTRACE) += kmemtrace.o obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o @@ -54,5 +53,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o +obj-$(CONFIG_EVENT_TRACING) += power-traces.o libftrace-y := ftrace.o diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c new file mode 100644 index 00000000000..e06c6e3d56a --- /dev/null +++ b/kernel/trace/power-traces.c @@ -0,0 +1,20 @@ +/* + * Power trace points + * + * Copyright (C) 2009 Arjan van de Ven + */ + +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(power_start); +EXPORT_TRACEPOINT_SYMBOL_GPL(power_end); +EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency); + diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 86bcff94791..405cb850b75 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include @@ -37,7 +36,6 @@ enum trace_type { TRACE_HW_BRANCHES, TRACE_KMEM_ALLOC, TRACE_KMEM_FREE, - TRACE_POWER, TRACE_BLK, __TRACE_LAST_TYPE, @@ -207,7 +205,6 @@ extern void __ftrace_bad_type(void); IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ TRACE_GRAPH_RET); \ IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ - IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ TRACE_KMEM_ALLOC); \ IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index a431748ddd6..ead3d724599 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -330,23 +330,6 @@ FTRACE_ENTRY(hw_branch, hw_branch_entry, F_printk("from: %llx to: %llx", __entry->from, __entry->to) ); -FTRACE_ENTRY(power, trace_power, - - TRACE_POWER, - - F_STRUCT( - __field_struct( struct power_trace, state_data ) - __field_desc( s64, state_data, stamp ) - __field_desc( s64, state_data, 
end ) - __field_desc( int, state_data, type ) - __field_desc( int, state_data, state ) - ), - - F_printk("%llx->%llx type:%u state:%u", - __entry->stamp, __entry->end, - __entry->type, __entry->state) -); - FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry, TRACE_KMEM_ALLOC, diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c deleted file mode 100644 index fe1a00f1445..00000000000 --- a/kernel/trace/trace_power.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * ring buffer based C-state tracer - * - * Arjan van de Ven - * Copyright (C) 2008 Intel Corporation - * - * Much is borrowed from trace_boot.c which is - * Copyright (C) 2008 Frederic Weisbecker - * - */ - -#include -#include -#include -#include -#include - -#include "trace.h" -#include "trace_output.h" - -static struct trace_array *power_trace; -static int __read_mostly trace_power_enabled; - -static void probe_power_start(struct power_trace *it, unsigned int type, - unsigned int level) -{ - if (!trace_power_enabled) - return; - - memset(it, 0, sizeof(struct power_trace)); - it->state = level; - it->type = type; - it->stamp = ktime_get(); -} - - -static void probe_power_end(struct power_trace *it) -{ - struct ftrace_event_call *call = &event_power; - struct ring_buffer_event *event; - struct ring_buffer *buffer; - struct trace_power *entry; - struct trace_array_cpu *data; - struct trace_array *tr = power_trace; - - if (!trace_power_enabled) - return; - - buffer = tr->buffer; - - preempt_disable(); - it->end = ktime_get(); - data = tr->data[smp_processor_id()]; - - event = trace_buffer_lock_reserve(buffer, TRACE_POWER, - sizeof(*entry), 0, 0); - if (!event) - goto out; - entry = ring_buffer_event_data(event); - entry->state_data = *it; - if (!filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, 0, 0); - out: - preempt_enable(); -} - -static void probe_power_mark(struct power_trace *it, unsigned int type, - unsigned int level) -{ - struct ftrace_event_call *call = &event_power; - struct ring_buffer_event *event; - struct ring_buffer *buffer; - struct trace_power *entry; - struct trace_array_cpu *data; - struct trace_array *tr = power_trace; - - if (!trace_power_enabled) - return; - - buffer = tr->buffer; - - memset(it, 0, sizeof(struct power_trace)); - it->state = level; - it->type = type; - it->stamp = ktime_get(); - preempt_disable(); - it->end = it->stamp; - data = tr->data[smp_processor_id()]; - - event = trace_buffer_lock_reserve(buffer, TRACE_POWER, - sizeof(*entry), 0, 0); - if (!event) - goto out; - entry = ring_buffer_event_data(event); - entry->state_data = *it; - if (!filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, 0, 0); - out: - preempt_enable(); -} - -static int tracing_power_register(void) -{ - int ret; - - ret = register_trace_power_start(probe_power_start); - if (ret) { - pr_info("power trace: Couldn't activate tracepoint" - " probe to trace_power_start\n"); - return ret; - } - ret = register_trace_power_end(probe_power_end); - if (ret) { - pr_info("power trace: Couldn't activate tracepoint" - " probe to trace_power_end\n"); - goto fail_start; - } - ret = register_trace_power_mark(probe_power_mark); - if (ret) { - pr_info("power trace: Couldn't activate tracepoint" - " probe to trace_power_mark\n"); - goto fail_end; - } - return ret; -fail_end: - unregister_trace_power_end(probe_power_end); -fail_start: - unregister_trace_power_start(probe_power_start); - return ret; -} - -static void start_power_trace(struct trace_array *tr) 
-{ - trace_power_enabled = 1; -} - -static void stop_power_trace(struct trace_array *tr) -{ - trace_power_enabled = 0; -} - -static void power_trace_reset(struct trace_array *tr) -{ - trace_power_enabled = 0; - unregister_trace_power_start(probe_power_start); - unregister_trace_power_end(probe_power_end); - unregister_trace_power_mark(probe_power_mark); -} - - -static int power_trace_init(struct trace_array *tr) -{ - power_trace = tr; - - trace_power_enabled = 1; - tracing_power_register(); - - tracing_reset_online_cpus(tr); - return 0; -} - -static enum print_line_t power_print_line(struct trace_iterator *iter) -{ - int ret = 0; - struct trace_entry *entry = iter->ent; - struct trace_power *field ; - struct power_trace *it; - struct trace_seq *s = &iter->seq; - struct timespec stamp; - struct timespec duration; - - trace_assign_type(field, entry); - it = &field->state_data; - stamp = ktime_to_timespec(it->stamp); - duration = ktime_to_timespec(ktime_sub(it->end, it->stamp)); - - if (entry->type == TRACE_POWER) { - if (it->type == POWER_CSTATE) - ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n", - stamp.tv_sec, - stamp.tv_nsec, - it->state, iter->cpu, - duration.tv_sec, - duration.tv_nsec); - if (it->type == POWER_PSTATE) - ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n", - stamp.tv_sec, - stamp.tv_nsec, - it->state, iter->cpu); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - return TRACE_TYPE_HANDLED; - } - return TRACE_TYPE_UNHANDLED; -} - -static void power_print_header(struct seq_file *s) -{ - seq_puts(s, "# TIMESTAMP STATE EVENT\n"); - seq_puts(s, "# | | |\n"); -} - -static struct tracer power_tracer __read_mostly = -{ - .name = "power", - .init = power_trace_init, - .start = start_power_trace, - .stop = stop_power_trace, - .reset = power_trace_reset, - .print_line = power_print_line, - .print_header = power_print_header, -}; - -static int init_power_trace(void) -{ - return register_tracer(&power_tracer); -} -device_initcall(init_power_trace); diff --git a/scripts/tracing/power.pl b/scripts/tracing/power.pl deleted file mode 100644 index 4f729b3501e..00000000000 --- a/scripts/tracing/power.pl +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/perl - -# Copyright 2008, Intel Corporation -# -# This file is part of the Linux kernel -# -# This program file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program in a file named COPYING; if not, write to the -# Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301 USA -# -# Authors: -# Arjan van de Ven - - -# -# This script turns a cstate ftrace output into a SVG graphic that shows -# historic C-state information -# -# -# cat /sys/kernel/debug/tracing/trace | perl power.pl > out.svg -# - -my @styles; -my $base = 0; - -my @pstate_last; -my @pstate_level; - -$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[1] = "fill:rgb(0,255,0);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[2] = "fill:rgb(255,0,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[3] = "fill:rgb(255,255,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[4] = "fill:rgb(255,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[5] = "fill:rgb(0,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[6] = "fill:rgb(0,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[7] = "fill:rgb(0,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; -$styles[8] = "fill:rgb(0,25,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; - - -print " \n"; -print "\n"; - -my $scale = 30000.0; -while (<>) { - my $line = $_; - if ($line =~ /([0-9\.]+)\] CSTATE: Going to C([0-9]) on cpu ([0-9]+) for ([0-9\.]+)/) { - if ($base == 0) { - $base = $1; - } - my $time = $1 - $base; - $time = $time * $scale; - my $C = $2; - my $cpu = $3; - my $y = 400 * $cpu; - my $duration = $4 * $scale; - my $msec = int($4 * 100000)/100.0; - my $height = $C * 20; - $style = $styles[$C]; - - $y = $y + 140 - $height; - - $x2 = $time + 4; - $y2 = $y + 4; - - - print "\n"; - print "C$C $msec\n"; - } - if ($line =~ /([0-9\.]+)\] PSTATE: Going to P([0-9]) on cpu ([0-9]+)/) { - my $time = $1 - $base; - my $state = $2; - my $cpu = $3; - - if (defined($pstate_last[$cpu])) { - my $from = $pstate_last[$cpu]; - my $oldstate = $pstate_state[$cpu]; - my $duration = ($time-$from) * $scale; - - $from = $from * $scale; - my $to = $from + $duration; - my $height = 140 - ($oldstate * (140/8)); - - my $y = 400 * $cpu + 200 + $height; - my $y2 = $y+4; - my $style = $styles[8]; - - print "\n"; - print "P$oldstate (cpu $cpu)\n"; - }; - - $pstate_last[$cpu] = $time; - $pstate_state[$cpu] = $state; - } -} - - -print "\n"; -- cgit v1.2.3-70-g09d2 From 778dbcc1ebea6f9a560020110987449bf4607e5f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 18 Sep 2009 12:51:44 -0700 Subject: mtd: onenand: make onenand/generic.c more generic Remove the ARM dependency from the generic "onenand" platform device driver. This change makes the driver useful for other architectures as well. Needed for the SuperH kfr2r09 board. Apart from the obvious Kconfig bits, the most important change is the move away from ARM specific includes and platform data. Together with this change the only in-tree board code gets an update, and the driver name is also changed gracefully break potential out of tree drivers. The driver is also updated to allow NULL as platform data together with a few changes to make use of resource_size() and dev_name(). 
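For illustration, a minimal board-file sketch using the renamed driver; the base address, partition layout and all my_board_* names are made up, while the "onenand-flash" name and struct onenand_platform_data are what this patch introduces.

/* Sketch only: addresses, partitions and my_board_* names are
 * placeholders; the driver name and platform data type come from this
 * patch.  .mmcontrol may stay NULL now that the driver tolerates it. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/onenand.h>

static struct mtd_partition my_board_parts[] = {
	{ .name = "bootloader", .offset = 0, .size = 0x40000 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
	  .size = MTDPART_SIZ_FULL },
};

static struct onenand_platform_data my_board_onenand_data = {
	.parts		= my_board_parts,
	.nr_parts	= ARRAY_SIZE(my_board_parts),
};

static struct resource my_board_onenand_resource[] = {
	{
		.start	= 0xa0000000,		/* made-up base address */
		.end	= 0xa0000000 + 0x20000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device my_board_onenand_device = {
	.name		= "onenand-flash",	/* new driver name */
	.id		= -1,
	.dev		= { .platform_data = &my_board_onenand_data },
	.num_resources	= ARRAY_SIZE(my_board_onenand_resource),
	.resource	= my_board_onenand_resource,
};

static int __init my_board_onenand_init(void)
{
	return platform_device_register(&my_board_onenand_device);
}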
Signed-off-by: Magnus Damm Cc: Paul Mundt Cc: Tony Lindgren Cc: Kyungmin Park Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse --- arch/arm/mach-omap2/board-apollon.c | 4 ++-- drivers/mtd/onenand/Kconfig | 1 - drivers/mtd/onenand/generic.c | 24 ++++++++++++++---------- include/linux/mtd/onenand.h | 8 ++++++++ 4 files changed, 24 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c index dcfc20d0389..b4d4ef6d90b 100644 --- a/arch/arm/mach-omap2/board-apollon.c +++ b/arch/arm/mach-omap2/board-apollon.c @@ -87,7 +87,7 @@ static struct mtd_partition apollon_partitions[] = { }, }; -static struct flash_platform_data apollon_flash_data = { +static struct onenand_platform_data apollon_flash_data = { .parts = apollon_partitions, .nr_parts = ARRAY_SIZE(apollon_partitions), }; @@ -99,7 +99,7 @@ static struct resource apollon_flash_resource[] = { }; static struct platform_device apollon_onenand_device = { - .name = "onenand", + .name = "onenand-flash", .id = -1, .dev = { .platform_data = &apollon_flash_data, diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index 3a094d1bf8c..a38f580c2bb 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -24,7 +24,6 @@ config MTD_ONENAND_VERIFY_WRITE config MTD_ONENAND_GENERIC tristate "OneNAND Flash device via platform device driver" - depends on ARM help Support for OneNAND flash via platform device driver. diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index 3a496c33fb5..e78914938c5 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c @@ -19,12 +19,16 @@ #include #include #include - #include -#include - -#define DRIVER_NAME "onenand" +/* + * Note: Driver name and platform data format have been updated! + * + * This version of the driver is named "onenand-flash" and takes struct + * onenand_platform_data as platform data. The old ARM-specific version + * with the name "onenand" used to take struct flash_platform_data. + */ +#define DRIVER_NAME "onenand-flash" #ifdef CONFIG_MTD_PARTITIONS static const char *part_probes[] = { "cmdlinepart", NULL, }; @@ -39,16 +43,16 @@ struct onenand_info { static int __devinit generic_onenand_probe(struct platform_device *pdev) { struct onenand_info *info; - struct flash_platform_data *pdata = pdev->dev.platform_data; + struct onenand_platform_data *pdata = pdev->dev.platform_data; struct resource *res = pdev->resource; - unsigned long size = res->end - res->start + 1; + unsigned long size = resource_size(res); int err; info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL); if (!info) return -ENOMEM; - if (!request_mem_region(res->start, size, pdev->dev.driver->name)) { + if (!request_mem_region(res->start, size, dev_name(&pdev->dev))) { err = -EBUSY; goto out_free_info; } @@ -59,7 +63,7 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev) goto out_release_mem_region; } - info->onenand.mmcontrol = pdata->mmcontrol; + info->onenand.mmcontrol = pdata ? 
pdata->mmcontrol : 0; info->onenand.irq = platform_get_irq(pdev, 0); info->mtd.name = dev_name(&pdev->dev); @@ -75,7 +79,7 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev) err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); if (err > 0) add_mtd_partitions(&info->mtd, info->parts, err); - else if (err <= 0 && pdata->parts) + else if (err <= 0 && pdata && pdata->parts) add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts); else #endif @@ -99,7 +103,7 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev) { struct onenand_info *info = platform_get_drvdata(pdev); struct resource *res = pdev->resource; - unsigned long size = res->end - res->start + 1; + unsigned long size = resource_size(res); platform_set_drvdata(pdev, NULL); diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 8ed87337438..4e49f335067 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -214,4 +214,12 @@ unsigned onenand_block(struct onenand_chip *this, loff_t addr); loff_t onenand_addr(struct onenand_chip *this, int block); int flexonenand_region(struct mtd_info *mtd, loff_t addr); +struct mtd_partition; + +struct onenand_platform_data { + void (*mmcontrol)(struct mtd_info *mtd, int sync_read); + struct mtd_partition *parts; + unsigned int nr_parts; +}; + #endif /* __LINUX_MTD_ONENAND_H */ -- cgit v1.2.3-70-g09d2 From 46a8cf2df2232c0051f29716ff8a166ebeb08daf Mon Sep 17 00:00:00 2001 From: Sneha Narnakaje Date: Fri, 18 Sep 2009 12:51:46 -0700 Subject: mtd: nand: add "page" parameter to all read_page/read_page_raw APIs This patch adds a new "page" parameter to all NAND read_page/read_page_raw APIs. The read_page API for the new mode ECC_HW_OOB_FIRST requires the page information to send the READOOB command and read the OOB area before the data area. Reviewed-by: David Brownell Signed-off-by: Sneha Narnakaje Signed-off-by: Sandeep Paulraj Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse --- drivers/mtd/nand/atmel_nand.c | 2 +- drivers/mtd/nand/cafe_nand.c | 2 +- drivers/mtd/nand/fsl_elbc_nand.c | 3 ++- drivers/mtd/nand/nand_base.c | 18 ++++++++++-------- drivers/mtd/nand/sh_flctl.c | 2 +- include/linux/mtd/nand.h | 4 ++-- 6 files changed, 17 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 20c828ba940..f8e9975c86e 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -218,7 +218,7 @@ static int atmel_nand_calculate(struct mtd_info *mtd, * buf: buffer to store read data */ static int atmel_nand_read_page(struct mtd_info *mtd, - struct nand_chip *chip, uint8_t *buf) + struct nand_chip *chip, uint8_t *buf, int page) { int eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 29acd06b1c3..a70f40e161d 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -381,7 +381,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, * we need a special oob layout and handling. 
*/ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, int page) { struct cafe_priv *cafe = mtd->priv; diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 1f6eb257871..ddd37d2554e 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -739,7 +739,8 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, + int page) { fsl_elbc_read_buf(mtd, buf, mtd->writesize); fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 4c5e8a74e1b..17bbd506202 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -765,7 +765,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) * Not for syndrome calculating ecc controllers, which use a special oob layout */ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, int page) { chip->read_buf(mtd, buf, mtd->writesize); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -781,7 +781,7 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, * We need a special oob layout and handling even when OOB isn't used. */ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, int page) { int eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -820,7 +820,7 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *c * @buf: buffer to store read data */ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -830,7 +830,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *ecc_code = chip->buffers->ecccode; uint32_t *eccpos = chip->ecc.layout->eccpos; - chip->ecc.read_page_raw(mtd, chip, buf); + chip->ecc.read_page_raw(mtd, chip, buf, page); for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) chip->ecc.calculate(mtd, p, &ecc_calc[i]); @@ -943,7 +943,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3 * Not for syndrome calculating ecc controllers which need a special oob layout */ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -988,7 +988,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, * we need a special oob layout and handling. 
*/ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -1130,11 +1130,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, /* Now read the page into the buffer */ if (unlikely(ops->mode == MTD_OOB_RAW)) - ret = chip->ecc.read_page_raw(mtd, chip, bufpoi); + ret = chip->ecc.read_page_raw(mtd, chip, + bufpoi, page); else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi); else - ret = chip->ecc.read_page(mtd, chip, bufpoi); + ret = chip->ecc.read_page(mtd, chip, bufpoi, + page); if (ret < 0) break; diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 2bc896623e2..c5df28596a4 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -329,7 +329,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va } static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf) + uint8_t *buf, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 4030ebada49..686f3701f2f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -271,13 +271,13 @@ struct nand_ecc_ctrl { uint8_t *calc_ecc); int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf); + uint8_t *buf, int page); void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf); int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf); + uint8_t *buf, int page); int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offs, uint32_t len, -- cgit v1.2.3-70-g09d2 From 6e0cb135b3f3713b95ea41a11155e83a8c70f5f8 Mon Sep 17 00:00:00 2001 From: Sneha Narnakaje Date: Fri, 18 Sep 2009 12:51:47 -0700 Subject: mtd: nand: add new ECC mode - ECC_HW_OOB_FIRST This patch adds the new mode NAND_ECC_HW_OOB_FIRST in the nand code to support 4-bit ECC on TI DaVinci devices with large page (up to 2KiB) NAND chips. This ECC mode is similar to NAND_ECC_HW, with the exception of read_page API that first reads the OOB area, reads the data in chunks, feeds the ECC from OOB area to the ECC hw engine and perform any correction on the data as per the ECC status reported by the engine. "ECC_HW_OOB_FIRST" name suggested by Thomas Gleixner Reviewed-by: David Brownell Signed-off-by: Sneha Narnakaje Signed-off-by: Sandeep Paulraj Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse --- drivers/mtd/nand/nand_base.c | 61 +++++++++++++++++++++++++++++++++++++++++++- include/linux/mtd/nand.h | 1 + 2 files changed, 61 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 17bbd506202..22113865438 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -978,6 +978,54 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, return 0; } +/** + * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * + * Hardware ECC for large page chips, require OOB to be read first. + * For this ECC mode, the write_page method is re-used from ECC_HW. 
+ * These methods read/write ECC from the OOB area, unlike the + * ECC_HW_SYNDROME support with multiple ECC steps, follows the + * "infix ECC" scheme and reads/writes ECC from the data area, by + * overwriting the NAND manufacturer bad block markings. + */ +static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, + struct nand_chip *chip, uint8_t *buf, int page) +{ + int i, eccsize = chip->ecc.size; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + uint8_t *p = buf; + uint8_t *ecc_code = chip->buffers->ecccode; + uint32_t *eccpos = chip->ecc.layout->eccpos; + uint8_t *ecc_calc = chip->buffers->ecccalc; + + /* Read the OOB area first */ + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); + + for (i = 0; i < chip->ecc.total; i++) + ecc_code[i] = chip->oob_poi[eccpos[i]]; + + for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { + int stat; + + chip->ecc.hwctl(mtd, NAND_ECC_READ); + chip->read_buf(mtd, p, eccsize); + chip->ecc.calculate(mtd, p, &ecc_calc[i]); + + stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); + if (stat < 0) + mtd->ecc_stats.failed++; + else + mtd->ecc_stats.corrected += stat; + } + return 0; +} + /** * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read * @mtd: mtd info structure @@ -2673,6 +2721,17 @@ int nand_scan_tail(struct mtd_info *mtd) */ switch (chip->ecc.mode) { + case NAND_ECC_HW_OOB_FIRST: + /* Similar to NAND_ECC_HW, but a separate read_page handle */ + if (!chip->ecc.calculate || !chip->ecc.correct || + !chip->ecc.hwctl) { + printk(KERN_WARNING "No ECC functions supplied; " + "Hardware ECC not possible\n"); + BUG(); + } + if (!chip->ecc.read_page) + chip->ecc.read_page = nand_read_page_hwecc_oob_first; + case NAND_ECC_HW: /* Use standard hwecc read page function ? */ if (!chip->ecc.read_page) @@ -2695,7 +2754,7 @@ int nand_scan_tail(struct mtd_info *mtd) chip->ecc.read_page == nand_read_page_hwecc || !chip->ecc.write_page || chip->ecc.write_page == nand_write_page_hwecc)) { - printk(KERN_WARNING "No ECC functions supplied, " + printk(KERN_WARNING "No ECC functions supplied; " "Hardware ECC not possible\n"); BUG(); } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 686f3701f2f..7a232a9bdd6 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -121,6 +121,7 @@ typedef enum { NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, + NAND_ECC_HW_OOB_FIRST, } nand_ecc_modes_t; /* -- cgit v1.2.3-70-g09d2 From e454cea20bdcff10ee698d11b8882662a0153a47 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Fri, 18 Sep 2009 23:01:12 +0200 Subject: Driver-Core: extend devnode callbacks to provide permissions This allows subsytems to provide devtmpfs with non-default permissions for the device node. Instead of the default mode of 0600, null, zero, random, urandom, full, tty, ptmx now have a mode of 0666, which allows non-privileged processes to access standard device nodes in case no other userspace process applies the expected permissions. This also fixes a wrong assignment in pktcdvd and a checkpatch.pl complain. 
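For illustration, a minimal sketch of a class devnode callback under the extended signature; "mydrv" and the 0666 policy are hypothetical, while the callback shape, returning a relative node name and optionally filling *mode, is what this patch defines.

/* Sketch only: "mydrv" is a made-up class; the signature and the
 * kasprintf()/dev_name() idiom follow the callbacks converted below. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static char *mydrv_devnode(struct device *dev, mode_t *mode)
{
	if (mode)
		*mode = 0666;	/* world-accessible, like null/zero/tty */
	/* node appears as /dev/mydrv/<devname>; caller kfree()s the string */
	return kasprintf(GFP_KERNEL, "mydrv/%s", dev_name(dev));
}

/* hooked up once at init time, e.g. mydrv_class->devnode = mydrv_devnode; */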
Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpuid.c | 4 ++-- arch/x86/kernel/microcode_core.c | 2 +- arch/x86/kernel/msr.c | 4 ++-- block/bsg.c | 4 ++-- block/genhd.c | 8 ++++---- drivers/base/core.c | 19 ++++++++++++------- drivers/base/devtmpfs.c | 24 ++++++++++++++++-------- drivers/block/aoe/aoechr.c | 4 ++-- drivers/block/pktcdvd.c | 6 +++--- drivers/char/hw_random/core.c | 2 +- drivers/char/mem.c | 29 +++++++++++++++++++---------- drivers/char/misc.c | 10 ++++++---- drivers/char/raw.c | 4 ++-- drivers/char/tty_io.c | 11 +++++++++++ drivers/gpu/drm/drm_sysfs.c | 4 ++-- drivers/hid/usbhid/hiddev.c | 4 ++-- drivers/input/input.c | 4 ++-- drivers/md/dm-ioctl.c | 2 +- drivers/media/dvb/dvb-core/dvbdev.c | 4 ++-- drivers/net/tun.c | 2 +- drivers/usb/class/usblp.c | 4 ++-- drivers/usb/core/file.c | 8 ++++---- drivers/usb/core/usb.c | 4 ++-- drivers/usb/misc/iowarrior.c | 4 ++-- drivers/usb/misc/legousbtower.c | 4 ++-- include/linux/device.h | 7 ++++--- include/linux/genhd.h | 2 +- include/linux/miscdevice.h | 3 ++- include/linux/usb.h | 4 ++-- sound/sound_core.c | 4 ++-- 30 files changed, 116 insertions(+), 79 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index b07af886124..6a52d4b36a3 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -182,7 +182,7 @@ static struct notifier_block __refdata cpuid_class_cpu_notifier = .notifier_call = cpuid_class_cpu_callback, }; -static char *cpuid_nodename(struct device *dev) +static char *cpuid_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt)); } @@ -203,7 +203,7 @@ static int __init cpuid_init(void) err = PTR_ERR(cpuid_class); goto out_chrdev; } - cpuid_class->nodename = cpuid_nodename; + cpuid_class->devnode = cpuid_devnode; for_each_online_cpu(i) { err = cpuid_device_create(i); if (err != 0) diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 9371448290a..0db7969b0dd 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -236,7 +236,7 @@ static const struct file_operations microcode_fops = { static struct miscdevice microcode_dev = { .minor = MICROCODE_MINOR, .name = "microcode", - .devnode = "cpu/microcode", + .nodename = "cpu/microcode", .fops = µcode_fops, }; diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 7dd95009417..6a3cefc7dda 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -241,7 +241,7 @@ static struct notifier_block __refdata msr_class_cpu_notifier = { .notifier_call = msr_class_cpu_callback, }; -static char *msr_nodename(struct device *dev) +static char *msr_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt)); } @@ -262,7 +262,7 @@ static int __init msr_init(void) err = PTR_ERR(msr_class); goto out_chrdev; } - msr_class->nodename = msr_nodename; + msr_class->devnode = msr_devnode; for_each_online_cpu(i) { err = msr_device_create(i); if (err != 0) diff --git a/block/bsg.c b/block/bsg.c index 5f184bb3ff9..0676301f16d 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -1062,7 +1062,7 @@ EXPORT_SYMBOL_GPL(bsg_register_queue); static struct cdev bsg_cdev; -static char *bsg_nodename(struct device *dev) +static char *bsg_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); } @@ -1087,7 +1087,7 @@ static int __init bsg_init(void) ret = PTR_ERR(bsg_class); goto destroy_kmemcache; } - 
bsg_class->nodename = bsg_nodename; + bsg_class->devnode = bsg_devnode; ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); if (ret) diff --git a/block/genhd.c b/block/genhd.c index 2ad91ddad8e..517e4332cb3 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -998,12 +998,12 @@ struct class block_class = { .name = "block", }; -static char *block_nodename(struct device *dev) +static char *block_devnode(struct device *dev, mode_t *mode) { struct gendisk *disk = dev_to_disk(dev); - if (disk->nodename) - return disk->nodename(disk); + if (disk->devnode) + return disk->devnode(disk, mode); return NULL; } @@ -1011,7 +1011,7 @@ static struct device_type disk_type = { .name = "disk", .groups = disk_attr_groups, .release = disk_release, - .nodename = block_nodename, + .devnode = block_devnode, }; #ifdef CONFIG_PROC_FS diff --git a/drivers/base/core.c b/drivers/base/core.c index 390e664ec1c..6bee6af8d8e 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -166,13 +166,16 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, if (MAJOR(dev->devt)) { const char *tmp; const char *name; + mode_t mode = 0; add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); - name = device_get_nodename(dev, &tmp); + name = device_get_devnode(dev, &mode, &tmp); if (name) { add_uevent_var(env, "DEVNAME=%s", name); kfree(tmp); + if (mode) + add_uevent_var(env, "DEVMODE=%#o", mode & 0777); } } @@ -1148,8 +1151,9 @@ static struct device *next_device(struct klist_iter *i) } /** - * device_get_nodename - path of device node file + * device_get_devnode - path of device node file * @dev: device + * @mode: returned file access mode * @tmp: possibly allocated string * * Return the relative path of a possible device node. @@ -1157,21 +1161,22 @@ static struct device *next_device(struct klist_iter *i) * a name. This memory is returned in tmp and needs to be * freed by the caller. */ -const char *device_get_nodename(struct device *dev, const char **tmp) +const char *device_get_devnode(struct device *dev, + mode_t *mode, const char **tmp) { char *s; *tmp = NULL; /* the device type may provide a specific name */ - if (dev->type && dev->type->nodename) - *tmp = dev->type->nodename(dev); + if (dev->type && dev->type->devnode) + *tmp = dev->type->devnode(dev, mode); if (*tmp) return *tmp; /* the class may provide a specific name */ - if (dev->class && dev->class->nodename) - *tmp = dev->class->nodename(dev); + if (dev->class && dev->class->devnode) + *tmp = dev->class->devnode(dev, mode); if (*tmp) return *tmp; diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index fd488ad4263..a1cb5afe680 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -6,9 +6,10 @@ * During bootup, before any driver core device is registered, * devtmpfs, a tmpfs-based filesystem is created. Every driver-core * device which requests a device node, will add a node in this - * filesystem. The node is named after the the name of the device, - * or the susbsytem can provide a custom name. All devices are - * owned by root and have a mode of 0600. + * filesystem. + * By default, all devices are named after the the name of the + * device, owned by root and have a default mode of 0600. Subsystems + * can overwrite the default setting if needed. 
*/ #include @@ -20,6 +21,7 @@ #include #include #include +#include #include static struct vfsmount *dev_mnt; @@ -134,7 +136,7 @@ int devtmpfs_create_node(struct device *dev) const char *tmp = NULL; const char *nodename; const struct cred *curr_cred; - mode_t mode; + mode_t mode = 0; struct nameidata nd; struct dentry *dentry; int err; @@ -142,14 +144,16 @@ int devtmpfs_create_node(struct device *dev) if (!dev_mnt) return 0; - nodename = device_get_nodename(dev, &tmp); + nodename = device_get_devnode(dev, &mode, &tmp); if (!nodename) return -ENOMEM; + if (mode == 0) + mode = 0600; if (is_blockdev(dev)) - mode = S_IFBLK|0600; + mode |= S_IFBLK; else - mode = S_IFCHR|0600; + mode |= S_IFCHR; curr_cred = override_creds(&init_cred); err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, @@ -165,8 +169,12 @@ int devtmpfs_create_node(struct device *dev) dentry = lookup_create(&nd, 0); if (!IS_ERR(dentry)) { + int umask; + + umask = sys_umask(0000); err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, dev->devt); + sys_umask(umask); /* mark as kernel created inode */ if (!err) dentry->d_inode->i_private = &dev_mnt; @@ -271,7 +279,7 @@ int devtmpfs_delete_node(struct device *dev) if (!dev_mnt) return 0; - nodename = device_get_nodename(dev, &tmp); + nodename = device_get_devnode(dev, NULL, &tmp); if (!nodename) return -ENOMEM; diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c index 19888354188..62141ec09a2 100644 --- a/drivers/block/aoe/aoechr.c +++ b/drivers/block/aoe/aoechr.c @@ -266,7 +266,7 @@ static const struct file_operations aoe_fops = { .owner = THIS_MODULE, }; -static char *aoe_nodename(struct device *dev) +static char *aoe_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev)); } @@ -288,7 +288,7 @@ aoechr_init(void) unregister_chrdev(AOE_MAJOR, "aoechr"); return PTR_ERR(aoe_class); } - aoe_class->nodename = aoe_nodename; + aoe_class->devnode = aoe_devnode; for (i = 0; i < ARRAY_SIZE(chardevs); ++i) device_create(aoe_class, NULL, diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 95f11cdef20..fd5bb8ad59a 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2857,7 +2857,7 @@ static struct block_device_operations pktcdvd_ops = { .media_changed = pkt_media_changed, }; -static char *pktcdvd_nodename(struct gendisk *gd) +static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode) { return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name); } @@ -2914,7 +2914,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) disk->fops = &pktcdvd_ops; disk->flags = GENHD_FL_REMOVABLE; strcpy(disk->disk_name, pd->name); - disk->nodename = pktcdvd_nodename; + disk->devnode = pktcdvd_devnode; disk->private_data = pd; disk->queue = blk_alloc_queue(GFP_KERNEL); if (!disk->queue) @@ -3070,7 +3070,7 @@ static const struct file_operations pkt_ctl_fops = { static struct miscdevice pkt_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DRIVER_NAME, - .name = "pktcdvd/control", + .nodename = "pktcdvd/control", .fops = &pkt_ctl_fops }; diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index fc93e2fc7c7..1573aebd54b 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -153,7 +153,7 @@ static const struct file_operations rng_chrdev_ops = { static struct miscdevice rng_miscdev = { .minor = RNG_MISCDEV_MINOR, .name = RNG_MODULE_NAME, - .devnode = "hwrng", + .nodename = "hwrng", .fops = &rng_chrdev_ops, }; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 
0491cdf63f2..0aede1d6a9e 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -866,24 +866,25 @@ static const struct file_operations kmsg_fops = { static const struct memdev { const char *name; + mode_t mode; const struct file_operations *fops; struct backing_dev_info *dev_info; } devlist[] = { - [ 1] = { "mem", &mem_fops, &directly_mappable_cdev_bdi }, + [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi }, #ifdef CONFIG_DEVKMEM - [ 2] = { "kmem", &kmem_fops, &directly_mappable_cdev_bdi }, + [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi }, #endif - [ 3] = {"null", &null_fops, NULL }, + [3] = { "null", 0666, &null_fops, NULL }, #ifdef CONFIG_DEVPORT - [ 4] = { "port", &port_fops, NULL }, + [4] = { "port", 0, &port_fops, NULL }, #endif - [ 5] = { "zero", &zero_fops, &zero_bdi }, - [ 7] = { "full", &full_fops, NULL }, - [ 8] = { "random", &random_fops, NULL }, - [ 9] = { "urandom", &urandom_fops, NULL }, - [11] = { "kmsg", &kmsg_fops, NULL }, + [5] = { "zero", 0666, &zero_fops, &zero_bdi }, + [7] = { "full", 0666, &full_fops, NULL }, + [8] = { "random", 0666, &random_fops, NULL }, + [9] = { "urandom", 0666, &urandom_fops, NULL }, + [11] = { "kmsg", 0, &kmsg_fops, NULL }, #ifdef CONFIG_CRASH_DUMP - [12] = { "oldmem", &oldmem_fops, NULL }, + [12] = { "oldmem", 0, &oldmem_fops, NULL }, #endif }; @@ -920,6 +921,13 @@ static const struct file_operations memory_fops = { .open = memory_open, }; +static char *mem_devnode(struct device *dev, mode_t *mode) +{ + if (mode && devlist[MINOR(dev->devt)].mode) + *mode = devlist[MINOR(dev->devt)].mode; + return NULL; +} + static struct class *mem_class; static int __init chr_dev_init(void) @@ -935,6 +943,7 @@ static int __init chr_dev_init(void) printk("unable to get major %d for memory devs\n", MEM_MAJOR); mem_class = class_create(THIS_MODULE, "mem"); + mem_class->devnode = mem_devnode; for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { if (!devlist[minor].name) continue; diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 62c99fa59e2..1ee27cc2342 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -263,12 +263,14 @@ int misc_deregister(struct miscdevice *misc) EXPORT_SYMBOL(misc_register); EXPORT_SYMBOL(misc_deregister); -static char *misc_nodename(struct device *dev) +static char *misc_devnode(struct device *dev, mode_t *mode) { struct miscdevice *c = dev_get_drvdata(dev); - if (c->devnode) - return kstrdup(c->devnode, GFP_KERNEL); + if (mode && c->mode) + *mode = c->mode; + if (c->nodename) + return kstrdup(c->nodename, GFP_KERNEL); return NULL; } @@ -287,7 +289,7 @@ static int __init misc_init(void) err = -EIO; if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) goto fail_printk; - misc_class->nodename = misc_nodename; + misc_class->devnode = misc_devnode; return 0; fail_printk: diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 40268db02e2..64acd05f71c 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -261,7 +261,7 @@ static const struct file_operations raw_ctl_fops = { static struct cdev raw_cdev; -static char *raw_nodename(struct device *dev) +static char *raw_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev)); } @@ -289,7 +289,7 @@ static int __init raw_init(void) ret = PTR_ERR(raw_class); goto error_region; } - raw_class->nodename = raw_nodename; + raw_class->devnode = raw_devnode; device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl"); return 0; diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 
a3afa0c387c..c70d9dabefa 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -3056,11 +3056,22 @@ void __init console_init(void) } } +static char *tty_devnode(struct device *dev, mode_t *mode) +{ + if (!mode) + return NULL; + if (dev->devt == MKDEV(TTYAUX_MAJOR, 0) || + dev->devt == MKDEV(TTYAUX_MAJOR, 2)) + *mode = 0666; + return NULL; +} + static int __init tty_class_init(void) { tty_class = class_create(THIS_MODULE, "tty"); if (IS_ERR(tty_class)) return PTR_ERR(tty_class); + tty_class->devnode = tty_devnode; return 0; } diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index f7a615b80c7..5301f226cb1 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -76,7 +76,7 @@ static ssize_t version_show(struct class *dev, char *buf) CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); } -static char *drm_nodename(struct device *dev) +static char *drm_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); } @@ -112,7 +112,7 @@ struct class *drm_sysfs_create(struct module *owner, char *name) if (err) goto err_out_class; - class->nodename = drm_nodename; + class->devnode = drm_devnode; return class; diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 4d1dc0cf140..8b6ee247bfe 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -852,14 +852,14 @@ static const struct file_operations hiddev_fops = { #endif }; -static char *hiddev_nodename(struct device *dev) +static char *hiddev_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } static struct usb_class_driver hiddev_class = { .name = "hiddev%d", - .nodename = hiddev_nodename, + .devnode = hiddev_devnode, .fops = &hiddev_fops, .minor_base = HIDDEV_MINOR_BASE, }; diff --git a/drivers/input/input.c b/drivers/input/input.c index 851791d955f..556539d617a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1265,14 +1265,14 @@ static struct device_type input_dev_type = { .uevent = input_dev_uevent, }; -static char *input_nodename(struct device *dev) +static char *input_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); } struct class input_class = { .name = "input", - .nodename = input_nodename, + .devnode = input_devnode, }; EXPORT_SYMBOL_GPL(input_class); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 7f77f18fcaf..a6794293158 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1532,7 +1532,7 @@ static const struct file_operations _ctl_fops = { static struct miscdevice _dm_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DM_NAME, - .devnode = "mapper/control", + .nodename = "mapper/control", .fops = &_ctl_fops }; diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c index 479dd05762a..94159b90f73 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.c +++ b/drivers/media/dvb/dvb-core/dvbdev.c @@ -447,7 +447,7 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -static char *dvb_nodename(struct device *dev) +static char *dvb_devnode(struct device *dev, mode_t *mode) { struct dvb_device *dvbdev = dev_get_drvdata(dev); @@ -478,7 +478,7 @@ static int __init init_dvbdev(void) goto error; } dvb_class->dev_uevent = dvb_uevent; - dvb_class->nodename = dvb_nodename; + dvb_class->devnode = dvb_devnode; return 0; error: diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3f5d28851aa..d3ee1994b02 100644 --- 
a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1370,7 +1370,7 @@ static const struct file_operations tun_fops = { static struct miscdevice tun_miscdev = { .minor = TUN_MINOR, .name = "tun", - .devnode = "net/tun", + .nodename = "net/tun", .fops = &tun_fops, }; diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 26c09f0257d..9bc112ee780 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -1057,14 +1057,14 @@ static const struct file_operations usblp_fops = { .release = usblp_release, }; -static char *usblp_nodename(struct device *dev) +static char *usblp_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } static struct usb_class_driver usblp_class = { .name = "lp%d", - .nodename = usblp_nodename, + .devnode = usblp_devnode, .fops = &usblp_fops, .minor_base = USBLP_MINOR_BASE, }; diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 5cef88929b3..222ee07ea68 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -67,14 +67,14 @@ static struct usb_class { struct class *class; } *usb_class; -static char *usb_nodename(struct device *dev) +static char *usb_devnode(struct device *dev, mode_t *mode) { struct usb_class_driver *drv; drv = dev_get_drvdata(dev); - if (!drv || !drv->nodename) + if (!drv || !drv->devnode) return NULL; - return drv->nodename(dev); + return drv->devnode(dev, mode); } static int init_usb_class(void) @@ -100,7 +100,7 @@ static int init_usb_class(void) kfree(usb_class); usb_class = NULL; } - usb_class->class->nodename = usb_nodename; + usb_class->class->devnode = usb_devnode; exit: return result; diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index a26f73880c3..43ee943d757 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -311,7 +311,7 @@ static struct dev_pm_ops usb_device_pm_ops = { #endif /* CONFIG_PM */ -static char *usb_nodename(struct device *dev) +static char *usb_devnode(struct device *dev, mode_t *mode) { struct usb_device *usb_dev; @@ -324,7 +324,7 @@ struct device_type usb_device_type = { .name = "usb_device", .release = usb_release_dev, .uevent = usb_dev_uevent, - .nodename = usb_nodename, + .devnode = usb_devnode, .pm = &usb_device_pm_ops, }; diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 90e1a8dedfa..e75bb87ee92 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -727,7 +727,7 @@ static const struct file_operations iowarrior_fops = { .poll = iowarrior_poll, }; -static char *iowarrior_nodename(struct device *dev) +static char *iowarrior_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } @@ -738,7 +738,7 @@ static char *iowarrior_nodename(struct device *dev) */ static struct usb_class_driver iowarrior_class = { .name = "iowarrior%d", - .nodename = iowarrior_nodename, + .devnode = iowarrior_devnode, .fops = &iowarrior_fops, .minor_base = IOWARRIOR_MINOR_BASE, }; diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index c1e2433f640..97efeaec4d5 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -266,7 +266,7 @@ static const struct file_operations tower_fops = { .llseek = tower_llseek, }; -static char *legousbtower_nodename(struct device *dev) +static char *legousbtower_devnode(struct device *dev, mode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } @@ -277,7 +277,7 @@ static char *legousbtower_nodename(struct device *dev) */ 
static struct usb_class_driver tower_class = { .name = "legousbtower%d", - .nodename = legousbtower_nodename, + .devnode = legousbtower_devnode, .fops = &tower_fops, .minor_base = LEGO_USB_TOWER_MINOR_BASE, }; diff --git a/include/linux/device.h b/include/linux/device.h index 847b763e40e..aca31bf7d8e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -193,7 +193,7 @@ struct class { struct kobject *dev_kobj; int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); - char *(*nodename)(struct device *dev); + char *(*devnode)(struct device *dev, mode_t *mode); void (*class_release)(struct class *class); void (*dev_release)(struct device *dev); @@ -298,7 +298,7 @@ struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *dev, struct kobj_uevent_env *env); - char *(*nodename)(struct device *dev); + char *(*devnode)(struct device *dev, mode_t *mode); void (*release)(struct device *dev); const struct dev_pm_ops *pm; @@ -487,7 +487,8 @@ extern struct device *device_find_child(struct device *dev, void *data, extern int device_rename(struct device *dev, char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); -extern const char *device_get_nodename(struct device *dev, const char **tmp); +extern const char *device_get_devnode(struct device *dev, + mode_t *mode, const char **tmp); extern void *dev_get_drvdata(const struct device *dev); extern void dev_set_drvdata(struct device *dev, void *data); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 44263cb2712..109d179adb9 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -142,7 +142,7 @@ struct gendisk { * disks that can't be partitioned. */ char disk_name[DISK_NAME_LEN]; /* name of major driver */ - char *(*nodename)(struct gendisk *gd); + char *(*devnode)(struct gendisk *gd, mode_t *mode); /* Array of pointers to partitions indexed by partno. * Protected with matching bdev lock but stat and other * non-critical accesses use RCU. Always access through diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 05211774462..adaf3c15e44 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -41,7 +41,8 @@ struct miscdevice { struct list_head list; struct device *parent; struct device *this_device; - const char *devnode; + const char *nodename; + mode_t mode; }; extern int misc_register(struct miscdevice * misc); diff --git a/include/linux/usb.h b/include/linux/usb.h index b1e3c2fbfe1..a8fe05f224e 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -922,7 +922,7 @@ extern struct bus_type usb_bus_type; /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number * @name: the usb class device name for this driver. Will show up in sysfs. - * @nodename: Callback to provide a naming hint for a possible + * @devnode: Callback to provide a naming hint for a possible * device node to create. * @fops: pointer to the struct file_operations of this driver. * @minor_base: the start of the minor range for this driver. 
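A minimal sketch of how a driver adopts the renamed callback (hypothetical foo_* names, not taken from any patch in this series): the return value is an optional kmalloc'd node-name hint, and *mode is filled in only when a non-default permission is wanted. Returning NULL and leaving *mode untouched keeps the core's defaults, which is exactly what the simpler converted drivers above do.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>	/* kasprintf() */
#include <linux/gfp.h>		/* GFP_KERNEL */

static char *foo_devnode(struct device *dev, mode_t *mode)
{
	if (mode)			/* optional permission hint */
		*mode = 0666;
	/* optional name hint: place the node under a foo/ subdirectory */
	return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
}

static struct class *foo_class;

static int __init foo_init(void)
{
	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);
	foo_class->devnode = foo_devnode;	/* hook the renamed callback */
	return 0;
}

This mirrors the pattern used by the mem, tty and usb class conversions in this patch: one callback now carries both the naming hint that nodename used to provide and the new mode hint.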
@@ -933,7 +933,7 @@ extern struct bus_type usb_bus_type; */ struct usb_class_driver { char *name; - char *(*nodename)(struct device *dev); + char *(*devnode)(struct device *dev, mode_t *mode); const struct file_operations *fops; int minor_base; }; diff --git a/sound/sound_core.c b/sound/sound_core.c index bb4b88e606b..49c99818659 100644 --- a/sound/sound_core.c +++ b/sound/sound_core.c @@ -29,7 +29,7 @@ MODULE_DESCRIPTION("Core sound module"); MODULE_AUTHOR("Alan Cox"); MODULE_LICENSE("GPL"); -static char *sound_nodename(struct device *dev) +static char *sound_devnode(struct device *dev, mode_t *mode) { if (MAJOR(dev->devt) == SOUND_MAJOR) return NULL; @@ -50,7 +50,7 @@ static int __init init_soundcore(void) return PTR_ERR(sound_class); } - sound_class->nodename = sound_nodename; + sound_class->devnode = sound_devnode; return 0; } -- cgit v1.2.3-70-g09d2 From f0eefdc30e55e761facf645bd1be1339b21c30e6 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sat, 19 Sep 2009 13:13:13 -0700 Subject: cyclades: avoid addresses recomputation Don't fetch firmware address and recompute channel control on each port access. Precompute the values on init and use them later all the time. The same for board control. This simplify code and improves readability. Signed-off-by: Jiri Slaby Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/cyclades.c | 241 ++++++++++++++--------------------------------- include/linux/cyclades.h | 10 ++ 2 files changed, 81 insertions(+), 170 deletions(-) (limited to 'include') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 9e0cd7020ae..7a7092ae5d3 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -814,7 +814,7 @@ static char rflow_thr[] = { /* rflow threshold */ /* The Cyclom-Ye has placed the sequential chips in non-sequential * address order. This look-up table overcomes that problem. 
*/ -static int cy_chip_offset[] = { 0x0000, +static const unsigned int cy_chip_offset[] = { 0x0000, 0x0400, 0x0800, 0x0C00, @@ -1406,15 +1406,9 @@ static int cyz_fetch_msg(struct cyclades_card *cinfo, __u32 *channel, __u8 *cmd, __u32 *param) { - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct BOARD_CTRL __iomem *board_ctrl; + struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; unsigned long loc_doorbell; - firm_id = cinfo->base_addr + ID_ADDRESS; - zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); - board_ctrl = &zfw_ctrl->board_ctrl; - loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell); if (loc_doorbell) { *cmd = (char)(0xff & loc_doorbell); @@ -1430,19 +1424,13 @@ static int cyz_issue_cmd(struct cyclades_card *cinfo, __u32 channel, __u8 cmd, __u32 param) { - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct BOARD_CTRL __iomem *board_ctrl; + struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; __u32 __iomem *pci_doorbell; unsigned int index; - firm_id = cinfo->base_addr + ID_ADDRESS; if (!cyz_is_loaded(cinfo)) return -1; - zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); - board_ctrl = &zfw_ctrl->board_ctrl; - index = 0; pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell; while ((readl(pci_doorbell) & 0xff) != 0) { @@ -1457,9 +1445,9 @@ cyz_issue_cmd(struct cyclades_card *cinfo, return 0; } /* cyz_issue_cmd */ -static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty, - struct BUF_CTRL __iomem *buf_ctrl) +static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty) { + struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; struct cyclades_card *cinfo = info->card; unsigned int char_count; int len; @@ -1549,9 +1537,9 @@ static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty, } } -static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty, - struct BUF_CTRL __iomem *buf_ctrl) +static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty) { + struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; struct cyclades_card *cinfo = info->card; u8 data; unsigned int char_count; @@ -1627,21 +1615,14 @@ ztxdone: static void cyz_handle_cmd(struct cyclades_card *cinfo) { + struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; struct tty_struct *tty; struct cyclades_port *info; - static struct FIRM_ID __iomem *firm_id; - static struct ZFW_CTRL __iomem *zfw_ctrl; - static struct BOARD_CTRL __iomem *board_ctrl; - static struct CH_CTRL __iomem *ch_ctrl; - static struct BUF_CTRL __iomem *buf_ctrl; __u32 channel, param, fw_ver; __u8 cmd; int special_count; int delta_count; - firm_id = cinfo->base_addr + ID_ADDRESS; - zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); - board_ctrl = &zfw_ctrl->board_ctrl; fw_ver = readl(&board_ctrl->fw_version); while (cyz_fetch_msg(cinfo, &channel, &cmd, ¶m) == 1) { @@ -1652,9 +1633,6 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) if (tty == NULL) continue; - ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); - buf_ctrl = &(zfw_ctrl->buf_ctrl[channel]); - switch (cmd) { case C_CM_PR_ERROR: tty_insert_flip_char(tty, 0, TTY_PARITY); @@ -1675,9 +1653,9 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) info->icount.dcd++; delta_count++; if (info->port.flags & ASYNC_CHECK_CD) { - if ((fw_ver > 241 ? ((u_long) param) : - readl(&ch_ctrl->rs_status)) & - C_RS_DCD) { + u32 dcd = fw_ver > 241 ? 
param : + readl(&info->u.cyz.ch_ctrl->rs_status); + if (dcd & C_RS_DCD) { wake_up_interruptible(&info->port.open_wait); } else { tty_hangup(tty); @@ -1712,7 +1690,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, " "port %ld\n", info->card, channel); #endif - cyz_handle_rx(info, tty, buf_ctrl); + cyz_handle_rx(info, tty); break; case C_CM_TXBEMPTY: case C_CM_TXLOWWM: @@ -1722,7 +1700,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, " "port %ld\n", info->card, channel); #endif - cyz_handle_tx(info, tty, buf_ctrl); + cyz_handle_tx(info, tty); break; #endif /* CONFIG_CYZ_INTR */ case C_CM_FATAL: @@ -1781,9 +1759,6 @@ static void cyz_poll(unsigned long arg) { struct cyclades_card *cinfo; struct cyclades_port *info; - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct BUF_CTRL __iomem *buf_ctrl; unsigned long expires = jiffies + HZ; unsigned int port, card; @@ -1795,10 +1770,6 @@ static void cyz_poll(unsigned long arg) if (!cyz_is_loaded(cinfo)) continue; - firm_id = cinfo->base_addr + ID_ADDRESS; - zfw_ctrl = cinfo->base_addr + - (readl(&firm_id->zfwctrl_addr) & 0xfffff); - /* Skip first polling cycle to avoid racing conditions with the FW */ if (!cinfo->intr_enabled) { cinfo->intr_enabled = 1; @@ -1811,15 +1782,13 @@ static void cyz_poll(unsigned long arg) struct tty_struct *tty; info = &cinfo->ports[port]; - buf_ctrl = &(zfw_ctrl->buf_ctrl[port]); - tty = tty_port_tty_get(&info->port); /* OK to pass NULL to the handle functions below. They need to drop the data in that case. */ if (!info->throttle) - cyz_handle_rx(info, tty, buf_ctrl); - cyz_handle_tx(info, tty, buf_ctrl); + cyz_handle_rx(info, tty); + cyz_handle_tx(info, tty); tty_kref_put(tty); } /* poll every 'cyz_polling_cycle' period */ @@ -1922,45 +1891,34 @@ static int cy_startup(struct cyclades_port *info, struct tty_struct *tty) spin_unlock_irqrestore(&card->card_lock, flags); } else { - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct BOARD_CTRL __iomem *board_ctrl; - struct CH_CTRL __iomem *ch_ctrl; - - base_addr = card->base_addr; + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; - firm_id = base_addr + ID_ADDRESS; if (!cyz_is_loaded(card)) return -ENODEV; - zfw_ctrl = card->base_addr + - (readl(&firm_id->zfwctrl_addr) & 0xfffff); - board_ctrl = &zfw_ctrl->board_ctrl; - ch_ctrl = zfw_ctrl->ch_ctrl; - #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc startup Z card %d, channel %d, " - "base_addr %p\n", card, channel, base_addr); + "base_addr %p\n", card, channel, card->base_addr); #endif spin_lock_irqsave(&card->card_lock, flags); - cy_writel(&ch_ctrl[channel].op_mode, C_CH_ENABLE); + cy_writel(&ch_ctrl->op_mode, C_CH_ENABLE); #ifdef Z_WAKE #ifdef CONFIG_CYZ_INTR - cy_writel(&ch_ctrl[channel].intr_enable, + cy_writel(&ch_ctrl->intr_enable, C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM | C_IN_RXNNDT | C_IN_IOCTLW | C_IN_MDCD); #else - cy_writel(&ch_ctrl[channel].intr_enable, + cy_writel(&ch_ctrl->intr_enable, C_IN_IOCTLW | C_IN_MDCD); #endif /* CONFIG_CYZ_INTR */ #else #ifdef CONFIG_CYZ_INTR - cy_writel(&ch_ctrl[channel].intr_enable, + cy_writel(&ch_ctrl->intr_enable, C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM | C_IN_RXNNDT | C_IN_MDCD); #else - cy_writel(&ch_ctrl[channel].intr_enable, C_IN_MDCD); + cy_writel(&ch_ctrl->intr_enable, C_IN_MDCD); #endif /* CONFIG_CYZ_INTR */ #endif /* Z_WAKE */ @@ -1979,9 +1937,8 @@ static int cy_startup(struct 
cyclades_port *info, struct tty_struct *tty) /* set timeout !!! */ /* set RTS and DTR !!! */ - cy_writel(&ch_ctrl[channel].rs_control, - readl(&ch_ctrl[channel].rs_control) | C_RS_RTS | - C_RS_DTR); + cy_writel(&ch_ctrl->rs_control, readl(&ch_ctrl->rs_control) | + C_RS_RTS | C_RS_DTR); retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L); if (retval != 0) { printk(KERN_ERR "cyc:startup(3) retval on ttyC%d was " @@ -2110,27 +2067,17 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) info->port.flags &= ~ASYNC_INITIALIZED; spin_unlock_irqrestore(&card->card_lock, flags); } else { - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct BOARD_CTRL __iomem *board_ctrl; - struct CH_CTRL __iomem *ch_ctrl; + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; int retval; - base_addr = card->base_addr; #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, " - "base_addr %p\n", card, channel, base_addr); + "base_addr %p\n", card, channel, card->base_addr); #endif - firm_id = base_addr + ID_ADDRESS; if (!cyz_is_loaded(card)) return; - zfw_ctrl = card->base_addr + - (readl(&firm_id->zfwctrl_addr) & 0xfffff); - board_ctrl = &zfw_ctrl->board_ctrl; - ch_ctrl = zfw_ctrl->ch_ctrl; - spin_lock_irqsave(&card->card_lock, flags); if (info->port.xmit_buf) { @@ -2141,9 +2088,9 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) } if (tty->termios->c_cflag & HUPCL) { - cy_writel(&ch_ctrl[channel].rs_control, - (__u32)(readl(&ch_ctrl[channel].rs_control) & - ~(C_RS_RTS | C_RS_DTR))); + cy_writel(&ch_ctrl->rs_control, + readl(&ch_ctrl->rs_control) & + ~(C_RS_RTS | C_RS_DTR)); retval = cyz_issue_cmd(info->card, channel, C_CM_IOCTLM, 0L); if (retval != 0) { @@ -2497,15 +2444,11 @@ static void cy_close(struct tty_struct *tty, struct file *filp) #ifdef Z_WAKE /* Waiting for on-board buffers to be empty before closing the port */ - void __iomem *base_addr = card->base_addr; - struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS; - struct ZFW_CTRL __iomem *zfw_ctrl = - base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); - struct CH_CTRL __iomem *ch_ctrl = zfw_ctrl->ch_ctrl; + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; int channel = info->line - card->first_line; int retval; - if (readl(&ch_ctrl[channel].flow_status) != C_FS_TXIDLE) { + if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) { retval = cyz_issue_cmd(card, channel, C_CM_IOCTLW, 0L); if (retval != 0) { printk(KERN_DEBUG "cyc:cy_close retval on " @@ -2685,18 +2628,13 @@ static int cy_write_room(struct tty_struct *tty) static int cy_chars_in_buffer(struct tty_struct *tty) { - struct cyclades_card *card; struct cyclades_port *info = tty->driver_data; - int channel; if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer")) return 0; - card = info->card; - channel = (info->line) - (card->first_line); - #ifdef Z_EXT_CHARS_IN_BUFFER - if (!cy_is_Z(card)) { + if (!cy_is_Z(info->card)) { #endif /* Z_EXT_CHARS_IN_BUFFER */ #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", @@ -2705,20 +2643,11 @@ static int cy_chars_in_buffer(struct tty_struct *tty) return info->xmit_cnt; #ifdef Z_EXT_CHARS_IN_BUFFER } else { - static struct FIRM_ID *firm_id; - static struct ZFW_CTRL *zfw_ctrl; - static struct CH_CTRL *ch_ctrl; - static struct BUF_CTRL *buf_ctrl; + struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; int char_count; __u32 tx_put, tx_get, tx_bufsize; lock_kernel(); - firm_id = card->base_addr + ID_ADDRESS; - 
zfw_ctrl = card->base_addr + - (readl(&firm_id->zfwctrl_addr) & 0xfffff); - ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); - buf_ctrl = &(zfw_ctrl->buf_ctrl[channel]); - tx_get = readl(&buf_ctrl->tx_get); tx_put = readl(&buf_ctrl->tx_put); tx_bufsize = readl(&buf_ctrl->tx_bufsize); @@ -3019,20 +2948,13 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty) spin_unlock_irqrestore(&card->card_lock, flags); } else { - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct CH_CTRL __iomem *ch_ctrl; + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; __u32 sw_flow; int retval; - firm_id = card->base_addr + ID_ADDRESS; if (!cyz_is_loaded(card)) return; - zfw_ctrl = card->base_addr + - (readl(&firm_id->zfwctrl_addr) & 0xfffff); - ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); - /* baud rate */ baud = tty_get_baud_rate(tty); if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == @@ -3268,10 +3190,6 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) unsigned char status; unsigned long lstatus; unsigned int result; - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct BOARD_CTRL __iomem *board_ctrl; - struct CH_CTRL __iomem *ch_ctrl; if (serial_paranoia_check(info, tty->name, __func__)) return -ENODEV; @@ -3304,14 +3222,8 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) ((status & CyDSR) ? TIOCM_DSR : 0) | ((status & CyCTS) ? TIOCM_CTS : 0); } else { - base_addr = card->base_addr; - firm_id = card->base_addr + ID_ADDRESS; if (cyz_is_loaded(card)) { - zfw_ctrl = card->base_addr + - (readl(&firm_id->zfwctrl_addr) & 0xfffff); - board_ctrl = &zfw_ctrl->board_ctrl; - ch_ctrl = zfw_ctrl->ch_ctrl; - lstatus = readl(&ch_ctrl[channel].rs_status); + lstatus = readl(&info->u.cyz.ch_ctrl->rs_status); result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) | ((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) | ((lstatus & C_RS_DCD) ? 
TIOCM_CAR : 0) | @@ -3336,12 +3248,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; int chip, channel, index; - void __iomem *base_addr; unsigned long flags; - struct FIRM_ID __iomem *firm_id; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct BOARD_CTRL __iomem *board_ctrl; - struct CH_CTRL __iomem *ch_ctrl; int retval; if (serial_paranoia_check(info, tty->name, __func__)) @@ -3350,6 +3257,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, card = info->card; channel = (info->line) - (card->first_line); if (!cy_is_Z(card)) { + void __iomem *base_addr; chip = channel >> 2; channel &= 0x03; index = card->bus_index; @@ -3421,34 +3329,26 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, spin_unlock_irqrestore(&card->card_lock, flags); } } else { - base_addr = card->base_addr; - - firm_id = card->base_addr + ID_ADDRESS; if (cyz_is_loaded(card)) { - zfw_ctrl = card->base_addr + - (readl(&firm_id->zfwctrl_addr) & 0xfffff); - board_ctrl = &zfw_ctrl->board_ctrl; - ch_ctrl = zfw_ctrl->ch_ctrl; + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; if (set & TIOCM_RTS) { spin_lock_irqsave(&card->card_lock, flags); - cy_writel(&ch_ctrl[channel].rs_control, - readl(&ch_ctrl[channel].rs_control) | - C_RS_RTS); + cy_writel(&ch_ctrl->rs_control, + readl(&ch_ctrl->rs_control) | C_RS_RTS); spin_unlock_irqrestore(&card->card_lock, flags); } if (clear & TIOCM_RTS) { spin_lock_irqsave(&card->card_lock, flags); - cy_writel(&ch_ctrl[channel].rs_control, - readl(&ch_ctrl[channel].rs_control) & + cy_writel(&ch_ctrl->rs_control, + readl(&ch_ctrl->rs_control) & ~C_RS_RTS); spin_unlock_irqrestore(&card->card_lock, flags); } if (set & TIOCM_DTR) { spin_lock_irqsave(&card->card_lock, flags); - cy_writel(&ch_ctrl[channel].rs_control, - readl(&ch_ctrl[channel].rs_control) | - C_RS_DTR); + cy_writel(&ch_ctrl->rs_control, + readl(&ch_ctrl->rs_control) | C_RS_DTR); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info raising " "Z DTR\n"); @@ -3457,8 +3357,8 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, } if (clear & TIOCM_DTR) { spin_lock_irqsave(&card->card_lock, flags); - cy_writel(&ch_ctrl[channel].rs_control, - readl(&ch_ctrl[channel].rs_control) & + cy_writel(&ch_ctrl->rs_control, + readl(&ch_ctrl->rs_control) & ~C_RS_DTR); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info clearing " @@ -4171,17 +4071,8 @@ static int cyz_carrier_raised(struct tty_port *port) { struct cyclades_port *info = container_of(port, struct cyclades_port, port); - struct cyclades_card *cinfo = info->card; - void __iomem *base = cinfo->base_addr; - struct FIRM_ID __iomem *firm_id = base + ID_ADDRESS; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct CH_CTRL __iomem *ch_ctrl; - int channel = info->line - cinfo->first_line; - zfw_ctrl = base + (readl(&firm_id->zfwctrl_addr) & 0xfffff); - ch_ctrl = zfw_ctrl->ch_ctrl; - - return readl(&ch_ctrl[channel].rs_status) & C_RS_DCD; + return readl(&info->u.cyz.ch_ctrl->rs_status) & C_RS_DCD; } static void cyz_dtr_rts(struct tty_port *port, int raise) @@ -4189,22 +4080,16 @@ static void cyz_dtr_rts(struct tty_port *port, int raise) struct cyclades_port *info = container_of(port, struct cyclades_port, port); struct cyclades_card *cinfo = info->card; - void __iomem *base = cinfo->base_addr; - struct FIRM_ID __iomem *firm_id = base + ID_ADDRESS; - struct ZFW_CTRL __iomem *zfw_ctrl; - struct CH_CTRL __iomem *ch_ctrl; + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; int ret, channel = 
info->line - cinfo->first_line; u32 rs; - zfw_ctrl = base + (readl(&firm_id->zfwctrl_addr) & 0xfffff); - ch_ctrl = zfw_ctrl->ch_ctrl; - - rs = readl(&ch_ctrl[channel].rs_control); + rs = readl(&ch_ctrl->rs_control); if (raise) rs |= C_RS_RTS | C_RS_DTR; else rs &= ~(C_RS_RTS | C_RS_DTR); - cy_writel(&ch_ctrl[channel].rs_control, rs); + cy_writel(&ch_ctrl->rs_control, rs); ret = cyz_issue_cmd(cinfo, channel, C_CM_IOCTLM, 0L); if (ret != 0) printk(KERN_ERR "%s: retval on ttyC%d was %x\n", @@ -4235,8 +4120,7 @@ static const struct tty_port_operations cyz_port_ops = { static int __devinit cy_init_card(struct cyclades_card *cinfo) { struct cyclades_port *info; - unsigned int port; - unsigned short chip_number; + unsigned int channel, port; spin_lock_init(&cinfo->card_lock); cinfo->intr_enabled = 0; @@ -4248,9 +4132,9 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo) return -ENOMEM; } - for (port = cinfo->first_line; port < cinfo->first_line + cinfo->nports; - port++) { - info = &cinfo->ports[port - cinfo->first_line]; + for (channel = 0, port = cinfo->first_line; channel < cinfo->nports; + channel++, port++) { + info = &cinfo->ports[channel]; tty_port_init(&info->port); info->magic = CYCLADES_MAGIC; info->card = cinfo; @@ -4263,8 +4147,17 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo) init_waitqueue_head(&info->delta_msr_wait); if (cy_is_Z(cinfo)) { + struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS; + struct ZFW_CTRL *zfw_ctrl; + info->port.ops = &cyz_port_ops; info->type = PORT_STARTECH; + + zfw_ctrl = cinfo->base_addr + + (readl(&firm_id->zfwctrl_addr) & 0xfffff); + info->u.cyz.ch_ctrl = &zfw_ctrl->ch_ctrl[channel]; + info->u.cyz.buf_ctrl = &zfw_ctrl->buf_ctrl[channel]; + if (cinfo->hw_ver == ZO_V1) info->xmit_fifo_size = CYZ_FIFO_SIZE; else @@ -4274,7 +4167,9 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo) cyz_rx_restart, (unsigned long)info); #endif } else { + unsigned short chip_number; int index = cinfo->bus_index; + info->port.ops = &cyy_port_ops; info->type = PORT_CIRRUS; info->xmit_fifo_size = CyMAX_CHAR_FIFO; @@ -4282,7 +4177,7 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo) info->cor2 = CyETC; info->cor3 = 0x08; /* _very_ small rcv threshold */ - chip_number = (port - cinfo->first_line) / 4; + chip_number = channel / CyPORTS_PER_CHIP; info->chip_rev = readb(cinfo->base_addr + (cy_chip_offset[chip_number] << index) + (CyGFRCR << index)); @@ -4976,8 +4871,14 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev, } cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP; } else { + struct FIRM_ID __iomem *firm_id = addr2 + ID_ADDRESS; + struct ZFW_CTRL __iomem *zfw_ctrl; + + zfw_ctrl = addr2 + (readl(&firm_id->zfwctrl_addr) & 0xfffff); + cy_card[card_no].hw_ver = mailbox; cy_card[card_no].num_chips = (unsigned int)-1; + cy_card[card_no].board_ctrl = &zfw_ctrl->board_ctrl; #ifdef CONFIG_CYZ_INTR /* allocate IRQ only if board has an IRQ */ if (irq != 0 && irq != 255) { diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 1fbdea4f08e..1eb87a6a2f6 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -499,6 +499,7 @@ struct cyclades_card { void __iomem *p9050; struct RUNTIME_9060 __iomem *p9060; } ctl_addr; + struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */ int irq; unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ unsigned int first_line; /* minor number of first channel on card */ @@ -541,6 +542,15 @@ struct cyclades_port { int magic; struct 
tty_port port; struct cyclades_card *card; + union { + struct { + int filler; + } cyy; + struct { + struct CH_CTRL __iomem *ch_ctrl; + struct BUF_CTRL __iomem *buf_ctrl; + } cyz; + } u; int line; int flags; /* defined in tty.h */ int type; /* UART type */ -- cgit v1.2.3-70-g09d2 From 3aeea5b92210083c7cffd4f08a0bb141d3f2d574 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sat, 19 Sep 2009 13:13:16 -0700 Subject: cyclades: introduce cyy_readb/writeb Add helpers for io operations, so that we can eliminate huge amount of supporting code. It is now centralized in those helpers and used values are precomputed in the init phase. Signed-off-by: Jiri Slaby Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/cyclades.c | 448 +++++++++++++++++------------------------------ include/linux/cyclades.h | 2 +- 2 files changed, 165 insertions(+), 285 deletions(-) (limited to 'include') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index ed66c3ab230..cf2874a89c8 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -90,7 +90,6 @@ #include #include -static void cy_throttle(struct tty_struct *tty); static void cy_send_xchar(struct tty_struct *tty, char ch); #ifndef SERIAL_XMIT_SIZE @@ -298,6 +297,20 @@ static void cyz_rx_restart(unsigned long); static struct timer_list cyz_rx_full_timer[NR_PORTS]; #endif /* CONFIG_CYZ_INTR */ +static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val) +{ + struct cyclades_card *card = port->card; + + cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val); +} + +static inline u8 cyy_readb(struct cyclades_port *port, u32 reg) +{ + struct cyclades_card *card = port->card; + + return readb(port->u.cyy.base_addr + (reg << card->bus_index)); +} + static inline bool cy_is_Z(struct cyclades_card *card) { return card->num_chips == (unsigned int)-1; @@ -350,13 +363,14 @@ static inline int serial_paranoia_check(struct cyclades_port *info, This function is only called from inside spinlock-protected code. 
*/ -static int cyy_issue_cmd(void __iomem *base_addr, u_char cmd, int index) +static int __cyy_issue_cmd(void __iomem *base_addr, u8 cmd, int index) { + void __iomem *ccr = base_addr + (CyCCR << index); unsigned int i; /* Check to see that the previous command has completed */ for (i = 0; i < 100; i++) { - if (readb(base_addr + (CyCCR << index)) == 0) + if (readb(ccr) == 0) break; udelay(10L); } @@ -366,10 +380,16 @@ static int cyy_issue_cmd(void __iomem *base_addr, u_char cmd, int index) return -1; /* Issue the new command */ - cy_writeb(base_addr + (CyCCR << index), cmd); + cy_writeb(ccr, cmd); return 0; -} /* cyy_issue_cmd */ +} + +static inline int cyy_issue_cmd(struct cyclades_port *port, u8 cmd) +{ + return __cyy_issue_cmd(port->u.cyy.base_addr, cmd, + port->card->bus_index); +} #ifdef CONFIG_ISA /* ISA interrupt detection code */ @@ -394,7 +414,7 @@ static unsigned detect_isa_irq(void __iomem *address) /* Enable the Tx interrupts on the CD1400 */ local_irq_save(flags); cy_writeb(address + (CyCAR << index), 0); - cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index); + __cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index); cy_writeb(address + (CyCAR << index), 0); cy_writeb(address + (CySRER << index), @@ -428,7 +448,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, struct cyclades_port *info; struct tty_struct *tty; int len, index = cinfo->bus_index; - u8 save_xir, channel, save_car, data, char_count; + u8 ivr, save_xir, channel, save_car, data, char_count; #ifdef CY_DEBUG_INTERRUPTS printk(KERN_DEBUG "cyy_interrupt: rcvd intr, chip %d\n", chip); @@ -437,26 +457,25 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, save_xir = readb(base_addr + (CyRIR << index)); channel = save_xir & CyIRChannel; info = &cinfo->ports[channel + chip * 4]; - save_car = readb(base_addr + (CyCAR << index)); - cy_writeb(base_addr + (CyCAR << index), save_xir); + save_car = cyy_readb(info, CyCAR); + cyy_writeb(info, CyCAR, save_xir); + ivr = cyy_readb(info, CyRIVR) & CyIVRMask; tty = tty_port_tty_get(&info->port); /* if there is nowhere to put the data, discard it */ if (tty == NULL) { - if ((readb(base_addr + (CyRIVR << index)) & CyIVRMask) == - CyIVRRxEx) { /* exception */ - data = readb(base_addr + (CyRDSR << index)); + if (ivr == CyIVRRxEx) { /* exception */ + data = cyy_readb(info, CyRDSR); } else { /* normal character reception */ - char_count = readb(base_addr + (CyRDCR << index)); + char_count = cyy_readb(info, CyRDCR); while (char_count--) - data = readb(base_addr + (CyRDSR << index)); + data = cyy_readb(info, CyRDSR); } goto end; } /* there is an open port for this data */ - if ((readb(base_addr + (CyRIVR << index)) & CyIVRMask) == - CyIVRRxEx) { /* exception */ - data = readb(base_addr + (CyRDSR << index)); + if (ivr == CyIVRRxEx) { /* exception */ + data = cyy_readb(info, CyRDSR); /* For statistics only */ if (data & CyBREAK) @@ -477,22 +496,22 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, if (data & info->read_status_mask) { if (data & CyBREAK) { tty_insert_flip_char(tty, - readb(base_addr + (CyRDSR << - index)), TTY_BREAK); + cyy_readb(info, CyRDSR), + TTY_BREAK); info->icount.rx++; if (info->port.flags & ASYNC_SAK) do_SAK(tty); } else if (data & CyFRAME) { tty_insert_flip_char(tty, - readb(base_addr + (CyRDSR << - index)), TTY_FRAME); + cyy_readb(info, CyRDSR), + TTY_FRAME); info->icount.rx++; info->idle_stats.frame_errs++; } else if (data & CyPARITY) { /* Pieces of seven... 
*/ tty_insert_flip_char(tty, - readb(base_addr + (CyRDSR << - index)), TTY_PARITY); + cyy_readb(info, CyRDSR), + TTY_PARITY); info->icount.rx++; info->idle_stats.parity_errs++; } else if (data & CyOVERRUN) { @@ -504,8 +523,8 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, the next incoming character. */ tty_insert_flip_char(tty, - readb(base_addr + (CyRDSR << - index)), TTY_FRAME); + cyy_readb(info, CyRDSR), + TTY_FRAME); info->icount.rx++; info->idle_stats.overruns++; /* These two conditions may imply */ @@ -529,7 +548,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, } } else { /* normal character reception */ /* load # chars available from the chip */ - char_count = readb(base_addr + (CyRDCR << index)); + char_count = cyy_readb(info, CyRDCR); #ifdef CY_ENABLE_MONITORING ++info->mon.int_count; @@ -540,7 +559,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, #endif len = tty_buffer_request_room(tty, char_count); while (len--) { - data = readb(base_addr + (CyRDSR << index)); + data = cyy_readb(info, CyRDSR); tty_insert_flip_char(tty, data, TTY_NORMAL); info->idle_stats.recv_bytes++; info->icount.rx++; @@ -554,8 +573,8 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, tty_kref_put(tty); end: /* end of service */ - cy_writeb(base_addr + (CyRIR << index), save_xir & 0x3f); - cy_writeb(base_addr + (CyCAR << index), save_car); + cyy_writeb(info, CyRIR, save_xir & 0x3f); + cyy_writeb(info, CyCAR, save_car); } static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, @@ -588,8 +607,7 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, info = &cinfo->ports[channel + chip * 4]; tty = tty_port_tty_get(&info->port); if (tty == NULL) { - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & ~CyTxRdy); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); goto end; } @@ -598,7 +616,7 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, if (info->x_char) { /* send special char */ outch = info->x_char; - cy_writeb(base_addr + (CyTDR << index), outch); + cyy_writeb(info, CyTDR, outch); char_count--; info->icount.tx++; info->x_char = 0; @@ -606,14 +624,14 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, if (info->breakon || info->breakoff) { if (info->breakon) { - cy_writeb(base_addr + (CyTDR << index), 0); - cy_writeb(base_addr + (CyTDR << index), 0x81); + cyy_writeb(info, CyTDR, 0); + cyy_writeb(info, CyTDR, 0x81); info->breakon = 0; char_count -= 2; } if (info->breakoff) { - cy_writeb(base_addr + (CyTDR << index), 0); - cy_writeb(base_addr + (CyTDR << index), 0x83); + cyy_writeb(info, CyTDR, 0); + cyy_writeb(info, CyTDR, 0x83); info->breakoff = 0; char_count -= 2; } @@ -621,27 +639,23 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, while (char_count-- > 0) { if (!info->xmit_cnt) { - if (readb(base_addr + (CySRER << index)) & CyTxMpty) { - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & - ~CyTxMpty); + if (cyy_readb(info, CySRER) & CyTxMpty) { + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxMpty); } else { - cy_writeb(base_addr + (CySRER << index), - (readb(base_addr + (CySRER << index)) & - ~CyTxRdy) | CyTxMpty); + cyy_writeb(info, CySRER, CyTxMpty | + (cyy_readb(info, CySRER) & ~CyTxRdy)); } goto done; } if (info->port.xmit_buf == NULL) { - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & - ~CyTxRdy); + 
cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxRdy); goto done; } if (tty->stopped || tty->hw_stopped) { - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & - ~CyTxRdy); + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxRdy); goto done; } /* Because the Embedded Transmit Commands have been enabled, @@ -658,15 +672,15 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, info->xmit_cnt--; info->xmit_tail = (info->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1); - cy_writeb(base_addr + (CyTDR << index), outch); + cyy_writeb(info, CyTDR, outch); info->icount.tx++; } else { if (char_count > 1) { info->xmit_cnt--; info->xmit_tail = (info->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1); - cy_writeb(base_addr + (CyTDR << index), outch); - cy_writeb(base_addr + (CyTDR << index), 0); + cyy_writeb(info, CyTDR, outch); + cyy_writeb(info, CyTDR, 0); info->icount.tx++; char_count--; } @@ -678,8 +692,8 @@ done: tty_kref_put(tty); end: /* end of service */ - cy_writeb(base_addr + (CyTIR << index), save_xir & 0x3f); - cy_writeb(base_addr + (CyCAR << index), save_car); + cyy_writeb(info, CyTIR, save_xir & 0x3f); + cyy_writeb(info, CyCAR, save_car); } static void cyy_chip_modem(struct cyclades_card *cinfo, int chip, @@ -694,11 +708,11 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip, save_xir = readb(base_addr + (CyMIR << index)); channel = save_xir & CyIRChannel; info = &cinfo->ports[channel + chip * 4]; - save_car = readb(base_addr + (CyCAR << index)); - cy_writeb(base_addr + (CyCAR << index), save_xir); + save_car = cyy_readb(info, CyCAR); + cyy_writeb(info, CyCAR, save_xir); - mdm_change = readb(base_addr + (CyMISR << index)); - mdm_status = readb(base_addr + (CyMSVR1 << index)); + mdm_change = cyy_readb(info, CyMISR); + mdm_status = cyy_readb(info, CyMSVR1); tty = tty_port_tty_get(&info->port); if (!tty) @@ -730,9 +744,8 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip, /* cy_start isn't used because... !!! */ tty->hw_stopped = 0; - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) | - CyTxRdy); + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) | CyTxRdy); tty_wakeup(tty); } } else { @@ -740,9 +753,8 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip, /* cy_stop isn't used because ... !!! 
*/ tty->hw_stopped = 1; - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & - ~CyTxRdy); + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxRdy); } } } @@ -753,8 +765,8 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip, tty_kref_put(tty); end: /* end of service */ - cy_writeb(base_addr + (CyMIR << index), save_xir & 0x3f); - cy_writeb(base_addr + (CyCAR << index), save_car); + cyy_writeb(info, CyMIR, save_xir & 0x3f); + cyy_writeb(info, CyCAR, save_car); } /* The real interrupt service routine is called @@ -829,15 +841,10 @@ static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set, unsigned int clear) { struct cyclades_card *card = info->card; - void __iomem *base_addr; - int chip, channel, index; + int channel = info->line - card->first_line; u32 rts, dtr, msvrr, msvrd; - channel = info->line - card->first_line; - chip = channel >> 2; channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); if (info->rtsdtr_inv) { msvrr = CyMSVR2; @@ -851,31 +858,31 @@ static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set, dtr = CyDTR; } if (set & TIOCM_RTS) { - cy_writeb(base_addr + (CyCAR << index), (u8)channel); - cy_writeb(base_addr + (msvrr << index), rts); + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrr, rts); } if (clear & TIOCM_RTS) { - cy_writeb(base_addr + (CyCAR << index), (u8)channel); - cy_writeb(base_addr + (msvrr << index), ~rts); + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrr, ~rts); } if (set & TIOCM_DTR) { - cy_writeb(base_addr + (CyCAR << index), (u8)channel); - cy_writeb(base_addr + (msvrd << index), dtr); + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrd, dtr); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n"); printk(KERN_DEBUG " status: 0x%x, 0x%x\n", - readb(base_addr + (CyMSVR1 << index)), - readb(base_addr + (CyMSVR2 << index))); + cyy_readb(info, CyMSVR1), + cyy_readb(info, CyMSVR2)); #endif } if (clear & TIOCM_DTR) { - cy_writeb(base_addr + (CyCAR << index), (u8)channel); - cy_writeb(base_addr + (msvrd << index), ~dtr); + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrd, ~dtr); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n"); printk(KERN_DEBUG " status: 0x%x, 0x%x\n", - readb(base_addr + (CyMSVR1 << index)), - readb(base_addr + (CyMSVR2 << index))); + cyy_readb(info, CyMSVR1), + cyy_readb(info, CyMSVR2)); #endif } } @@ -1290,7 +1297,6 @@ static int cy_startup(struct cyclades_port *info, struct tty_struct *tty) struct cyclades_card *card; unsigned long flags; int retval = 0; - void __iomem *base_addr; int channel; unsigned long page; @@ -1321,31 +1327,21 @@ static int cy_startup(struct cyclades_port *info, struct tty_struct *tty) cy_set_line_char(info, tty); if (!cy_is_Z(card)) { - int chip = channel >> 2; - int index = card->bus_index; channel &= 0x03; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); -#ifdef CY_DEBUG_OPEN - printk(KERN_DEBUG "cyc startup card %d, chip %d, channel %d, " - "base_addr %p\n", - card, chip, channel, base_addr); -#endif spin_lock_irqsave(&card->card_lock, flags); - cy_writeb(base_addr + (CyCAR << index), (u_char) channel); + cyy_writeb(info, CyCAR, channel); - cy_writeb(base_addr + (CyRTPR << index), + cyy_writeb(info, CyRTPR, (info->default_timeout ? 
info->default_timeout : 0x02)); /* 10ms rx timeout */ - cyy_issue_cmd(base_addr, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR, - index); + cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR); cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0); - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) | CyRxData); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyRxData); } else { struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; @@ -1423,23 +1419,14 @@ errout: static void start_xmit(struct cyclades_port *info) { - struct cyclades_card *card; + struct cyclades_card *card = info->card; unsigned long flags; - void __iomem *base_addr; - int chip, channel, index; + int channel = info->line - card->first_line; - card = info->card; - channel = info->line - card->first_line; if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); - spin_lock_irqsave(&card->card_lock, flags); - cy_writeb(base_addr + (CyCAR << index), channel); - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) | CyTxRdy); + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy); spin_unlock_irqrestore(&card->card_lock, flags); } else { #ifdef CONFIG_CYZ_INTR @@ -1466,8 +1453,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) { struct cyclades_card *card; unsigned long flags; - void __iomem *base_addr; - int chip, channel, index; + int channel; if (!(info->port.flags & ASYNC_INITIALIZED)) return; @@ -1475,17 +1461,6 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) card = info->card; channel = info->line - card->first_line; if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); - -#ifdef CY_DEBUG_OPEN - printk(KERN_DEBUG "cyc shutdown Y card %d, chip %d, " - "channel %d, base_addr %p\n", - card, chip, channel, base_addr); -#endif - spin_lock_irqsave(&card->card_lock, flags); /* Clear delta_msr_wait queue to avoid mem leaks. */ @@ -1500,7 +1475,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) if (tty->termios->c_cflag & HUPCL) cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR); - cyy_issue_cmd(base_addr, CyCHAN_CTL | CyDIS_RCVR, index); + cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR); /* it may be appropriate to clear _XMIT at some later date (after testing)!!! 
*/ @@ -1677,8 +1652,6 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) { struct cyclades_card *card; struct cyclades_port *info = tty->driver_data; - void __iomem *base_addr; - int chip, channel, index; unsigned long orig_jiffies; int char_time; @@ -1722,13 +1695,8 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) timeout, char_time, jiffies); #endif card = info->card; - channel = (info->line) - (card->first_line); if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); - while (readb(base_addr + (CySRER << index)) & CyTxRdy) { + while (cyy_readb(info, CySRER) & CyTxRdy) { #ifdef CY_DEBUG_WAIT_UNTIL_SENT printk(KERN_DEBUG "Not clean (jiff=%lu)...", jiffies); #endif @@ -1790,6 +1758,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp) struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; unsigned long flags; + int channel; if (!info || serial_paranoia_check(info, tty->name, "cy_close")) return; @@ -1799,18 +1768,13 @@ static void cy_close(struct tty_struct *tty, struct file *filp) if (!tty_port_close_start(&info->port, tty, filp)) return; + channel = info->line - card->first_line; spin_lock_irqsave(&card->card_lock, flags); if (!cy_is_Z(card)) { - int channel = info->line - card->first_line; - int index = card->bus_index; - void __iomem *base_addr = card->base_addr + - (cy_chip_offset[channel >> 2] << index); /* Stop accepting input */ - channel &= 0x03; - cy_writeb(base_addr + (CyCAR << index), (u_char) channel); - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & ~CyRxData); + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData); if (info->port.flags & ASYNC_INITIALIZED) { /* Waiting for on-board buffers to be empty before closing the port */ @@ -1823,7 +1787,6 @@ static void cy_close(struct tty_struct *tty, struct file *filp) /* Waiting for on-board buffers to be empty before closing the port */ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; - int channel = info->line - card->first_line; int retval; if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) { @@ -2064,8 +2027,7 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty) { struct cyclades_card *card; unsigned long flags; - void __iomem *base_addr; - int chip, channel, index; + int channel; unsigned cflag, iflag; int baud, baud_rate = 0; int i; @@ -2095,9 +2057,6 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty) channel = info->line - card->first_line; if (!cy_is_Z(card)) { - - index = card->bus_index; - /* baud rate */ baud = tty_get_baud_rate(tty); if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == @@ -2208,70 +2167,67 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty) cable. Contact Marcio Saito for details. 
***********************************************/ - chip = channel >> 2; channel &= 0x03; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); spin_lock_irqsave(&card->card_lock, flags); - cy_writeb(base_addr + (CyCAR << index), (u_char) channel); + cyy_writeb(info, CyCAR, channel); /* tx and rx baud rate */ - cy_writeb(base_addr + (CyTCOR << index), info->tco); - cy_writeb(base_addr + (CyTBPR << index), info->tbpr); - cy_writeb(base_addr + (CyRCOR << index), info->rco); - cy_writeb(base_addr + (CyRBPR << index), info->rbpr); + cyy_writeb(info, CyTCOR, info->tco); + cyy_writeb(info, CyTBPR, info->tbpr); + cyy_writeb(info, CyRCOR, info->rco); + cyy_writeb(info, CyRBPR, info->rbpr); /* set line characteristics according configuration */ - cy_writeb(base_addr + (CySCHR1 << index), START_CHAR(tty)); - cy_writeb(base_addr + (CySCHR2 << index), STOP_CHAR(tty)); - cy_writeb(base_addr + (CyCOR1 << index), info->cor1); - cy_writeb(base_addr + (CyCOR2 << index), info->cor2); - cy_writeb(base_addr + (CyCOR3 << index), info->cor3); - cy_writeb(base_addr + (CyCOR4 << index), info->cor4); - cy_writeb(base_addr + (CyCOR5 << index), info->cor5); + cyy_writeb(info, CySCHR1, START_CHAR(tty)); + cyy_writeb(info, CySCHR2, STOP_CHAR(tty)); + cyy_writeb(info, CyCOR1, info->cor1); + cyy_writeb(info, CyCOR2, info->cor2); + cyy_writeb(info, CyCOR3, info->cor3); + cyy_writeb(info, CyCOR4, info->cor4); + cyy_writeb(info, CyCOR5, info->cor5); - cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch | - CyCOR3ch, index); + cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch | + CyCOR3ch); /* !!! Is this needed? */ - cy_writeb(base_addr + (CyCAR << index), (u_char) channel); - cy_writeb(base_addr + (CyRTPR << index), + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, CyRTPR, (info->default_timeout ? 
info->default_timeout : 0x02)); /* 10ms rx timeout */ if (C_CLOCAL(tty)) { /* without modem intr */ - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) | CyMdmCh); + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) | CyMdmCh); /* act on 1->0 modem transitions */ if ((cflag & CRTSCTS) && info->rflow) { - cy_writeb(base_addr + (CyMCOR1 << index), + cyy_writeb(info, CyMCOR1, (CyCTS | rflow_thr[i])); } else { - cy_writeb(base_addr + (CyMCOR1 << index), + cyy_writeb(info, CyMCOR1, CyCTS); } /* act on 0->1 modem transitions */ - cy_writeb(base_addr + (CyMCOR2 << index), CyCTS); + cyy_writeb(info, CyMCOR2, CyCTS); } else { /* without modem intr */ - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + - (CySRER << index)) | CyMdmCh); + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) | CyMdmCh); /* act on 1->0 modem transitions */ if ((cflag & CRTSCTS) && info->rflow) { - cy_writeb(base_addr + (CyMCOR1 << index), + cyy_writeb(info, CyMCOR1, (CyDSR | CyCTS | CyRI | CyDCD | rflow_thr[i])); } else { - cy_writeb(base_addr + (CyMCOR1 << index), - CyDSR | CyCTS | CyRI | CyDCD); + cyy_writeb(info, CyMCOR1, + CyDSR | CyCTS | CyRI | CyDCD); } /* act on 0->1 modem transitions */ - cy_writeb(base_addr + (CyMCOR2 << index), - CyDSR | CyCTS | CyRI | CyDCD); + cyy_writeb(info, CyMCOR2, + CyDSR | CyCTS | CyRI | CyDCD); } if (i == 0) /* baud rate is zero, turn off line */ @@ -2482,24 +2438,14 @@ check_and_exit: */ static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value) { - struct cyclades_card *card; - int chip, channel, index; - unsigned char status; + struct cyclades_card *card = info->card; unsigned int result; unsigned long flags; - void __iomem *base_addr; + u8 status; - card = info->card; - channel = (info->line) - (card->first_line); if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); - spin_lock_irqsave(&card->card_lock, flags); - status = readb(base_addr + (CySRER << index)) & - (CyTxRdy | CyTxMpty); + status = cyy_readb(info, CySRER) & (CyTxRdy | CyTxMpty); spin_unlock_irqrestore(&card->card_lock, flags); result = (status ? 
0 : TIOCSER_TEMT); } else { @@ -2513,29 +2459,23 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; - void __iomem *base_addr; - int result, channel; + int result; if (serial_paranoia_check(info, tty->name, __func__)) return -ENODEV; card = info->card; - channel = info->line - card->first_line; lock_kernel(); if (!cy_is_Z(card)) { unsigned long flags; - unsigned char status; - int chip = channel >> 2; - int index = card->bus_index; - - channel &= 0x03; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); + int channel = info->line - card->first_line; + u8 status; spin_lock_irqsave(&card->card_lock, flags); - cy_writeb(base_addr + (CyCAR << index), (u8)channel); - status = readb(base_addr + (CyMSVR1 << index)); - status |= readb(base_addr + (CyMSVR2 << index)); + cyy_writeb(info, CyCAR, channel & 0x03); + status = cyy_readb(info, CyMSVR1); + status |= cyy_readb(info, CyMSVR2); spin_unlock_irqrestore(&card->card_lock, flags); if (info->rtsdtr_inv) { @@ -2689,26 +2629,16 @@ static int cy_break(struct tty_struct *tty, int break_state) static int set_threshold(struct cyclades_port *info, unsigned long value) { - struct cyclades_card *card; - void __iomem *base_addr; - int channel, chip, index; + struct cyclades_card *card = info->card; unsigned long flags; - card = info->card; - channel = info->line - card->first_line; if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = - card->base_addr + (cy_chip_offset[chip] << index); - info->cor3 &= ~CyREC_FIFO; info->cor3 |= value & CyREC_FIFO; spin_lock_irqsave(&card->card_lock, flags); - cy_writeb(base_addr + (CyCOR3 << index), info->cor3); - cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR3ch, index); + cyy_writeb(info, CyCOR3, info->cor3); + cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR3ch); spin_unlock_irqrestore(&card->card_lock, flags); } return 0; @@ -2717,20 +2647,10 @@ static int set_threshold(struct cyclades_port *info, unsigned long value) static int get_threshold(struct cyclades_port *info, unsigned long __user *value) { - struct cyclades_card *card; - void __iomem *base_addr; - int channel, chip, index; - unsigned long tmp; + struct cyclades_card *card = info->card; - card = info->card; - channel = info->line - card->first_line; if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); - - tmp = readb(base_addr + (CyCOR3 << index)) & CyREC_FIFO; + u8 tmp = cyy_readb(info, CyCOR3) & CyREC_FIFO; return put_user(tmp, value); } return 0; @@ -2738,21 +2658,12 @@ static int get_threshold(struct cyclades_port *info, static int set_timeout(struct cyclades_port *info, unsigned long value) { - struct cyclades_card *card; - void __iomem *base_addr; - int channel, chip, index; + struct cyclades_card *card = info->card; unsigned long flags; - card = info->card; - channel = info->line - card->first_line; if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); - spin_lock_irqsave(&card->card_lock, flags); - cy_writeb(base_addr + (CyRTPR << index), value & 0xff); + cyy_writeb(info, CyRTPR, value & 0xff); spin_unlock_irqrestore(&card->card_lock, flags); } return 0; @@ -2761,20 +2672,10 @@ static int set_timeout(struct cyclades_port *info, unsigned long value) static int get_timeout(struct cyclades_port *info, 
unsigned long __user *value) { - struct cyclades_card *card; - void __iomem *base_addr; - int channel, chip, index; - unsigned long tmp; + struct cyclades_card *card = info->card; - card = info->card; - channel = info->line - card->first_line; if (!cy_is_Z(card)) { - chip = channel >> 2; - channel &= 0x03; - index = card->bus_index; - base_addr = card->base_addr + (cy_chip_offset[chip] << index); - - tmp = readb(base_addr + (CyRTPR << index)); + u8 tmp = cyy_readb(info, CyRTPR); return put_user(tmp, value); } return 0; @@ -3101,8 +3002,7 @@ static void cy_stop(struct tty_struct *tty) { struct cyclades_card *cinfo; struct cyclades_port *info = tty->driver_data; - void __iomem *base_addr; - int chip, channel, index; + int channel; unsigned long flags; #ifdef CY_DEBUG_OTHER @@ -3115,16 +3015,9 @@ static void cy_stop(struct tty_struct *tty) cinfo = info->card; channel = info->line - cinfo->first_line; if (!cy_is_Z(cinfo)) { - index = cinfo->bus_index; - chip = channel >> 2; - channel &= 0x03; - base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); - spin_lock_irqsave(&cinfo->card_lock, flags); - cy_writeb(base_addr + (CyCAR << index), - (u_char)(channel & 0x0003)); /* index channel */ - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & ~CyTxRdy); + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); spin_unlock_irqrestore(&cinfo->card_lock, flags); } } /* cy_stop */ @@ -3133,8 +3026,7 @@ static void cy_start(struct tty_struct *tty) { struct cyclades_card *cinfo; struct cyclades_port *info = tty->driver_data; - void __iomem *base_addr; - int chip, channel, index; + int channel; unsigned long flags; #ifdef CY_DEBUG_OTHER @@ -3146,17 +3038,10 @@ static void cy_start(struct tty_struct *tty) cinfo = info->card; channel = info->line - cinfo->first_line; - index = cinfo->bus_index; if (!cy_is_Z(cinfo)) { - chip = channel >> 2; - channel &= 0x03; - base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); - spin_lock_irqsave(&cinfo->card_lock, flags); - cy_writeb(base_addr + (CyCAR << index), - (u_char) (channel & 0x0003)); /* index channel */ - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) | CyTxRdy); + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy); spin_unlock_irqrestore(&cinfo->card_lock, flags); } } /* cy_start */ @@ -3185,18 +3070,13 @@ static int cyy_carrier_raised(struct tty_port *port) struct cyclades_port *info = container_of(port, struct cyclades_port, port); struct cyclades_card *cinfo = info->card; - void __iomem *base = cinfo->base_addr; unsigned long flags; int channel = info->line - cinfo->first_line; - int chip = channel >> 2, index = cinfo->bus_index; u32 cd; - channel &= 0x03; - base += cy_chip_offset[chip] << index; - spin_lock_irqsave(&cinfo->card_lock, flags); - cy_writeb(base + (CyCAR << index), (u8)channel); - cd = readb(base + (CyMSVR1 << index)) & CyDCD; + cyy_writeb(info, CyCAR, channel & 0x03); + cd = cyy_readb(info, CyMSVR1) & CyDCD; spin_unlock_irqrestore(&cinfo->card_lock, flags); return cd; @@ -3326,9 +3206,9 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo) info->cor3 = 0x08; /* _very_ small rcv threshold */ chip_number = channel / CyPORTS_PER_CHIP; - info->chip_rev = readb(cinfo->base_addr + - (cy_chip_offset[chip_number] << index) + - (CyGFRCR << index)); + info->u.cyy.base_addr = cinfo->base_addr + + (cy_chip_offset[chip_number] << index); + info->chip_rev = 
cyy_readb(info, CyGFRCR); if (info->chip_rev >= CD1400_REV_J) { /* It is a CD1400 rev. J or later */ diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 1eb87a6a2f6..bbebef7713b 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -544,7 +544,7 @@ struct cyclades_port { struct cyclades_card *card; union { struct { - int filler; + void __iomem *base_addr; } cyy; struct { struct CH_CTRL __iomem *ch_ctrl; -- cgit v1.2.3-70-g09d2 From f8a7c1a976a6672204c7f4f0f694f33715dfa617 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:17 -0700 Subject: kfifo: Use "const" definitions Currently kfifo cannot be used by parts of the kernel that use "const" properly as kfifo itself does not use const for passed data blocks which are indeed const. Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- include/linux/kfifo.h | 4 ++-- kernel/kfifo.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 29f62e1733f..ad6bdf5a597 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -38,7 +38,7 @@ extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock); extern void kfifo_free(struct kfifo *fifo); extern unsigned int __kfifo_put(struct kfifo *fifo, - unsigned char *buffer, unsigned int len); + const unsigned char *buffer, unsigned int len); extern unsigned int __kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len); @@ -77,7 +77,7 @@ static inline void kfifo_reset(struct kfifo *fifo) * bytes copied. */ static inline unsigned int kfifo_put(struct kfifo *fifo, - unsigned char *buffer, unsigned int len) + const unsigned char *buffer, unsigned int len) { unsigned long flags; unsigned int ret; diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 26539e3228e..3765ff3c1bb 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c @@ -117,7 +117,7 @@ EXPORT_SYMBOL(kfifo_free); * writer, you don't need extra locking to use these functions. 
*/ unsigned int __kfifo_put(struct kfifo *fifo, - unsigned char *buffer, unsigned int len) + const unsigned char *buffer, unsigned int len) { unsigned int l; -- cgit v1.2.3-70-g09d2 From 1c2f04937b3e397a5695953c6b82aa4c77d21eb8 Mon Sep 17 00:00:00 2001 From: Vikram Pandita Date: Sat, 19 Sep 2009 13:13:19 -0700 Subject: serial: 8250: add IRQ trigger support There is currently no provision for passing IRQ trigger flags for serial IRQs with triggering requirements (such as GPIO IRQs) This patch adds irqflags to plat_serial8250_port that can be passed from board file to reqest_irq() of 8250 driver Changes are backward compatible with boards passing UPF_SHARE_IRQ flag Tested on Zoom2 board that has IRQF_TRIGGER_RISING requirement for 8250 irq [Moved new flag to end to fix bugs in the original with the old_serial array -- Alan] Signed-off-by: Vikram Pandita Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/serial/8250.c | 14 +++++++++----- drivers/serial/8250.h | 1 + include/linux/serial_8250.h | 1 + include/linux/serial_core.h | 1 + 4 files changed, 12 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index fb867a9f55e..83168a6c3c0 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -1677,7 +1677,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up) INIT_LIST_HEAD(&up->list); i->head = &up->list; spin_unlock_irq(&i->lock); - + irq_flags |= up->port.irqflags; ret = request_irq(up->port.irq, serial8250_interrupt, irq_flags, "serial", i); if (ret < 0) @@ -2026,7 +2026,7 @@ static int serial8250_startup(struct uart_port *port) * allow register changes to become visible. */ spin_lock_irqsave(&up->port.lock, flags); - if (up->port.flags & UPF_SHARE_IRQ) + if (up->port.irqflags & IRQF_SHARED) disable_irq_nosync(up->port.irq); wait_for_xmitr(up, UART_LSR_THRE); @@ -2039,7 +2039,7 @@ static int serial8250_startup(struct uart_port *port) iir = serial_in(up, UART_IIR); serial_out(up, UART_IER, 0); - if (up->port.flags & UPF_SHARE_IRQ) + if (up->port.irqflags & IRQF_SHARED) enable_irq(up->port.irq); spin_unlock_irqrestore(&up->port.lock, flags); @@ -2671,6 +2671,7 @@ static void __init serial8250_isa_init_ports(void) i++, up++) { up->port.iobase = old_serial_port[i].port; up->port.irq = irq_canonicalize(old_serial_port[i].irq); + up->port.irqflags = old_serial_port[i].irqflags; up->port.uartclk = old_serial_port[i].baud_base * 16; up->port.flags = old_serial_port[i].flags; up->port.hub6 = old_serial_port[i].hub6; @@ -2679,7 +2680,7 @@ static void __init serial8250_isa_init_ports(void) up->port.regshift = old_serial_port[i].iomem_reg_shift; set_io_from_upio(&up->port); if (share_irqs) - up->port.flags |= UPF_SHARE_IRQ; + up->port.irqflags |= IRQF_SHARED; } } @@ -2869,6 +2870,7 @@ int __init early_serial_setup(struct uart_port *port) p->iobase = port->iobase; p->membase = port->membase; p->irq = port->irq; + p->irqflags = port->irqflags; p->uartclk = port->uartclk; p->fifosize = port->fifosize; p->regshift = port->regshift; @@ -2942,6 +2944,7 @@ static int __devinit serial8250_probe(struct platform_device *dev) port.iobase = p->iobase; port.membase = p->membase; port.irq = p->irq; + port.irqflags = p->irqflags; port.uartclk = p->uartclk; port.regshift = p->regshift; port.iotype = p->iotype; @@ -2954,7 +2957,7 @@ static int __devinit serial8250_probe(struct platform_device *dev) port.serial_out = p->serial_out; port.dev = &dev->dev; if (share_irqs) - port.flags |= UPF_SHARE_IRQ; + port.irqflags |= 
IRQF_SHARED; ret = serial8250_register_port(&port); if (ret < 0) { dev_err(&dev->dev, "unable to register port at index %d " @@ -3096,6 +3099,7 @@ int serial8250_register_port(struct uart_port *port) uart->port.iobase = port->iobase; uart->port.membase = port->membase; uart->port.irq = port->irq; + uart->port.irqflags = port->irqflags; uart->port.uartclk = port->uartclk; uart->port.fifosize = port->fifosize; uart->port.regshift = port->regshift; diff --git a/drivers/serial/8250.h b/drivers/serial/8250.h index 520260326f3..6e19ea3e48d 100644 --- a/drivers/serial/8250.h +++ b/drivers/serial/8250.h @@ -25,6 +25,7 @@ struct old_serial_port { unsigned char io_type; unsigned char *iomem_base; unsigned short iomem_reg_shift; + unsigned long irqflags; }; /* diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index d4d2a78ad43..fb46aba11fb 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -22,6 +22,7 @@ struct plat_serial8250_port { void __iomem *membase; /* ioremap cookie or NULL */ resource_size_t mapbase; /* resource base */ unsigned int irq; /* interrupt number */ + unsigned long irqflags; /* request_irq flags */ unsigned int uartclk; /* UART clock rate */ void *private_data; unsigned char regshift; /* register shift */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 23d2fb051f9..3cd255f0b21 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -265,6 +265,7 @@ struct uart_port { unsigned int (*serial_in)(struct uart_port *, int); void (*serial_out)(struct uart_port *, int, int); unsigned int irq; /* irq number */ + unsigned long irqflags; /* irq flags */ unsigned int uartclk; /* base uart clock */ unsigned int fifosize; /* tx fifo size */ unsigned char x_char; /* xon/xoff char */ -- cgit v1.2.3-70-g09d2 From 7ca0ff9ab3218ec443a7a9ad247e4650373ed41e Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:20 -0700 Subject: tty: Add a full port_close function Now we are extracting out methods for shutdown and the like we can add a proper tty_port_close method that knows all the innards of the tty closing process and hides the lot from the caller. At some point in the future this will be paired with a similar open() helper and the drivers can stick to hardware management. 
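[ Illustration, not part of this patch: a minimal sketch of how a driver might use the new helper. The example_* names are hypothetical; only tty_port_close() and the tty_port_operations hooks come from this change. A driver that moves its hardware teardown into ->shutdown() can reduce its tty ->close() to a single call: ]

static void example_port_shutdown(struct tty_port *port)
{
	struct example_port *ep = container_of(port, struct example_port, port);

	/* hardware-only teardown; invoked once the last close or a hangup
	   completes, and only if the port was initialized */
	example_stop_rx_tx(ep);
}

static const struct tty_port_operations example_port_ops = {
	.carrier_raised	= example_carrier_raised,
	.dtr_rts	= example_dtr_rts,
	.shutdown	= example_port_shutdown,
};

static void example_close(struct tty_struct *tty, struct file *filp)
{
	struct example_port *ep = tty->driver_data;

	/* drains and waits as configured, calls ->shutdown(), then wakes
	   waiters and clears port->tty, as open-coded drivers did before */
	tty_port_close(&ep->port, tty, filp);
}
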
Signed-off-by: Alan Cox Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_port.c | 29 +++++++++++++++++++++++++++-- include/linux/tty.h | 8 +++++++- 2 files changed, 34 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 9769b1149f7..549bd0fa8bb 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -96,6 +96,14 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty) } EXPORT_SYMBOL(tty_port_tty_set); +static void tty_port_shutdown(struct tty_port *port) +{ + if (port->ops->shutdown && + test_and_clear_bit(ASYNC_INITIALIZED, &port->flags)) + port->ops->shutdown(port); + +} + /** * tty_port_hangup - hangup helper * @port: tty port @@ -116,6 +124,7 @@ void tty_port_hangup(struct tty_port *port) port->tty = NULL; spin_unlock_irqrestore(&port->lock, flags); wake_up_interruptible(&port->open_wait); + tty_port_shutdown(port); } EXPORT_SYMBOL(tty_port_hangup); @@ -296,15 +305,17 @@ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct f if (port->count) { spin_unlock_irqrestore(&port->lock, flags); + if (port->ops->drop) + port->ops->drop(port); return 0; } - port->flags |= ASYNC_CLOSING; + set_bit(ASYNC_CLOSING, &port->flags); tty->closing = 1; spin_unlock_irqrestore(&port->lock, flags); /* Don't block on a stalled port, just pull the chain */ if (tty->flow_stopped) tty_driver_flush_buffer(tty); - if (port->flags & ASYNC_INITIALIZED && + if (test_bit(ASYNCB_INITIALIZED, &port->flags) && port->closing_wait != ASYNC_CLOSING_WAIT_NONE) tty_wait_until_sent(tty, port->closing_wait); if (port->drain_delay) { @@ -318,6 +329,9 @@ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct f timeout = 2 * HZ; schedule_timeout_interruptible(timeout); } + /* Don't call port->drop for the last reference. Callers will want + to drop the last active reference in ->shutdown() or the tty + shutdown path */ return 1; } EXPORT_SYMBOL(tty_port_close_start); @@ -348,3 +362,14 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty) spin_unlock_irqrestore(&port->lock, flags); } EXPORT_SYMBOL(tty_port_close_end); + +void tty_port_close(struct tty_port *port, struct tty_struct *tty, + struct file *filp) +{ + if (tty_port_close_start(port, tty, filp) == 0) + return; + tty_port_shutdown(port); + tty_port_close_end(port, tty); + tty_port_tty_set(port, NULL); +} +EXPORT_SYMBOL(tty_port_close); diff --git a/include/linux/tty.h b/include/linux/tty.h index a916a318004..ecb3d1ba301 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -187,7 +187,12 @@ struct tty_port; struct tty_port_operations { /* Return 1 if the carrier is raised */ int (*carrier_raised)(struct tty_port *port); + /* Control the DTR line */ void (*dtr_rts)(struct tty_port *port, int raise); + /* Called when the last close completes or a hangup finishes + IFF the port was initialized. 
Do not use to free resources */ + void (*shutdown)(struct tty_port *port); + void (*drop)(struct tty_port *port); }; struct tty_port { @@ -459,7 +464,8 @@ extern int tty_port_block_til_ready(struct tty_port *port, extern int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct file *filp); extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); - +extern void tty_port_close(struct tty_port *port, + struct tty_struct *tty, struct file *filp); extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); extern int tty_unregister_ldisc(int disc); extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); -- cgit v1.2.3-70-g09d2 From 8b92e87d39bfd046e7581e1fe0f40eac40f88608 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:24 -0700 Subject: vt: add an event interface This is needed and requested in various forms for ConsoleKit, screenblank handling and the like so do the job with a single interface. Also build the interface so that unlike VT_WAITACTIVE and friends it won't miss events. FIXME: Should this be a waitactive ioctl or a new device file you can poll and read events from. We need the code anyway to fix up the existing broken wait for console switch logic but the ConsoleKit people would prefer the new device to the ioctl we have here Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/vt.c | 4 +- drivers/char/vt_ioctl.c | 185 +++++++++++++++++++++++++++++++++++------------- include/linux/vt.h | 14 ++++ include/linux/vt_kern.h | 3 +- 4 files changed, 154 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/drivers/char/vt.c b/drivers/char/vt.c index e47a4c88976..33214d92ca4 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -252,7 +252,6 @@ static void notify_update(struct vc_data *vc) struct vt_notifier_param param = { .vc = vc }; atomic_notifier_call_chain(&vt_notifier_list, VT_UPDATE, ¶m); } - /* * Low-Level Functions */ @@ -935,6 +934,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (CON_IS_VISIBLE(vc)) update_screen(vc); + vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); return err; } @@ -3637,6 +3637,7 @@ void do_blank_screen(int entering_gfx) blank_state = blank_vesa_wait; mod_timer(&console_timer, jiffies + vesa_off_interval); } + vt_event_post(VT_EVENT_BLANK, vc->vc_num, vc->vc_num); } EXPORT_SYMBOL(do_blank_screen); @@ -3681,6 +3682,7 @@ void do_unblank_screen(int leaving_gfx) console_blank_hook(0); set_palette(vc); set_cursor(vc); + vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num); } EXPORT_SYMBOL(do_unblank_screen); diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 95189f288f8..35ad94e65d0 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -61,6 +61,133 @@ extern struct tty_driver *console_driver; static void complete_change_console(struct vc_data *vc); +/* + * User space VT_EVENT handlers + */ + +struct vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +static LIST_HEAD(vt_events); +static DEFINE_SPINLOCK(vt_event_lock); +static DECLARE_WAIT_QUEUE_HEAD(vt_event_waitqueue); + +/** + * vt_event_post + * @event: the event that occurred + * @old: old console + * @new: new console + * + * Post an VT event to interested VT handlers + */ + +void vt_event_post(unsigned int event, unsigned int old, unsigned int new) +{ + struct list_head *pos, *head; + unsigned long flags; + int wake = 0; + + spin_lock_irqsave(&vt_event_lock, flags); + head = 
&vt_events; + + list_for_each(pos, head) { + struct vt_event_wait *ve = list_entry(pos, + struct vt_event_wait, list); + if (!(ve->event.event & event)) + continue; + ve->event.event = event; + /* kernel view is consoles 0..n-1, user space view is + console 1..n with 0 meaning current, so we must bias */ + ve->event.old = old + 1; + ve->event.new = new + 1; + wake = 1; + ve->done = 1; + } + spin_unlock_irqrestore(&vt_event_lock, flags); + if (wake) + wake_up_interruptible(&vt_event_waitqueue); +} + +/** + * vt_event_wait - wait for an event + * @vw: our event + * + * Waits for an event to occur which completes our vt_event_wait + * structure. On return the structure has wv->done set to 1 for success + * or 0 if some event such as a signal ended the wait. + */ + +static void vt_event_wait(struct vt_event_wait *vw) +{ + unsigned long flags; + /* Prepare the event */ + INIT_LIST_HEAD(&vw->list); + vw->done = 0; + /* Queue our event */ + spin_lock_irqsave(&vt_event_lock, flags); + list_add(&vw->list, &vt_events); + spin_unlock_irqrestore(&vt_event_lock, flags); + /* Wait for it to pass */ + wait_event_interruptible(vt_event_waitqueue, vw->done); + /* Dequeue it */ + spin_lock_irqsave(&vt_event_lock, flags); + list_del(&vw->list); + spin_unlock_irqrestore(&vt_event_lock, flags); +} + +/** + * vt_event_wait_ioctl - event ioctl handler + * @arg: argument to ioctl + * + * Implement the VT_WAITEVENT ioctl using the VT event interface + */ + +static int vt_event_wait_ioctl(struct vt_event __user *event) +{ + struct vt_event_wait vw; + + if (copy_from_user(&vw.event, event, sizeof(struct vt_event))) + return -EFAULT; + /* Highest supported event for now */ + if (vw.event.event & ~VT_MAX_EVENT) + return -EINVAL; + + vt_event_wait(&vw); + /* If it occurred report it */ + if (vw.done) { + if (copy_to_user(event, &vw.event, sizeof(struct vt_event))) + return -EFAULT; + return 0; + } + return -EINTR; +} + +/** + * vt_waitactive - active console wait + * @event: event code + * @n: new console + * + * Helper for event waits. Used to implement the legacy + * event waiting ioctls in terms of events + */ + +int vt_waitactive(int n) +{ + struct vt_event_wait vw; + do { + if (n == fg_console + 1) + break; + vw.event.event = VT_EVENT_SWITCH; + vt_event_wait(&vw); + if (vw.done == 0) + return -EINTR; + } while (vw.event.new != n); + return 0; +} + /* * these are the valid i/o ports we're allowed to change. they map all the * video ports @@ -360,6 +487,8 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_ return 0; } + + /* * We handle the console-specific ioctl's here. We allow the * capability to modify any console, not just the fg_console. @@ -851,7 +980,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, if (arg == 0 || arg > MAX_NR_CONSOLES) ret = -ENXIO; else - ret = vt_waitactive(arg - 1); + ret = vt_waitactive(arg); break; /* @@ -1159,6 +1288,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, ret = put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg); break; + case VT_WAITEVENT: + ret = vt_event_wait_ioctl((struct vt_event __user *)arg); + break; default: ret = -ENOIOCTLCMD; } @@ -1170,54 +1302,6 @@ eperm: goto out; } -/* - * Sometimes we want to wait until a particular VT has been activated. We - * do it in a very simple manner. Everybody waits on a single queue and - * get woken up at once. Those that are satisfied go on with their business, - * while those not ready go back to sleep. 
Seems overkill to add a wait - * to each vt just for this - usually this does nothing! - */ -static DECLARE_WAIT_QUEUE_HEAD(vt_activate_queue); - -/* - * Sleeps until a vt is activated, or the task is interrupted. Returns - * 0 if activation, -EINTR if interrupted by a signal handler. - */ -int vt_waitactive(int vt) -{ - int retval; - DECLARE_WAITQUEUE(wait, current); - - add_wait_queue(&vt_activate_queue, &wait); - for (;;) { - retval = 0; - - /* - * Synchronize with redraw_screen(). By acquiring the console - * semaphore we make sure that the console switch is completed - * before we return. If we didn't wait for the semaphore, we - * could return at a point where fg_console has already been - * updated, but the console switch hasn't been completed. - */ - acquire_console_sem(); - set_current_state(TASK_INTERRUPTIBLE); - if (vt == fg_console) { - release_console_sem(); - break; - } - release_console_sem(); - retval = -ERESTARTNOHAND; - if (signal_pending(current)) - break; - schedule(); - } - remove_wait_queue(&vt_activate_queue, &wait); - __set_current_state(TASK_RUNNING); - return retval; -} - -#define vt_wake_waitactive() wake_up(&vt_activate_queue) - void reset_vc(struct vc_data *vc) { vc->vc_mode = KD_TEXT; @@ -1262,6 +1346,7 @@ void vc_SAK(struct work_struct *work) static void complete_change_console(struct vc_data *vc) { unsigned char old_vc_mode; + int old = fg_console; last_console = fg_console; @@ -1325,7 +1410,7 @@ static void complete_change_console(struct vc_data *vc) /* * Wake anyone waiting for their VT to activate */ - vt_wake_waitactive(); + vt_event_post(VT_EVENT_SWITCH, old, vc->vc_num); return; } diff --git a/include/linux/vt.h b/include/linux/vt.h index 02c1c028877..89c03a11e19 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -74,4 +74,18 @@ struct vt_consize { #define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ #define VT_GETHIFONTMASK 0x560D /* return hi font mask */ +struct vt_event { + unsigned int event; +#define VT_EVENT_SWITCH 0x0001 /* Console switch */ +#define VT_EVENT_BLANK 0x0002 /* Screen blank */ +#define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */ +#define VT_EVENT_RESIZE 0x0008 /* Resize display */ +#define VT_MAX_EVENT 0x000F + unsigned int old; /* Old console */ + unsigned int new; /* New console (if changing) */ + unsigned int pad[4]; /* Padding for expansion */ +}; + +#define VT_WAITEVENT 0x560E /* Wait for an event */ + #endif /* _LINUX_VT_H */ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 2f1113467f7..f8c797d57c8 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -91,7 +91,8 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); #endif /* vt.c */ -int vt_waitactive(int vt); +void vt_event_post(unsigned int event, unsigned int old, unsigned int new); +int vt_waitactive(int n); void change_console(struct vc_data *new_vc); void reset_vc(struct vc_data *vc); extern int unbind_con_driver(const struct consw *csw, int first, int last, -- cgit v1.2.3-70-g09d2 From 8d233558cd99a888571bb5a88a74970879e0aba4 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:25 -0700 Subject: vt: remove power stuff from kernel/power In the past someone gratuitiously borrowed chunks of kernel internal vt code and dumped them in kernel/power. 
They have all sorts of deep relations with the vt code so put them in the vt tree instead Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/vt_ioctl.c | 56 +++++++++++++++++++++++++++++++++++++++++++ include/linux/vt_kern.h | 1 + kernel/power/console.c | 63 ++++--------------------------------------------- 3 files changed, 62 insertions(+), 58 deletions(-) (limited to 'include') diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 35ad94e65d0..d29fbd44bdc 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -1483,3 +1484,58 @@ void change_console(struct vc_data *new_vc) complete_change_console(new_vc); } + +/* Perform a kernel triggered VT switch for suspend/resume */ + +static int disable_vt_switch; + +int vt_move_to_console(unsigned int vt, int alloc) +{ + int prev; + + acquire_console_sem(); + /* Graphics mode - up to X */ + if (disable_vt_switch) { + release_console_sem(); + return 0; + } + prev = fg_console; + + if (alloc && vc_allocate(vt)) { + /* we can't have a free VC for now. Too bad, + * we don't want to mess the screen for now. */ + release_console_sem(); + return -ENOSPC; + } + + if (set_console(vt)) { + /* + * We're unable to switch to the SUSPEND_CONSOLE. + * Let the calling function know so it can decide + * what to do. + */ + release_console_sem(); + return -EIO; + } + release_console_sem(); + if (vt_waitactive(vt)) { + pr_debug("Suspend: Can't switch VCs."); + return -EINTR; + } + return prev; +} + +/* + * Normally during a suspend, we allocate a new console and switch to it. + * When we resume, we switch back to the original console. This switch + * can be slow, so on systems where the framebuffer can handle restoration + * of video registers anyways, there's little point in doing the console + * switch. This function allows you to disable it by passing it '0'. + */ +void pm_set_vt_switch(int do_switch) +{ + acquire_console_sem(); + disable_vt_switch = !do_switch; + release_console_sem(); +} +EXPORT_SYMBOL(pm_set_vt_switch); diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index f8c797d57c8..5b53efe41b5 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -117,4 +117,5 @@ struct vt_spawn_console { }; extern struct vt_spawn_console vt_spawn_con; +extern int vt_move_to_console(unsigned int vt, int alloc); #endif /* _VT_KERN_H */ diff --git a/kernel/power/console.c b/kernel/power/console.c index a3961b205de..5187136fe1d 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -14,56 +14,13 @@ #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) static int orig_fgconsole, orig_kmsg; -static int disable_vt_switch; - -/* - * Normally during a suspend, we allocate a new console and switch to it. - * When we resume, we switch back to the original console. This switch - * can be slow, so on systems where the framebuffer can handle restoration - * of video registers anyways, there's little point in doing the console - * switch. This function allows you to disable it by passing it '0'. - */ -void pm_set_vt_switch(int do_switch) -{ - acquire_console_sem(); - disable_vt_switch = !do_switch; - release_console_sem(); -} -EXPORT_SYMBOL(pm_set_vt_switch); int pm_prepare_console(void) { - acquire_console_sem(); - - if (disable_vt_switch) { - release_console_sem(); - return 0; - } - - orig_fgconsole = fg_console; - - if (vc_allocate(SUSPEND_CONSOLE)) { - /* we can't have a free VC for now. 
Too bad, - * we don't want to mess the screen for now. */ - release_console_sem(); + orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1); + if (orig_fgconsole < 0) return 1; - } - if (set_console(SUSPEND_CONSOLE)) { - /* - * We're unable to switch to the SUSPEND_CONSOLE. - * Let the calling function know so it can decide - * what to do. - */ - release_console_sem(); - return 1; - } - release_console_sem(); - - if (vt_waitactive(SUSPEND_CONSOLE)) { - pr_debug("Suspend: Can't switch VCs."); - return 1; - } orig_kmsg = kmsg_redirect; kmsg_redirect = SUSPEND_CONSOLE; return 0; @@ -71,19 +28,9 @@ int pm_prepare_console(void) void pm_restore_console(void) { - acquire_console_sem(); - if (disable_vt_switch) { - release_console_sem(); - return; - } - set_console(orig_fgconsole); - release_console_sem(); - - if (vt_waitactive(orig_fgconsole)) { - pr_debug("Resume: Can't switch VCs."); - return; + if (orig_fgconsole >= 0) { + vt_move_to_console(orig_fgconsole, 0); + kmsg_redirect = orig_kmsg; } - - kmsg_redirect = orig_kmsg; } #endif -- cgit v1.2.3-70-g09d2 From a5eb56242d1e2d82938a066219ac1cdf0d68adc8 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:25 -0700 Subject: vt: move kernel stuff out of vt.h We have vt_kern.h for this Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- include/linux/vt.h | 11 ----------- include/linux/vt_kern.h | 12 ++++++++++++ 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/vt.h b/include/linux/vt.h index 89c03a11e19..831daf64b90 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -1,17 +1,6 @@ #ifndef _LINUX_VT_H #define _LINUX_VT_H -#ifdef __KERNEL__ -struct notifier_block; - -struct vt_notifier_param { - struct vc_data *vc; /* VC on which the update happened */ - unsigned int c; /* Printed char */ -}; - -extern int register_vt_notifier(struct notifier_block *nb); -extern int unregister_vt_notifier(struct notifier_block *nb); -#endif /* * These constants are also useful for user-level apps (e.g., VC diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 5b53efe41b5..c0c4e1103a7 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -13,6 +13,7 @@ #include #include #include +#include /* * Presently, a lot of graphics programs do not restore the contents of @@ -118,4 +119,15 @@ struct vt_spawn_console { extern struct vt_spawn_console vt_spawn_con; extern int vt_move_to_console(unsigned int vt, int alloc); + +/* Interfaces for VC notification of character events (for accessibility etc) */ + +struct vt_notifier_param { + struct vc_data *vc; /* VC on which the update happened */ + unsigned int c; /* Printed char */ +}; + +extern int register_vt_notifier(struct notifier_block *nb); +extern int unregister_vt_notifier(struct notifier_block *nb); + #endif /* _VT_KERN_H */ -- cgit v1.2.3-70-g09d2 From d3b5cffcf84a8bdc7073dce4745d67c72629af85 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:26 -0700 Subject: vt: add an activate and lock X and other graphical interfaces need to be able to flip to a console and lock it into graphics mode without races. 
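[ Illustration, not part of this patch: a hedged userspace sketch of the new ioctl. The device path and signal choices are assumptions; only struct vt_setactivate, VT_SETACTIVATE and the 1-based console numbering come from this change. ]

#include <fcntl.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vt.h>

static int activate_and_own_vt(int vtno)
{
	struct vt_setactivate vsa = {
		.console = vtno,		/* 1-based; 0 is rejected */
		.mode = {
			.mode   = VT_PROCESS,	/* we arbitrate later switches */
			.relsig = SIGUSR1,
			.acqsig = SIGUSR2,
		},
	};
	int fd = open("/dev/tty0", O_RDWR);	/* assumed console device */

	if (fd < 0)
		return -1;
	if (ioctl(fd, VT_SETACTIVATE, &vsa) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

[ Unlike a VT_ACTIVATE followed by a separate VT_SETMODE, the mode is installed on the target console before the switch is commenced, so there is no window for another process to steal the console in between. ]
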
Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/vt_ioctl.c | 38 +++++++++++++++++++++++++++++++++++++- include/linux/vt.h | 7 +++++++ 2 files changed, 44 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index d29fbd44bdc..0fceb8f4d6f 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -972,6 +972,41 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, } break; + case VT_SETACTIVATE: + { + struct vt_setactivate vsa; + + if (!perm) + goto eperm; + + if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, + sizeof(struct vt_setactivate))) + return -EFAULT; + if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) + ret = -ENXIO; + else { + vsa.console--; + acquire_console_sem(); + ret = vc_allocate(vsa.console); + if (ret == 0) { + struct vc_data *nvc; + /* This is safe providing we don't drop the + console sem between vc_allocate and + finishing referencing nvc */ + nvc = vc_cons[vsa.console].d; + nvc->vt_mode = vsa.mode; + nvc->vt_mode.frsig = 0; + put_pid(nvc->vt_pid); + nvc->vt_pid = get_pid(task_pid(current)); + } + release_console_sem(); + if (ret) + break; + /* Commence switch and lock */ + set_console(arg); + } + } + /* * wait until the specified VT has been activated */ @@ -1342,7 +1377,8 @@ void vc_SAK(struct work_struct *work) } /* - * Performs the back end of a vt switch + * Performs the back end of a vt switch. Called under the console + * semaphore. */ static void complete_change_console(struct vc_data *vc) { diff --git a/include/linux/vt.h b/include/linux/vt.h index 831daf64b90..7afca0d7213 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -77,4 +77,11 @@ struct vt_event { #define VT_WAITEVENT 0x560E /* Wait for an event */ +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ + #endif /* _LINUX_VT_H */ -- cgit v1.2.3-70-g09d2 From a509a7e478e4766114d69f12d19d644ac63e9765 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:26 -0700 Subject: tty: USB does not need the filp argument in the drivers And indeed none of them use it. Clean this up as it will make moving to a standard open method rather easier. 
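[ Illustration, not part of this patch: with the filp parameter gone, a per-driver open reduces to the two-argument form below. example_open is hypothetical; usb_serial_generic_open() is the helper the converted drivers forward to. ]

static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	/* no struct file * any more -- none of the drivers ever used it */
	return usb_serial_generic_open(tty, port);
}
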
Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/ark3116.c | 5 ++--- drivers/usb/serial/belkin_sa.c | 4 ++-- drivers/usb/serial/ch341.c | 5 ++--- drivers/usb/serial/console.c | 4 ++-- drivers/usb/serial/cp210x.c | 6 ++---- drivers/usb/serial/cyberjack.c | 4 ++-- drivers/usb/serial/cypress_m8.c | 6 ++---- drivers/usb/serial/digi_acceleport.c | 6 ++---- drivers/usb/serial/empeg.c | 8 +++----- drivers/usb/serial/ftdi_sio.c | 6 ++---- drivers/usb/serial/garmin_gps.c | 3 +-- drivers/usb/serial/generic.c | 3 +-- drivers/usb/serial/io_edgeport.c | 6 ++---- drivers/usb/serial/io_ti.c | 3 +-- drivers/usb/serial/ipaq.c | 9 ++------- drivers/usb/serial/ipw.c | 3 +-- drivers/usb/serial/ir-usb.c | 6 ++---- drivers/usb/serial/iuu_phoenix.c | 5 ++--- drivers/usb/serial/keyspan.c | 3 +-- drivers/usb/serial/keyspan.h | 3 +-- drivers/usb/serial/keyspan_pda.c | 2 +- drivers/usb/serial/kl5kusb105.c | 10 ++-------- drivers/usb/serial/kobil_sct.c | 6 ++---- drivers/usb/serial/mct_u232.c | 6 ++---- drivers/usb/serial/mos7720.c | 3 +-- drivers/usb/serial/mos7840.c | 3 +-- drivers/usb/serial/navman.c | 3 +-- drivers/usb/serial/omninet.c | 6 ++---- drivers/usb/serial/opticon.c | 3 +-- drivers/usb/serial/option.c | 6 ++---- drivers/usb/serial/oti6858.c | 6 ++---- drivers/usb/serial/pl2303.c | 5 +---- drivers/usb/serial/sierra.c | 3 +-- drivers/usb/serial/spcp8x5.c | 5 +---- drivers/usb/serial/symbolserial.c | 3 +-- drivers/usb/serial/ti_usb_3410_5052.c | 6 ++---- drivers/usb/serial/usb-serial.c | 2 +- drivers/usb/serial/usb_debug.c | 5 ++--- drivers/usb/serial/visor.c | 6 ++---- drivers/usb/serial/whiteheat.c | 5 ++--- include/linux/usb/serial.h | 7 +++---- 41 files changed, 68 insertions(+), 131 deletions(-) (limited to 'include') diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index aec61880f36..7ddde4ddfb4 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -318,8 +318,7 @@ static void ark3116_set_termios(struct tty_struct *tty, return; } -static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ktermios tmp_termios; struct usb_serial *serial = port->serial; @@ -334,7 +333,7 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port, return -ENOMEM; } - result = usb_serial_generic_open(tty, port, filp); + result = usb_serial_generic_open(tty, port); if (result) goto err_out; diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c index 7033b031b44..a0467bc6162 100644 --- a/drivers/usb/serial/belkin_sa.c +++ b/drivers/usb/serial/belkin_sa.c @@ -92,7 +92,7 @@ static int debug; static int belkin_sa_startup(struct usb_serial *serial); static void belkin_sa_release(struct usb_serial *serial); static int belkin_sa_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); + struct usb_serial_port *port); static void belkin_sa_close(struct usb_serial_port *port); static void belkin_sa_read_int_callback(struct urb *urb); static void belkin_sa_set_termios(struct tty_struct *tty, @@ -213,7 +213,7 @@ static void belkin_sa_release(struct usb_serial *serial) static int belkin_sa_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) + struct usb_serial_port *port) { int retval = 0; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 2830766f5b3..8c894a7d5dc 100644 --- a/drivers/usb/serial/ch341.c +++ 
b/drivers/usb/serial/ch341.c @@ -300,8 +300,7 @@ static void ch341_close(struct usb_serial_port *port) /* open this device, set default parameters */ -static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct ch341_private *priv = usb_get_serial_port_data(serial->port[0]); @@ -333,7 +332,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port, return -EPROTO; } - r = usb_serial_generic_open(tty, port, filp); + r = usb_serial_generic_open(tty, port); out: return r; } diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 0e4f2e41ace..be086e4c76b 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -150,9 +150,9 @@ static int usb_console_setup(struct console *co, char *options) /* only call the device specific open if this * is the first time the port is opened */ if (serial->type->open) - retval = serial->type->open(NULL, port, NULL); + retval = serial->type->open(NULL, port); else - retval = usb_serial_generic_open(NULL, port, NULL); + retval = usb_serial_generic_open(NULL, port); if (retval) { err("could not open USB console port"); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index b5275c4a8ee..4a208fe85bc 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -33,8 +33,7 @@ /* * Function Prototypes */ -static int cp210x_open(struct tty_struct *, struct usb_serial_port *, - struct file *); +static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *); static void cp210x_cleanup(struct usb_serial_port *); static void cp210x_close(struct usb_serial_port *); static void cp210x_get_termios(struct tty_struct *, @@ -368,8 +367,7 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) { return baud; } -static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; int result; diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c index 336523fd736..b0f6402a91c 100644 --- a/drivers/usb/serial/cyberjack.c +++ b/drivers/usb/serial/cyberjack.c @@ -61,7 +61,7 @@ static int cyberjack_startup(struct usb_serial *serial); static void cyberjack_disconnect(struct usb_serial *serial); static void cyberjack_release(struct usb_serial *serial); static int cyberjack_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); + struct usb_serial_port *port); static void cyberjack_close(struct usb_serial_port *port); static int cyberjack_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -173,7 +173,7 @@ static void cyberjack_release(struct usb_serial *serial) } static int cyberjack_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) + struct usb_serial_port *port) { struct cyberjack_private *priv; unsigned long flags; diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 59adfe12311..df1adb9a436 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -172,8 +172,7 @@ static int cypress_earthmate_startup(struct usb_serial *serial); static int cypress_hidcom_startup(struct usb_serial *serial); static int cypress_ca42v2_startup(struct usb_serial *serial); static void 
cypress_release(struct usb_serial *serial); -static int cypress_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); +static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port); static void cypress_close(struct usb_serial_port *port); static void cypress_dtr_rts(struct usb_serial_port *port, int on); static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port, @@ -633,8 +632,7 @@ static void cypress_release(struct usb_serial *serial) } -static int cypress_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port) { struct cypress_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index f4808091c47..ab3dd991586 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -453,8 +453,7 @@ static int digi_write(struct tty_struct *tty, struct usb_serial_port *port, static void digi_write_bulk_callback(struct urb *urb); static int digi_write_room(struct tty_struct *tty); static int digi_chars_in_buffer(struct tty_struct *tty); -static int digi_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp); +static int digi_open(struct tty_struct *tty, struct usb_serial_port *port); static void digi_close(struct usb_serial_port *port); static int digi_carrier_raised(struct usb_serial_port *port); static void digi_dtr_rts(struct usb_serial_port *port, int on); @@ -1347,8 +1346,7 @@ static int digi_carrier_raised(struct usb_serial_port *port) return 0; } -static int digi_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int digi_open(struct tty_struct *tty, struct usb_serial_port *port) { int ret; unsigned char buf[32]; diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c index 80cb3471adb..da559a773b5 100644 --- a/drivers/usb/serial/empeg.c +++ b/drivers/usb/serial/empeg.c @@ -79,8 +79,7 @@ static int debug; #define EMPEG_PRODUCT_ID 0x0001 /* function prototypes for an empeg-car player */ -static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp); +static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port); static void empeg_close(struct usb_serial_port *port); static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, @@ -142,8 +141,7 @@ static int bytes_out; /****************************************************************************** * Empeg specific driver functions ******************************************************************************/ -static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int empeg_open(struct tty_struct *tty,struct usb_serial_port *port) { struct usb_serial *serial = port->serial; int result = 0; @@ -151,7 +149,7 @@ static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port, dbg("%s - port %d", __func__, port->number); /* Force default termio settings */ - empeg_set_termios(tty, port, NULL) ; + empeg_set_termios(tty, port, NULL); bytes_in = 0; bytes_out = 0; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8fec5d4455c..76a17f915ee 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -747,8 +747,7 @@ static int ftdi_sio_probe(struct usb_serial 
*serial, const struct usb_device_id *id); static int ftdi_sio_port_probe(struct usb_serial_port *port); static int ftdi_sio_port_remove(struct usb_serial_port *port); -static int ftdi_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); +static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port); static void ftdi_close(struct usb_serial_port *port); static void ftdi_dtr_rts(struct usb_serial_port *port, int on); static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port, @@ -1680,8 +1679,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) return 0; } -static int ftdi_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) { /* ftdi_open */ struct usb_device *dev = port->serial->dev; struct ftdi_private *priv = usb_get_serial_port_data(port); diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 8839f1c70b7..20432d34552 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -933,8 +933,7 @@ static int garmin_init_session(struct usb_serial_port *port) -static int garmin_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port) { unsigned long flags; int status = 0; diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index ce57f6a32bd..d9398e9f30c 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -114,8 +114,7 @@ void usb_serial_generic_deregister(void) #endif } -int usb_serial_generic_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; int result = 0; diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 0191693625d..dc0f832657e 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -205,8 +205,7 @@ static void edge_bulk_out_data_callback(struct urb *urb); static void edge_bulk_out_cmd_callback(struct urb *urb); /* function prototypes for the usbserial callbacks */ -static int edge_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp); +static int edge_open(struct tty_struct *tty, struct usb_serial_port *port); static void edge_close(struct usb_serial_port *port); static int edge_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -852,8 +851,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb) * If successful, we return 0 * Otherwise we return a negative error number. 
*****************************************************************************/ -static int edge_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct usb_serial *serial; diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index e8bc42f92e7..d4cc0f7af40 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -1831,8 +1831,7 @@ static void edge_bulk_out_callback(struct urb *urb) tty_kref_put(tty); } -static int edge_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct edgeport_serial *edge_serial; diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c index 2545d45ce16..24fcc64b837 100644 --- a/drivers/usb/serial/ipaq.c +++ b/drivers/usb/serial/ipaq.c @@ -75,7 +75,7 @@ static int initial_wait; /* Function prototypes for an ipaq */ static int ipaq_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); + struct usb_serial_port *port); static void ipaq_close(struct usb_serial_port *port); static int ipaq_calc_num_ports(struct usb_serial *serial); static int ipaq_startup(struct usb_serial *serial); @@ -587,7 +587,7 @@ static int bytes_in; static int bytes_out; static int ipaq_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) + struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct ipaq_private *priv; @@ -628,11 +628,6 @@ static int ipaq_open(struct tty_struct *tty, priv->free_len += PACKET_SIZE; } - if (tty) { - /* FIXME: These two are bogus */ - tty->raw = 1; - tty->real_raw = 1; - } /* * Lose the small buffers usbserial provides. Make larger ones. 
*/ diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c index 29ad038b9c8..727d323f092 100644 --- a/drivers/usb/serial/ipw.c +++ b/drivers/usb/serial/ipw.c @@ -193,8 +193,7 @@ static void ipw_read_bulk_callback(struct urb *urb) return; } -static int ipw_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_device *dev = port->serial->dev; u8 buf_flow_static[16] = IPW_BYTES_FLOWINIT; diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 66009b6b763..95d8d26b9a4 100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c @@ -86,8 +86,7 @@ static int buffer_size; static int xbof = -1; static int ir_startup (struct usb_serial *serial); -static int ir_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filep); +static int ir_open(struct tty_struct *tty, struct usb_serial_port *port); static void ir_close(struct usb_serial_port *port); static int ir_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -296,8 +295,7 @@ static int ir_startup(struct usb_serial *serial) return 0; } -static int ir_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int ir_open(struct tty_struct *tty, struct usb_serial_port *port) { char *buffer; int result = 0; diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index 96873a7a32b..f3f9be0469c 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -1018,8 +1018,7 @@ static void iuu_close(struct usb_serial_port *port) } } -static int iuu_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; u8 *buf; @@ -1077,7 +1076,7 @@ static int iuu_open(struct tty_struct *tty, tty->termios->c_iflag = 0; priv->termios_initialized = 1; priv->poll = 0; - } + } spin_unlock_irqrestore(&priv->lock, flags); /* initialize writebuf */ diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 2594b8743d3..f8c4b07033f 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1209,8 +1209,7 @@ static int keyspan_write_room(struct tty_struct *tty) } -static int keyspan_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port) { struct keyspan_port_private *p_priv; struct keyspan_serial_private *s_priv; diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h index 3107ed15af6..30771e5b397 100644 --- a/drivers/usb/serial/keyspan.h +++ b/drivers/usb/serial/keyspan.h @@ -36,8 +36,7 @@ /* Function prototypes for Keyspan serial converter */ static int keyspan_open (struct tty_struct *tty, - struct usb_serial_port *port, - struct file *filp); + struct usb_serial_port *port); static void keyspan_close (struct usb_serial_port *port); static void keyspan_dtr_rts (struct usb_serial_port *port, int on); static int keyspan_startup (struct usb_serial *serial); diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index d0b12e40c2b..257c16cc6b2 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -681,7 +681,7 @@ static int keyspan_pda_carrier_raised(struct usb_serial_port *port) static int keyspan_pda_open(struct 
tty_struct *tty, - struct usb_serial_port *port, struct file *filp) + struct usb_serial_port *port) { struct usb_serial *serial = port->serial; unsigned char room; diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 0f44bb8e8d4..a61673133d7 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -75,8 +75,7 @@ static int debug; static int klsi_105_startup(struct usb_serial *serial); static void klsi_105_disconnect(struct usb_serial *serial); static void klsi_105_release(struct usb_serial *serial); -static int klsi_105_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); +static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port); static void klsi_105_close(struct usb_serial_port *port); static int klsi_105_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -358,8 +357,7 @@ static void klsi_105_release(struct usb_serial *serial) } } /* klsi_105_release */ -static int klsi_105_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) { struct klsi_105_private *priv = usb_get_serial_port_data(port); int retval = 0; @@ -371,10 +369,6 @@ static int klsi_105_open(struct tty_struct *tty, dbg("%s port %d", __func__, port->number); - /* force low_latency on so that our tty_push actually forces - * the data through - * tty->low_latency = 1; */ - /* Do a defined restart: * Set up sane default baud rate and send the 'READ_ON' * vendor command. diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 6db0e561f68..97901578343 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -70,8 +70,7 @@ static int debug; /* Function prototypes */ static int kobil_startup(struct usb_serial *serial); static void kobil_release(struct usb_serial *serial); -static int kobil_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); +static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port); static void kobil_close(struct usb_serial_port *port); static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -211,8 +210,7 @@ static void kobil_release(struct usb_serial *serial) } -static int kobil_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port) { int result = 0; struct kobil_private *priv; diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index d8825e159aa..d501aefa262 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -93,8 +93,7 @@ static int debug; */ static int mct_u232_startup(struct usb_serial *serial); static void mct_u232_release(struct usb_serial *serial); -static int mct_u232_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); +static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port); static void mct_u232_close(struct usb_serial_port *port); static void mct_u232_dtr_rts(struct usb_serial_port *port, int on); static void mct_u232_read_int_callback(struct urb *urb); @@ -421,8 +420,7 @@ static void mct_u232_release(struct usb_serial *serial) } } /* mct_u232_release */ -static int mct_u232_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int 
mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv = usb_get_serial_port_data(port); diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 4342a8a0eac..763e32a44be 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -319,8 +319,7 @@ static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value, return status; } -static int mos7720_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial; struct usb_serial_port *port0; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index b93f0f992d9..f11abf52be7 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -824,8 +824,7 @@ static int mos7840_serial_probe(struct usb_serial *serial, * Otherwise we return a negative error number. *****************************************************************************/ -static int mos7840_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) { int response; int j; diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c index f5f3751a888..5ceaa4c6be0 100644 --- a/drivers/usb/serial/navman.c +++ b/drivers/usb/serial/navman.c @@ -80,8 +80,7 @@ exit: __func__, result); } -static int navman_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int navman_open(struct tty_struct *tty, struct usb_serial_port *port) { int result = 0; diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index 56857ddbd70..062265038bf 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -64,8 +64,7 @@ static int debug; #define BT_IGNITIONPRO_ID 0x2000 /* function prototypes */ -static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp); +static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port); static void omninet_close(struct usb_serial_port *port); static void omninet_read_bulk_callback(struct urb *urb); static void omninet_write_bulk_callback(struct urb *urb); @@ -163,8 +162,7 @@ static int omninet_attach(struct usb_serial *serial) return 0; } -static int omninet_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_serial_port *wport; diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 336bba79ad3..1085a577c5c 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -144,8 +144,7 @@ exit: spin_unlock(&priv->lock); } -static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port) { struct opticon_private *priv = usb_get_serial_data(port->serial); unsigned long flags; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c784ddbe7b6..fe47051dbef 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -45,8 +45,7 @@ /* Function prototypes */ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id); -static int option_open(struct tty_struct *tty, struct 
usb_serial_port *port, - struct file *filp); +static int option_open(struct tty_struct *tty, struct usb_serial_port *port); static void option_close(struct usb_serial_port *port); static void option_dtr_rts(struct usb_serial_port *port, int on); @@ -961,8 +960,7 @@ static int option_chars_in_buffer(struct tty_struct *tty) return data_len; } -static int option_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int option_open(struct tty_struct *tty, struct usb_serial_port *port) { struct option_port_private *portdata; int i, err; diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index 3cece27325e..e90f88a3b04 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -141,8 +141,7 @@ struct oti6858_control_pkt { && ((a)->frame_fmt == (priv)->pending_setup.frame_fmt)) /* function prototypes */ -static int oti6858_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); +static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port); static void oti6858_close(struct usb_serial_port *port); static void oti6858_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); @@ -566,8 +565,7 @@ static void oti6858_set_termios(struct tty_struct *tty, spin_unlock_irqrestore(&priv->lock, flags); } -static int oti6858_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port) { struct oti6858_private *priv = usb_get_serial_port_data(port); struct ktermios tmp_termios; diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 3e86815b270..a63ea99936f 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -691,8 +691,7 @@ static void pl2303_close(struct usb_serial_port *port) } -static int pl2303_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ktermios tmp_termios; struct usb_serial *serial = port->serial; @@ -714,8 +713,6 @@ static int pl2303_open(struct tty_struct *tty, if (tty) pl2303_set_termios(tty, port, &tmp_termios); - /* FIXME: need to assert RTS and DTR if CRTSCTS off */ - dbg("%s - submitting read urb", __func__); port->read_urb->dev = serial->dev; result = usb_submit_urb(port->read_urb, GFP_KERNEL); diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index f48d05e0acc..55391bbe123 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -734,8 +734,7 @@ static void sierra_close(struct usb_serial_port *port) } } -static int sierra_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) { struct sierra_port_private *portdata; struct usb_serial *serial = port->serial; diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 3c249d8e8b8..8b312a05a35 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -623,8 +623,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty, /* open the serial port. do some usb system call. set termios and get the line * status of the device. 
then submit the read urb */ -static int spcp8x5_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ktermios tmp_termios; struct usb_serial *serial = port->serial; @@ -658,8 +657,6 @@ static int spcp8x5_open(struct tty_struct *tty, priv->line_status = status & 0xf0 ; spin_unlock_irqrestore(&priv->lock, flags); - /* FIXME: need to assert RTS and DTR if CRTSCTS off */ - dbg("%s - submitting read urb", __func__); port->read_urb->dev = serial->dev; ret = usb_submit_urb(port->read_urb, GFP_KERNEL); diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c index 6157fac9366..cb7e95f9fcb 100644 --- a/drivers/usb/serial/symbolserial.c +++ b/drivers/usb/serial/symbolserial.c @@ -124,8 +124,7 @@ exit: spin_unlock(&priv->lock); } -static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port) { struct symbol_private *priv = usb_get_serial_data(port->serial); unsigned long flags; diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 3bc609fe224..1e9dc882169 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -98,8 +98,7 @@ struct ti_device { static int ti_startup(struct usb_serial *serial); static void ti_release(struct usb_serial *serial); -static int ti_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *file); +static int ti_open(struct tty_struct *tty, struct usb_serial_port *port); static void ti_close(struct usb_serial_port *port); static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count); @@ -492,8 +491,7 @@ static void ti_release(struct usb_serial *serial) } -static int ti_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *file) +static int ti_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ti_port *tport = usb_get_serial_port_data(port); struct ti_device *tdev; diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 31c7a0939b9..3dda6841e72 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -244,7 +244,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp) /* only call the device specific open if this * is the first time the port is opened */ - retval = serial->type->open(tty, port, filp); + retval = serial->type->open(tty, port); if (retval) goto bailout_interface_put; mutex_unlock(&serial->disc_mutex); diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 614800972dc..7b5bfc4edd3 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -43,11 +43,10 @@ static struct usb_driver debug_driver = { .no_dynamic_id = 1, }; -static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port) { port->bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE; - return usb_serial_generic_open(tty, port, filp); + return usb_serial_generic_open(tty, port); } /* This HW really does not support a serial break, so one will be diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index f5d0f64dcc5..1aa5d20a5d9 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -36,8 +36,7 @@ #define 
DRIVER_DESC "USB HandSpring Visor / Palm OS driver" /* function prototypes for a handspring visor */ -static int visor_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp); +static int visor_open(struct tty_struct *tty, struct usb_serial_port *port); static void visor_close(struct usb_serial_port *port); static int visor_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -273,8 +272,7 @@ static int stats; /****************************************************************************** * Handspring Visor specific driver functions ******************************************************************************/ -static int visor_open(struct tty_struct *tty, struct usb_serial_port *port, - struct file *filp) +static int visor_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct visor_private *priv = usb_get_serial_port_data(port); diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 8d126dd7a02..81f2ae50596 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -146,7 +146,7 @@ static int whiteheat_firmware_attach(struct usb_serial *serial); static int whiteheat_attach(struct usb_serial *serial); static void whiteheat_release(struct usb_serial *serial); static int whiteheat_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); + struct usb_serial_port *port); static void whiteheat_close(struct usb_serial_port *port); static int whiteheat_write(struct tty_struct *tty, struct usb_serial_port *port, @@ -659,8 +659,7 @@ static void whiteheat_release(struct usb_serial *serial) return; } -static int whiteheat_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp) +static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port) { int retval = 0; diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 0ec50ba6213..1cbaf4a6b44 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -238,9 +238,8 @@ struct usb_serial_driver { int (*resume)(struct usb_serial *serial); /* serial function calls */ - /* Called by console with tty = NULL and by tty */ - int (*open)(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); + /* Called by console and by the tty layer */ + int (*open)(struct tty_struct *tty, struct usb_serial_port *port); void (*close)(struct usb_serial_port *port); int (*write)(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); @@ -300,7 +299,7 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} extern struct usb_serial *usb_serial_get_by_index(unsigned int minor); extern void usb_serial_put(struct usb_serial *serial); extern int usb_serial_generic_open(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); + struct usb_serial_port *port); extern int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); extern void usb_serial_generic_close(struct usb_serial_port *port); -- cgit v1.2.3-70-g09d2 From ebd2c8f6d2ec4012c267ecb95e72a57b8355a705 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:28 -0700 Subject: serial: kill off uart_info We moved this into uart_state, now move the fields out of the separate structure and kill it off. 
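In driver terms the conversion is mechanical: every field that used to be reached through port->info (struct uart_info) is now reached through port->state (struct uart_state), so port->info->xmit, port->info->port.tty and port->info->delta_msr_wait become port->state->xmit, port->state->port.tty and port->state->delta_msr_wait. A minimal sketch of the resulting driver pattern, using an invented foo_uart driver with the hardware register accesses elided (illustration only, not part of the patch):

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>

/* Drain the transmit ring, which now lives in uart_state (was uart_info). */
static void foo_uart_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;	/* was &port->info->xmit */

	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		return;

	while (!uart_circ_empty(xmit)) {
		/* write xmit->buf[xmit->tail] to the hardware FIFO here */
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}

/* Push a received character to the tty that now hangs off uart_state. */
static void foo_uart_rx_char(struct uart_port *port, unsigned char ch)
{
	struct tty_struct *tty = port->state->port.tty;	/* was port->info->port.tty */

	tty_insert_flip_char(tty, ch, TTY_NORMAL);
	port->icount.rx++;
	tty_flip_buffer_push(tty);
}

The per-driver hunks that follow are just this substitution applied throughout; the structural work is in serial_core.c and include/linux/serial_core.h.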
Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- arch/mn10300/kernel/asm-offsets.c | 6 +- drivers/serial/21285.c | 8 +- drivers/serial/8250.c | 10 +- drivers/serial/amba-pl010.c | 6 +- drivers/serial/amba-pl011.c | 6 +- drivers/serial/atmel_serial.c | 14 +- drivers/serial/bfin_5xx.c | 20 +-- drivers/serial/bfin_sport_uart.c | 6 +- drivers/serial/clps711x.c | 4 +- drivers/serial/cpm_uart/cpm_uart_core.c | 2 +- drivers/serial/dz.c | 6 +- drivers/serial/icom.c | 16 +- drivers/serial/imx.c | 16 +- drivers/serial/ioc3_serial.c | 52 +++--- drivers/serial/ioc4_serial.c | 66 ++++---- drivers/serial/ip22zilog.c | 14 +- drivers/serial/jsm/jsm_neo.c | 2 +- drivers/serial/jsm/jsm_tty.c | 20 +-- drivers/serial/m32r_sio.c | 6 +- drivers/serial/max3100.c | 16 +- drivers/serial/mcf.c | 4 +- drivers/serial/mpc52xx_uart.c | 4 +- drivers/serial/mpsc.c | 4 +- drivers/serial/msm_serial.c | 6 +- drivers/serial/mux.c | 4 +- drivers/serial/netx-serial.c | 6 +- drivers/serial/nwpserial.c | 4 +- drivers/serial/pmac_zilog.c | 12 +- drivers/serial/pnx8xxx_uart.c | 8 +- drivers/serial/pxa.c | 6 +- drivers/serial/sa1100.c | 8 +- drivers/serial/samsung.c | 8 +- drivers/serial/sb1250-duart.c | 6 +- drivers/serial/sc26xx.c | 10 +- drivers/serial/serial_core.c | 286 ++++++++++++++++---------------- drivers/serial/serial_ks8695.c | 6 +- drivers/serial/serial_lh7a40x.c | 6 +- drivers/serial/serial_txx9.c | 4 +- drivers/serial/sh-sci.c | 10 +- drivers/serial/sn_console.c | 22 +-- drivers/serial/sunhv.c | 8 +- drivers/serial/sunsab.c | 10 +- drivers/serial/sunsu.c | 6 +- drivers/serial/sunzilog.c | 14 +- drivers/serial/timbuart.c | 10 +- drivers/serial/uartlite.c | 6 +- drivers/serial/ucc_uart.c | 4 +- drivers/serial/vr41xx_siu.c | 6 +- drivers/serial/zs.c | 6 +- include/linux/serial_core.h | 64 ++++--- 50 files changed, 422 insertions(+), 432 deletions(-) (limited to 'include') diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c index 2646fcbd7d8..82b40079ad7 100644 --- a/arch/mn10300/kernel/asm-offsets.c +++ b/arch/mn10300/kernel/asm-offsets.c @@ -95,7 +95,7 @@ void foo(void) OFFSET(__iobase, mn10300_serial_port, _iobase); DEFINE(__UART_XMIT_SIZE, UART_XMIT_SIZE); - OFFSET(__xmit_buffer, uart_info, xmit.buf); - OFFSET(__xmit_head, uart_info, xmit.head); - OFFSET(__xmit_tail, uart_info, xmit.tail); + OFFSET(__xmit_buffer, uart_state, xmit.buf); + OFFSET(__xmit_head, uart_state, xmit.head); + OFFSET(__xmit_tail, uart_state, xmit.tail); } diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c index cb6d85d7ff4..1e3d19397a5 100644 --- a/drivers/serial/21285.c +++ b/drivers/serial/21285.c @@ -86,7 +86,7 @@ static void serial21285_enable_ms(struct uart_port *port) static irqreturn_t serial21285_rx_chars(int irq, void *dev_id) { struct uart_port *port = dev_id; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; unsigned int status, ch, flag, rxs, max_count = 256; status = *CSR_UARTFLG; @@ -124,7 +124,7 @@ static irqreturn_t serial21285_rx_chars(int irq, void *dev_id) static irqreturn_t serial21285_tx_chars(int irq, void *dev_id) { struct uart_port *port = dev_id; - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; int count = 256; if (port->x_char) { @@ -235,8 +235,8 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios, baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = uart_get_divisor(port, baud); - if (port->info && port->info->port.tty) { - struct 
tty_struct *tty = port->info->port.tty; + if (port->state && port->state->port.tty) { + struct tty_struct *tty = port->state->port.tty; unsigned int b = port->uartclk / (16 * quot); tty_encode_baud_rate(tty, b, b); } diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 1fd4894d9b5..e415c5eca59 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -1382,7 +1382,7 @@ static void serial8250_enable_ms(struct uart_port *port) static void receive_chars(struct uart_8250_port *up, unsigned int *status) { - struct tty_struct *tty = up->port.info->port.tty; + struct tty_struct *tty = up->port.state->port.tty; unsigned char ch, lsr = *status; int max_count = 256; char flag; @@ -1457,7 +1457,7 @@ ignore_char: static void transmit_chars(struct uart_8250_port *up) { - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { @@ -1500,7 +1500,7 @@ static unsigned int check_modem_status(struct uart_8250_port *up) status |= up->msr_saved_flags; up->msr_saved_flags = 0; if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI && - up->port.info != NULL) { + up->port.state != NULL) { if (status & UART_MSR_TERI) up->port.icount.rng++; if (status & UART_MSR_DDSR) @@ -1510,7 +1510,7 @@ static unsigned int check_modem_status(struct uart_8250_port *up) if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); - wake_up_interruptible(&up->port.info->delta_msr_wait); + wake_up_interruptible(&up->port.state->delta_msr_wait); } return status; @@ -1764,7 +1764,7 @@ static void serial8250_backup_timeout(unsigned long data) up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; spin_unlock_irqrestore(&up->port.lock, flags); if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && - (!uart_circ_empty(&up->port.info->xmit) || up->port.x_char) && + (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && (lsr & UART_LSR_THRE)) { iir &= ~(UART_IIR_ID | UART_IIR_NO_INT); iir |= UART_IIR_THRI; diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c index 58a4879c7e4..39032413d4a 100644 --- a/drivers/serial/amba-pl010.c +++ b/drivers/serial/amba-pl010.c @@ -117,7 +117,7 @@ static void pl010_enable_ms(struct uart_port *port) static void pl010_rx_chars(struct uart_amba_port *uap) { - struct tty_struct *tty = uap->port.info->port.tty; + struct tty_struct *tty = uap->port.state->port.tty; unsigned int status, ch, flag, rsr, max_count = 256; status = readb(uap->port.membase + UART01x_FR); @@ -172,7 +172,7 @@ static void pl010_rx_chars(struct uart_amba_port *uap) static void pl010_tx_chars(struct uart_amba_port *uap) { - struct circ_buf *xmit = &uap->port.info->xmit; + struct circ_buf *xmit = &uap->port.state->xmit; int count; if (uap->port.x_char) { @@ -225,7 +225,7 @@ static void pl010_modem_status(struct uart_amba_port *uap) if (delta & UART01x_FR_CTS) uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS); - wake_up_interruptible(&uap->port.info->delta_msr_wait); + wake_up_interruptible(&uap->port.state->delta_msr_wait); } static irqreturn_t pl010_int(int irq, void *dev_id) diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index 72ba0c6d355..ef82a34baf0 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -124,7 +124,7 @@ static void pl011_enable_ms(struct uart_port *port) static void pl011_rx_chars(struct uart_amba_port *uap) { - struct tty_struct *tty = uap->port.info->port.tty; + struct tty_struct *tty = uap->port.state->port.tty; unsigned 
int status, ch, flag, max_count = 256; status = readw(uap->port.membase + UART01x_FR); @@ -175,7 +175,7 @@ static void pl011_rx_chars(struct uart_amba_port *uap) static void pl011_tx_chars(struct uart_amba_port *uap) { - struct circ_buf *xmit = &uap->port.info->xmit; + struct circ_buf *xmit = &uap->port.state->xmit; int count; if (uap->port.x_char) { @@ -226,7 +226,7 @@ static void pl011_modem_status(struct uart_amba_port *uap) if (delta & UART01x_FR_CTS) uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS); - wake_up_interruptible(&uap->port.info->delta_msr_wait); + wake_up_interruptible(&uap->port.state->delta_msr_wait); } static irqreturn_t pl011_int(int irq, void *dev_id) diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 607d43a3104..963e3c12af4 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -427,7 +427,7 @@ static void atmel_rx_chars(struct uart_port *port) */ static void atmel_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; if (port->x_char && UART_GET_CSR(port) & ATMEL_US_TXRDY) { UART_PUT_CHAR(port, port->x_char); @@ -560,7 +560,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) static void atmel_tx_dma(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; int count; @@ -663,14 +663,14 @@ static void atmel_rx_from_ring(struct uart_port *port) * uart_start(), which takes the lock. */ spin_unlock(&port->lock); - tty_flip_buffer_push(port->info->port.tty); + tty_flip_buffer_push(port->state->port.tty); spin_lock(&port->lock); } static void atmel_rx_from_dma(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; struct atmel_dma_buffer *pdc; int rx_idx = atmel_port->pdc_rx_idx; unsigned int head; @@ -776,7 +776,7 @@ static void atmel_tasklet_func(unsigned long data) if (status_change & ATMEL_US_CTS) uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); - wake_up_interruptible(&port->info->delta_msr_wait); + wake_up_interruptible(&port->state->delta_msr_wait); atmel_port->irq_status_prev = status; } @@ -795,7 +795,7 @@ static void atmel_tasklet_func(unsigned long data) static int atmel_startup(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; int retval; /* @@ -854,7 +854,7 @@ static int atmel_startup(struct uart_port *port) } if (atmel_use_dma_tx(port)) { struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; pdc->buf = xmit->buf; pdc->dma_addr = dma_map_single(port->dev, diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index 4fff4e52403..50abb7e557f 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c @@ -144,7 +144,7 @@ static void bfin_serial_stop_tx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; #ifdef CONFIG_SERIAL_BFIN_DMA - struct circ_buf *xmit = &uart->port.info->xmit; + struct circ_buf *xmit = &uart->port.state->xmit; #endif while (!(UART_GET_LSR(uart) & TEMT)) @@ -171,7 +171,7 @@ static void 
bfin_serial_stop_tx(struct uart_port *port) static void bfin_serial_start_tx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; - struct tty_struct *tty = uart->port.info->port.tty; + struct tty_struct *tty = uart->port.state->port.tty; #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) { @@ -243,10 +243,10 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) return; } - if (!uart->port.info || !uart->port.info->port.tty) + if (!uart->port.state || !uart->port.state->port.tty) return; #endif - tty = uart->port.info->port.tty; + tty = uart->port.state->port.tty; if (ANOMALY_05000363) { /* The BF533 (and BF561) family of processors have a nice anomaly @@ -331,7 +331,7 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) static void bfin_serial_tx_chars(struct bfin_serial_port *uart) { - struct circ_buf *xmit = &uart->port.info->xmit; + struct circ_buf *xmit = &uart->port.state->xmit; if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { #ifdef CONFIG_BF54x @@ -398,7 +398,7 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id) #ifdef CONFIG_SERIAL_BFIN_DMA static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) { - struct circ_buf *xmit = &uart->port.info->xmit; + struct circ_buf *xmit = &uart->port.state->xmit; uart->tx_done = 0; @@ -436,7 +436,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) { - struct tty_struct *tty = uart->port.info->port.tty; + struct tty_struct *tty = uart->port.state->port.tty; int i, flg, status; status = UART_GET_LSR(uart); @@ -529,7 +529,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; - struct circ_buf *xmit = &uart->port.info->xmit; + struct circ_buf *xmit = &uart->port.state->xmit; #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) { @@ -965,10 +965,10 @@ static void bfin_serial_set_ldisc(struct uart_port *port) int line = port->line; unsigned short val; - if (line >= port->info->port.tty->driver->num) + if (line >= port->state->port.tty->driver->num) return; - switch (port->info->port.tty->termios->c_line) { + switch (port->state->port.tty->termios->c_line) { case N_IRDA: val = UART_GET_GCTL(&bfin_serial_ports[line]); val |= (IREN | RPOLC); diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index c108b1a0ce9..088bb35475f 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c @@ -178,7 +178,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate) static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id) { struct sport_uart_port *up = dev_id; - struct tty_struct *tty = up->port.info->port.tty; + struct tty_struct *tty = up->port.state->port.tty; unsigned int ch; do { @@ -205,7 +205,7 @@ static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id) static irqreturn_t sport_uart_err_irq(int irq, void *dev_id) { struct sport_uart_port *up = dev_id; - struct tty_struct *tty = up->port.info->port.tty; + struct tty_struct *tty = up->port.state->port.tty; unsigned int stat = SPORT_GET_STAT(up); /* Overflow in RX FIFO */ @@ -290,7 +290,7 @@ fail1: static void sport_uart_tx_chars(struct sport_uart_port *up) { - struct circ_buf *xmit = &up->port.info->xmit; + struct 
circ_buf *xmit = &up->port.state->xmit; if (SPORT_GET_STAT(up) & TXF) return; diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c index 80e76426131..b6acd19b458 100644 --- a/drivers/serial/clps711x.c +++ b/drivers/serial/clps711x.c @@ -93,7 +93,7 @@ static void clps711xuart_enable_ms(struct uart_port *port) static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id) { struct uart_port *port = dev_id; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; unsigned int status, ch, flg; status = clps_readl(SYSFLG(port)); @@ -147,7 +147,7 @@ static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id) static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id) { struct uart_port *port = dev_id; - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; int count; if (port->x_char) { diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c index f8df0681e16..8d349b23249 100644 --- a/drivers/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/serial/cpm_uart/cpm_uart_core.c @@ -244,7 +244,7 @@ static void cpm_uart_int_rx(struct uart_port *port) int i; unsigned char ch; u8 *cp; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; cbd_t __iomem *bdp; u16 status; diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c index 6042b87797a..57421d77632 100644 --- a/drivers/serial/dz.c +++ b/drivers/serial/dz.c @@ -197,7 +197,7 @@ static inline void dz_receive_chars(struct dz_mux *mux) while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) { dport = &mux->dport[LINE(status)]; uport = &dport->port; - tty = uport->info->port.tty; /* point to the proper dev */ + tty = uport->state->port.tty; /* point to the proper dev */ ch = UCHAR(status); /* grab the char */ flag = TTY_NORMAL; @@ -249,7 +249,7 @@ static inline void dz_receive_chars(struct dz_mux *mux) } for (i = 0; i < DZ_NB_PORT; i++) if (lines_rx[i]) - tty_flip_buffer_push(mux->dport[i].port.info->port.tty); + tty_flip_buffer_push(mux->dport[i].port.state->port.tty); } /* @@ -268,7 +268,7 @@ static inline void dz_transmit_chars(struct dz_mux *mux) status = dz_in(dport, DZ_CSR); dport = &mux->dport[LINE(status)]; - xmit = &dport->port.info->xmit; + xmit = &dport->port.state->xmit; if (dport->port.x_char) { /* XON/XOFF chars */ dz_out(dport, DZ_TDR, dport->port.x_char); diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 060f4e3d54c..f86c47e08a0 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c @@ -627,7 +627,7 @@ static int icom_write(struct uart_port *port) unsigned long data_count; unsigned char cmdReg; unsigned long offset; - int temp_tail = port->info->xmit.tail; + int temp_tail = port->state->xmit.tail; trace(ICOM_PORT, "WRITE", 0); @@ -638,11 +638,11 @@ static int icom_write(struct uart_port *port) } data_count = 0; - while ((port->info->xmit.head != temp_tail) && + while ((port->state->xmit.head != temp_tail) && (data_count <= XMIT_BUFF_SZ)) { ICOM_PORT->xmit_buf[data_count++] = - port->info->xmit.buf[temp_tail]; + port->state->xmit.buf[temp_tail]; temp_tail++; temp_tail &= (UART_XMIT_SIZE - 1); @@ -694,7 +694,7 @@ static inline void check_modem_status(struct icom_port *icom_port) uart_handle_cts_change(&icom_port->uart_port, delta_status & ICOM_CTS); - wake_up_interruptible(&icom_port->uart_port.info-> + wake_up_interruptible(&icom_port->uart_port.state-> delta_msr_wait); old_status = status; } 
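The check_modem_status() hunk just above shows the other idiom this patch touches in nearly every driver: report modem-line deltas through the serial core helpers, then wake delta_msr_wait, which now sits in uart_state rather than uart_info (the queue a blocked TIOCMIWAIT caller sleeps on). A rough sketch, again with an invented foo_uart driver and made-up status/delta parameters:

#include <linux/tty.h>
#include <linux/serial_core.h>

static void foo_uart_modem_status(struct uart_port *port,
				  unsigned int status, unsigned int delta)
{
	if (delta & TIOCM_RI)
		port->icount.rng++;
	if (delta & TIOCM_DSR)
		port->icount.dsr++;
	if (delta & TIOCM_CAR)	/* helper bumps icount.dcd itself */
		uart_handle_dcd_change(port, status & TIOCM_CAR);
	if (delta & TIOCM_CTS)	/* helper bumps icount.cts itself */
		uart_handle_cts_change(port, status & TIOCM_CTS);

	/* was &port->info->delta_msr_wait before this patch */
	wake_up_interruptible(&port->state->delta_msr_wait);
}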
@@ -718,10 +718,10 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port) icom_port->uart_port.icount.tx += count; for (i=0; i<count && - !uart_circ_empty(&icom_port->uart_port.info->xmit); i++) { + !uart_circ_empty(&icom_port->uart_port.state->xmit); i++) { - icom_port->uart_port.info->xmit.tail++; - icom_port->uart_port.info->xmit.tail &= + icom_port->uart_port.state->xmit.tail++; + icom_port->uart_port.state->xmit.tail &= (UART_XMIT_SIZE - 1); } @@ -735,7 +735,7 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port) static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port) { short int count, rcv_buff; - struct tty_struct *tty = icom_port->uart_port.info->port.tty; + struct tty_struct *tty = icom_port->uart_port.state->port.tty; unsigned short int status; struct uart_icount *icount; unsigned long offset; diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 7485afd0df4..1febeafcb97 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -224,7 +224,7 @@ static void imx_mctrl_check(struct imx_port *sport) if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); - wake_up_interruptible(&sport->port.info->delta_msr_wait); + wake_up_interruptible(&sport->port.state->delta_msr_wait); } /* @@ -236,7 +236,7 @@ static void imx_timeout(unsigned long data) struct imx_port *sport = (struct imx_port *)data; unsigned long flags; - if (sport->port.info) { + if (sport->port.state) { spin_lock_irqsave(&sport->port.lock, flags); imx_mctrl_check(sport); spin_unlock_irqrestore(&sport->port.lock, flags); @@ -323,7 +323,7 @@ static void imx_enable_ms(struct uart_port *port) static inline void imx_transmit_buffer(struct imx_port *sport) { - struct circ_buf *xmit = &sport->port.info->xmit; + struct circ_buf *xmit = &sport->port.state->xmit; while (!(readl(sport->port.membase + UTS) & UTS_TXFULL)) { /* send xmit->buf[xmit->tail] @@ -388,7 +388,7 @@ static irqreturn_t imx_rtsint(int irq, void *dev_id) writel(USR1_RTSD, sport->port.membase + USR1); uart_handle_cts_change(&sport->port, !!val); - wake_up_interruptible(&sport->port.info->delta_msr_wait); + wake_up_interruptible(&sport->port.state->delta_msr_wait); spin_unlock_irqrestore(&sport->port.lock, flags); return IRQ_HANDLED; @@ -397,7 +397,7 @@ static irqreturn_t imx_rtsint(int irq, void *dev_id) static irqreturn_t imx_txint(int irq, void *dev_id) { struct imx_port *sport = dev_id; - struct circ_buf *xmit = &sport->port.info->xmit; + struct circ_buf *xmit = &sport->port.state->xmit; unsigned long flags; spin_lock_irqsave(&sport->port.lock,flags); @@ -427,7 +427,7 @@ static irqreturn_t imx_rxint(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int rx,flg,ignored = 0; - struct tty_struct *tty = sport->port.info->port.tty; + struct tty_struct *tty = sport->port.state->port.tty; unsigned long flags, temp; spin_lock_irqsave(&sport->port.lock,flags); @@ -900,11 +900,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, rational_best_approximation(16 * div * baud, sport->port.uartclk, 1 << 16, 1 << 16, &num, &denom); - if (port->info && port->info->port.tty) { + if (port->state && port->state->port.tty) { tdiv64 = sport->port.uartclk; tdiv64 *= num; do_div(tdiv64, denom * 16 * div); - tty_encode_baud_rate(sport->port.info->port.tty, + tty_encode_baud_rate(sport->port.state->port.tty, (speed_t)tdiv64, (speed_t)tdiv64); } diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c index ae3699d77dd..de4ab1bfee8 100644 --- a/drivers/serial/ioc3_serial.c +++ 
b/drivers/serial/ioc3_serial.c @@ -897,25 +897,25 @@ static void transmit_chars(struct uart_port *the_port) char *start; struct tty_struct *tty; struct ioc3_port *port = get_ioc3_port(the_port); - struct uart_info *info; + struct uart_state *state; if (!the_port) return; if (!port) return; - info = the_port->info; - tty = info->port.tty; + state = the_port->state; + tty = state->port.tty; - if (uart_circ_empty(&info->xmit) || uart_tx_stopped(the_port)) { + if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) { /* Nothing to do or hw stopped */ set_notification(port, N_ALL_OUTPUT, 0); return; } - head = info->xmit.head; - tail = info->xmit.tail; - start = (char *)&info->xmit.buf[tail]; + head = state->xmit.head; + tail = state->xmit.tail; + start = (char *)&state->xmit.buf[tail]; /* write out all the data or until the end of the buffer */ xmit_count = (head < tail) ? (UART_XMIT_SIZE - tail) : (head - tail); @@ -928,14 +928,14 @@ static void transmit_chars(struct uart_port *the_port) /* advance the pointers */ tail += result; tail &= UART_XMIT_SIZE - 1; - info->xmit.tail = tail; - start = (char *)&info->xmit.buf[tail]; + state->xmit.tail = tail; + start = (char *)&state->xmit.buf[tail]; } } - if (uart_circ_chars_pending(&info->xmit) < WAKEUP_CHARS) + if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS) uart_write_wakeup(the_port); - if (uart_circ_empty(&info->xmit)) { + if (uart_circ_empty(&state->xmit)) { set_notification(port, N_OUTPUT_LOWAT, 0); } else { set_notification(port, N_OUTPUT_LOWAT, 1); @@ -956,7 +956,7 @@ ioc3_change_speed(struct uart_port *the_port, unsigned int cflag; int baud; int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8; - struct uart_info *info = the_port->info; + struct uart_state *state = the_port->state; cflag = new_termios->c_cflag; @@ -997,14 +997,14 @@ ioc3_change_speed(struct uart_port *the_port, the_port->ignore_status_mask = N_ALL_INPUT; - info->port.tty->low_latency = 1; + state->port.tty->low_latency = 1; - if (I_IGNPAR(info->port.tty)) + if (I_IGNPAR(state->port.tty)) the_port->ignore_status_mask &= ~(N_PARITY_ERROR | N_FRAMING_ERROR); - if (I_IGNBRK(info->port.tty)) { + if (I_IGNBRK(state->port.tty)) { the_port->ignore_status_mask &= ~N_BREAK; - if (I_IGNPAR(info->port.tty)) + if (I_IGNPAR(state->port.tty)) the_port->ignore_status_mask &= ~N_OVERRUN_ERROR; } if (!(cflag & CREAD)) { @@ -1286,7 +1286,7 @@ static inline int do_read(struct uart_port *the_port, char *buf, int len) uart_handle_dcd_change (port->ip_port, 0); wake_up_interruptible - (&the_port->info-> + (&the_port->state-> delta_msr_wait); } @@ -1392,21 +1392,21 @@ static int receive_chars(struct uart_port *the_port) struct tty_struct *tty; unsigned char ch[MAX_CHARS]; int read_count = 0, read_room, flip = 0; - struct uart_info *info = the_port->info; + struct uart_state *state = the_port->state; struct ioc3_port *port = get_ioc3_port(the_port); unsigned long pflags; /* Make sure all the pointers are "good" ones */ - if (!info) + if (!state) return 0; - if (!info->port.tty) + if (!state->port.tty) return 0; if (!(port->ip_flags & INPUT_ENABLE)) return 0; spin_lock_irqsave(&the_port->lock, pflags); - tty = info->port.tty; + tty = state->port.tty; read_count = do_read(the_port, ch, MAX_CHARS); if (read_count > 0) { @@ -1491,7 +1491,7 @@ ioc3uart_intr_one(struct ioc3_submodule *is, uart_handle_dcd_change(the_port, shadow & SHADOW_DCD); wake_up_interruptible - (&the_port->info->delta_msr_wait); + (&the_port->state->delta_msr_wait); } else if ((port->ip_notify & 
N_DDCD) && !(shadow & SHADOW_DCD)) { /* Flag delta DCD/no DCD */ @@ -1511,7 +1511,7 @@ ioc3uart_intr_one(struct ioc3_submodule *is, uart_handle_cts_change(the_port, shadow & SHADOW_CTS); wake_up_interruptible - (&the_port->info->delta_msr_wait); + (&the_port->state->delta_msr_wait); } } @@ -1721,14 +1721,14 @@ static void ic3_shutdown(struct uart_port *the_port) { unsigned long port_flags; struct ioc3_port *port; - struct uart_info *info; + struct uart_state *state; port = get_ioc3_port(the_port); if (!port) return; - info = the_port->info; - wake_up_interruptible(&info->delta_msr_wait); + state = the_port->state; + wake_up_interruptible(&state->delta_msr_wait); spin_lock_irqsave(&the_port->lock, port_flags); set_notification(port, N_ALL, 0); diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c index e5c58fe7e74..2055d323f15 100644 --- a/drivers/serial/ioc4_serial.c +++ b/drivers/serial/ioc4_serial.c @@ -1627,25 +1627,25 @@ static void transmit_chars(struct uart_port *the_port) char *start; struct tty_struct *tty; struct ioc4_port *port = get_ioc4_port(the_port, 0); - struct uart_info *info; + struct uart_state *state; if (!the_port) return; if (!port) return; - info = the_port->info; - tty = info->port.tty; + state = the_port->state; + tty = state->port.tty; - if (uart_circ_empty(&info->xmit) || uart_tx_stopped(the_port)) { + if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) { /* Nothing to do or hw stopped */ set_notification(port, N_ALL_OUTPUT, 0); return; } - head = info->xmit.head; - tail = info->xmit.tail; - start = (char *)&info->xmit.buf[tail]; + head = state->xmit.head; + tail = state->xmit.tail; + start = (char *)&state->xmit.buf[tail]; /* write out all the data or until the end of the buffer */ xmit_count = (head < tail) ? 
(UART_XMIT_SIZE - tail) : (head - tail); @@ -1658,14 +1658,14 @@ static void transmit_chars(struct uart_port *the_port) /* advance the pointers */ tail += result; tail &= UART_XMIT_SIZE - 1; - info->xmit.tail = tail; - start = (char *)&info->xmit.buf[tail]; + state->xmit.tail = tail; + start = (char *)&state->xmit.buf[tail]; } } - if (uart_circ_chars_pending(&info->xmit) < WAKEUP_CHARS) + if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS) uart_write_wakeup(the_port); - if (uart_circ_empty(&info->xmit)) { + if (uart_circ_empty(&state->xmit)) { set_notification(port, N_OUTPUT_LOWAT, 0); } else { set_notification(port, N_OUTPUT_LOWAT, 1); @@ -1686,7 +1686,7 @@ ioc4_change_speed(struct uart_port *the_port, int baud, bits; unsigned cflag; int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8; - struct uart_info *info = the_port->info; + struct uart_state *state = the_port->state; cflag = new_termios->c_cflag; @@ -1738,14 +1738,14 @@ ioc4_change_speed(struct uart_port *the_port, the_port->ignore_status_mask = N_ALL_INPUT; - info->port.tty->low_latency = 1; + state->port.tty->low_latency = 1; - if (I_IGNPAR(info->port.tty)) + if (I_IGNPAR(state->port.tty)) the_port->ignore_status_mask &= ~(N_PARITY_ERROR | N_FRAMING_ERROR); - if (I_IGNBRK(info->port.tty)) { + if (I_IGNBRK(state->port.tty)) { the_port->ignore_status_mask &= ~N_BREAK; - if (I_IGNPAR(info->port.tty)) + if (I_IGNPAR(state->port.tty)) the_port->ignore_status_mask &= ~N_OVERRUN_ERROR; } if (!(cflag & CREAD)) { @@ -1784,7 +1784,7 @@ ioc4_change_speed(struct uart_port *the_port, static inline int ic4_startup_local(struct uart_port *the_port) { struct ioc4_port *port; - struct uart_info *info; + struct uart_state *state; if (!the_port) return -1; @@ -1793,7 +1793,7 @@ static inline int ic4_startup_local(struct uart_port *the_port) if (!port) return -1; - info = the_port->info; + state = the_port->state; local_open(port); @@ -1801,7 +1801,7 @@ static inline int ic4_startup_local(struct uart_port *the_port) ioc4_set_proto(port, the_port->mapbase); /* set the speed of the serial port */ - ioc4_change_speed(the_port, info->port.tty->termios, + ioc4_change_speed(the_port, state->port.tty->termios, (struct ktermios *)0); return 0; @@ -1882,7 +1882,7 @@ static void handle_intr(void *arg, uint32_t sio_ir) the_port = port->ip_port; the_port->icount.dcd = 1; wake_up_interruptible - (&the_port-> info->delta_msr_wait); + (&the_port->state->delta_msr_wait); } else if ((port->ip_notify & N_DDCD) && !(shadow & IOC4_SHADOW_DCD)) { /* Flag delta DCD/no DCD */ @@ -1904,7 +1904,7 @@ static void handle_intr(void *arg, uint32_t sio_ir) the_port->icount.cts = (shadow & IOC4_SHADOW_CTS) ? 
1 : 0; wake_up_interruptible - (&the_port->info->delta_msr_wait); + (&the_port->state->delta_msr_wait); } } @@ -2236,7 +2236,7 @@ static inline int do_read(struct uart_port *the_port, unsigned char *buf, && port->ip_port) { the_port->icount.dcd = 0; wake_up_interruptible - (&the_port->info-> + (&the_port->state-> delta_msr_wait); } @@ -2341,17 +2341,17 @@ static void receive_chars(struct uart_port *the_port) unsigned char ch[IOC4_MAX_CHARS]; int read_count, request_count = IOC4_MAX_CHARS; struct uart_icount *icount; - struct uart_info *info = the_port->info; + struct uart_state *state = the_port->state; unsigned long pflags; /* Make sure all the pointers are "good" ones */ - if (!info) + if (!state) return; - if (!info->port.tty) + if (!state->port.tty) return; spin_lock_irqsave(&the_port->lock, pflags); - tty = info->port.tty; + tty = state->port.tty; request_count = tty_buffer_request_room(tty, IOC4_MAX_CHARS); @@ -2430,19 +2430,19 @@ static void ic4_shutdown(struct uart_port *the_port) { unsigned long port_flags; struct ioc4_port *port; - struct uart_info *info; + struct uart_state *state; port = get_ioc4_port(the_port, 0); if (!port) return; - info = the_port->info; + state = the_port->state; port->ip_port = NULL; - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&state->delta_msr_wait); - if (info->port.tty) - set_bit(TTY_IO_ERROR, &info->port.tty->flags); + if (state->port.tty) + set_bit(TTY_IO_ERROR, &state->port.tty->flags); spin_lock_irqsave(&the_port->lock, port_flags); set_notification(port, N_ALL, 0); @@ -2538,7 +2538,7 @@ static int ic4_startup(struct uart_port *the_port) int retval; struct ioc4_port *port; struct ioc4_control *control; - struct uart_info *info; + struct uart_state *state; unsigned long port_flags; if (!the_port) @@ -2546,7 +2546,7 @@ static int ic4_startup(struct uart_port *the_port) port = get_ioc4_port(the_port, 1); if (!port) return -ENODEV; - info = the_port->info; + state = the_port->state; control = port->ip_control; if (!control) { diff --git a/drivers/serial/ip22zilog.c b/drivers/serial/ip22zilog.c index 0d9acbd0bb7..2e847deb41d 100644 --- a/drivers/serial/ip22zilog.c +++ b/drivers/serial/ip22zilog.c @@ -256,9 +256,9 @@ static struct tty_struct *ip22zilog_receive_chars(struct uart_ip22zilog_port *up unsigned int r1; tty = NULL; - if (up->port.info != NULL && - up->port.info->port.tty != NULL) - tty = up->port.info->port.tty; + if (up->port.state != NULL && + up->port.state->port.tty != NULL) + tty = up->port.state->port.tty; for (;;) { ch = readb(&channel->control); @@ -354,7 +354,7 @@ static void ip22zilog_status_handle(struct uart_ip22zilog_port *up, uart_handle_cts_change(&up->port, (status & CTS)); - wake_up_interruptible(&up->port.info->delta_msr_wait); + wake_up_interruptible(&up->port.state->delta_msr_wait); } up->prev_status = status; @@ -404,9 +404,9 @@ static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up, return; } - if (up->port.info == NULL) + if (up->port.state == NULL) goto ack_tx_int; - xmit = &up->port.info->xmit; + xmit = &up->port.state->xmit; if (uart_circ_empty(xmit)) goto ack_tx_int; if (uart_tx_stopped(&up->port)) @@ -607,7 +607,7 @@ static void ip22zilog_start_tx(struct uart_port *port) port->icount.tx++; port->x_char = 0; } else { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; writeb(xmit->buf[xmit->tail], &channel->data); ZSDELAY(); diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/serial/jsm/jsm_neo.c index 9dadaa11d26..b4b124e4828 100644 --- 
a/drivers/serial/jsm/jsm_neo.c +++ b/drivers/serial/jsm/jsm_neo.c @@ -989,7 +989,7 @@ static void neo_param(struct jsm_channel *ch) { 50, B50 }, }; - cflag = C_BAUD(ch->uart_port.info->port.tty); + cflag = C_BAUD(ch->uart_port.state->port.tty); baud = 9600; for (i = 0; i < ARRAY_SIZE(baud_rates); i++) { if (baud_rates[i].cflag == cflag) { diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index 00f4577d2f7..7439c037362 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c @@ -147,7 +147,7 @@ static void jsm_tty_send_xchar(struct uart_port *port, char ch) struct ktermios *termios; spin_lock_irqsave(&port->lock, lock_flags); - termios = port->info->port.tty->termios; + termios = port->state->port.tty->termios; if (ch == termios->c_cc[VSTART]) channel->ch_bd->bd_ops->send_start_character(channel); @@ -245,7 +245,7 @@ static int jsm_tty_open(struct uart_port *port) channel->ch_cached_lsr = 0; channel->ch_stops_sent = 0; - termios = port->info->port.tty->termios; + termios = port->state->port.tty->termios; channel->ch_c_cflag = termios->c_cflag; channel->ch_c_iflag = termios->c_iflag; channel->ch_c_oflag = termios->c_oflag; @@ -278,7 +278,7 @@ static void jsm_tty_close(struct uart_port *port) jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev, "start\n"); bd = channel->ch_bd; - ts = port->info->port.tty->termios; + ts = port->state->port.tty->termios; channel->ch_flags &= ~(CH_STOPI); @@ -530,7 +530,7 @@ void jsm_input(struct jsm_channel *ch) if (!ch) return; - tp = ch->uart_port.info->port.tty; + tp = ch->uart_port.state->port.tty; bd = ch->ch_bd; if(!bd) @@ -849,7 +849,7 @@ int jsm_tty_write(struct uart_port *port) u16 tail; u16 tmask; u32 remain; - int temp_tail = port->info->xmit.tail; + int temp_tail = port->state->xmit.tail; struct jsm_channel *channel = (struct jsm_channel *)port; tmask = WQUEUEMASK; @@ -865,10 +865,10 @@ int jsm_tty_write(struct uart_port *port) data_count = 0; if (bufcount >= remain) { bufcount -= remain; - while ((port->info->xmit.head != temp_tail) && + while ((port->state->xmit.head != temp_tail) && (data_count < remain)) { channel->ch_wqueue[head++] = - port->info->xmit.buf[temp_tail]; + port->state->xmit.buf[temp_tail]; temp_tail++; temp_tail &= (UART_XMIT_SIZE - 1); @@ -880,10 +880,10 @@ int jsm_tty_write(struct uart_port *port) data_count1 = 0; if (bufcount > 0) { remain = bufcount; - while ((port->info->xmit.head != temp_tail) && + while ((port->state->xmit.head != temp_tail) && (data_count1 < remain)) { channel->ch_wqueue[head++] = - port->info->xmit.buf[temp_tail]; + port->state->xmit.buf[temp_tail]; temp_tail++; temp_tail &= (UART_XMIT_SIZE - 1); @@ -892,7 +892,7 @@ int jsm_tty_write(struct uart_port *port) } } - port->info->xmit.tail = temp_tail; + port->state->xmit.tail = temp_tail; data_count += data_count1; if (data_count) { diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c index 611c97a1565..bea5c215460 100644 --- a/drivers/serial/m32r_sio.c +++ b/drivers/serial/m32r_sio.c @@ -286,7 +286,7 @@ static void m32r_sio_start_tx(struct uart_port *port) { #ifdef CONFIG_SERIAL_M32R_PLDSIO struct uart_sio_port *up = (struct uart_sio_port *)port; - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; if (!(up->ier & UART_IER_THRI)) { up->ier |= UART_IER_THRI; @@ -325,7 +325,7 @@ static void m32r_sio_enable_ms(struct uart_port *port) static void receive_chars(struct uart_sio_port *up, int *status) { - struct tty_struct *tty = up->port.info->port.tty; + struct 
tty_struct *tty = up->port.state->port.tty; unsigned char ch; unsigned char flag; int max_count = 256; @@ -398,7 +398,7 @@ static void receive_chars(struct uart_sio_port *up, int *status) static void transmit_chars(struct uart_sio_port *up) { - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { diff --git a/drivers/serial/max3100.c b/drivers/serial/max3100.c index 9fd33e5622b..75ab00631c4 100644 --- a/drivers/serial/max3100.c +++ b/drivers/serial/max3100.c @@ -184,7 +184,7 @@ static void max3100_timeout(unsigned long data) { struct max3100_port *s = (struct max3100_port *)data; - if (s->port.info) { + if (s->port.state) { max3100_dowork(s); mod_timer(&s->timer, jiffies + s->poll_time); } @@ -261,7 +261,7 @@ static void max3100_work(struct work_struct *w) int rxchars; u16 tx, rx; int conf, cconf, rts, crts; - struct circ_buf *xmit = &s->port.info->xmit; + struct circ_buf *xmit = &s->port.state->xmit; dev_dbg(&s->spi->dev, "%s\n", __func__); @@ -307,8 +307,8 @@ static void max3100_work(struct work_struct *w) } } - if (rxchars > 16 && s->port.info->port.tty != NULL) { - tty_flip_buffer_push(s->port.info->port.tty); + if (rxchars > 16 && s->port.state->port.tty != NULL) { + tty_flip_buffer_push(s->port.state->port.tty); rxchars = 0; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -320,8 +320,8 @@ static void max3100_work(struct work_struct *w) (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)))); - if (rxchars > 0 && s->port.info->port.tty != NULL) - tty_flip_buffer_push(s->port.info->port.tty); + if (rxchars > 0 && s->port.state->port.tty != NULL) + tty_flip_buffer_push(s->port.state->port.tty); } static irqreturn_t max3100_irq(int irqno, void *dev_id) @@ -429,7 +429,7 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios, int baud = 0; unsigned cflag; u32 param_new, param_mask, parity = 0; - struct tty_struct *tty = s->port.info->port.tty; + struct tty_struct *tty = s->port.state->port.tty; dev_dbg(&s->spi->dev, "%s\n", __func__); if (!tty) @@ -529,7 +529,7 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios, MAX3100_STATUS_OE; /* we are sending char from a workqueue so enable */ - s->port.info->port.tty->low_latency = 1; + s->port.state->port.tty->low_latency = 1; if (s->poll_time > 0) del_timer_sync(&s->timer); diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c index 0eefb07beba..b44382442bf 100644 --- a/drivers/serial/mcf.c +++ b/drivers/serial/mcf.c @@ -323,7 +323,7 @@ static void mcf_rx_chars(struct mcf_uart *pp) uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag); } - tty_flip_buffer_push(port->info->port.tty); + tty_flip_buffer_push(port->state->port.tty); } /****************************************************************************/ @@ -331,7 +331,7 @@ static void mcf_rx_chars(struct mcf_uart *pp) static void mcf_tx_chars(struct mcf_uart *pp) { struct uart_port *port = &pp->port; - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; if (port->x_char) { /* Send special char - probably flow control */ diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index abbd146c50d..d7bcd074d38 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -745,7 +745,7 @@ static struct uart_ops mpc52xx_uart_ops = { static inline int mpc52xx_uart_int_rx_chars(struct uart_port *port) { - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = 
port->state->port.tty; unsigned char ch, flag; unsigned short status; @@ -812,7 +812,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port) static inline int mpc52xx_uart_int_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; /* Process out of band chars */ if (port->x_char) { diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c index 61d3ade5286..b5496c28e60 100644 --- a/drivers/serial/mpsc.c +++ b/drivers/serial/mpsc.c @@ -936,7 +936,7 @@ static int serial_polled; static int mpsc_rx_intr(struct mpsc_port_info *pi) { struct mpsc_rx_desc *rxre; - struct tty_struct *tty = pi->port.info->port.tty; + struct tty_struct *tty = pi->port.state->port.tty; u32 cmdstat, bytes_in, i; int rc = 0; u8 *bp; @@ -1109,7 +1109,7 @@ static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr) static void mpsc_copy_tx_data(struct mpsc_port_info *pi) { - struct circ_buf *xmit = &pi->port.info->xmit; + struct circ_buf *xmit = &pi->port.state->xmit; u8 *bp; u32 i; diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c index f7c24baa141..ff18d50c99c 100644 --- a/drivers/serial/msm_serial.c +++ b/drivers/serial/msm_serial.c @@ -88,7 +88,7 @@ static void msm_enable_ms(struct uart_port *port) static void handle_rx(struct uart_port *port) { - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; unsigned int sr; /* @@ -136,7 +136,7 @@ static void handle_rx(struct uart_port *port) static void handle_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; struct msm_port *msm_port = UART_TO_MSM(port); int sent_tx; @@ -169,7 +169,7 @@ static void handle_delta_cts(struct uart_port *port) { msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR); port->icount.cts++; - wake_up_interruptible(&port->info->delta_msr_wait); + wake_up_interruptible(&port->state->delta_msr_wait); } static irqreturn_t msm_irq(int irq, void *dev_id) diff --git a/drivers/serial/mux.c b/drivers/serial/mux.c index 953a5ffa9b4..7571aaa138b 100644 --- a/drivers/serial/mux.c +++ b/drivers/serial/mux.c @@ -199,7 +199,7 @@ static void mux_break_ctl(struct uart_port *port, int break_state) static void mux_write(struct uart_port *port) { int count; - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; if(port->x_char) { UART_PUT_CHAR(port, port->x_char); @@ -243,7 +243,7 @@ static void mux_write(struct uart_port *port) static void mux_read(struct uart_port *port) { int data; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; __u32 start_count = port->icount.rx; while(1) { diff --git a/drivers/serial/netx-serial.c b/drivers/serial/netx-serial.c index 3e5dda8518b..7735c9f35fa 100644 --- a/drivers/serial/netx-serial.c +++ b/drivers/serial/netx-serial.c @@ -140,7 +140,7 @@ static void netx_enable_ms(struct uart_port *port) static inline void netx_transmit_buffer(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; if (port->x_char) { writel(port->x_char, port->membase + UART_DR); @@ -185,7 +185,7 @@ static unsigned int netx_tx_empty(struct uart_port *port) static void netx_txint(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { netx_stop_tx(port); @@ -201,7 +201,7 @@ static void 
netx_txint(struct uart_port *port) static void netx_rxint(struct uart_port *port) { unsigned char rx, flg, status; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; while (!(readl(port->membase + UART_FR) & FR_RXFE)) { rx = readl(port->membase + UART_DR); diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c index 9e150b19d72..e1ab8ec0a4a 100644 --- a/drivers/serial/nwpserial.c +++ b/drivers/serial/nwpserial.c @@ -126,7 +126,7 @@ static void nwpserial_config_port(struct uart_port *port, int flags) static irqreturn_t nwpserial_interrupt(int irq, void *dev_id) { struct nwpserial_port *up = dev_id; - struct tty_struct *tty = up->port.info->port.tty; + struct tty_struct *tty = up->port.state->port.tty; irqreturn_t ret; unsigned int iir; unsigned char ch; @@ -261,7 +261,7 @@ static void nwpserial_start_tx(struct uart_port *port) struct nwpserial_port *up; struct circ_buf *xmit; up = container_of(port, struct nwpserial_port, port); - xmit = &up->port.info->xmit; + xmit = &up->port.state->xmit; if (port->x_char) { nwpserial_putchar(up, up->port.x_char); diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index 9c1243fbd51..ab4c85ba354 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c @@ -242,12 +242,12 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap) } /* Sanity check, make sure the old bug is no longer happening */ - if (uap->port.info == NULL || uap->port.info->port.tty == NULL) { + if (uap->port.state == NULL || uap->port.state->port.tty == NULL) { WARN_ON(1); (void)read_zsdata(uap); return NULL; } - tty = uap->port.info->port.tty; + tty = uap->port.state->port.tty; while (1) { error = 0; @@ -369,7 +369,7 @@ static void pmz_status_handle(struct uart_pmac_port *uap) uart_handle_cts_change(&uap->port, !(status & CTS)); - wake_up_interruptible(&uap->port.info->delta_msr_wait); + wake_up_interruptible(&uap->port.state->delta_msr_wait); } if (status & BRK_ABRT) @@ -420,9 +420,9 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap) return; } - if (uap->port.info == NULL) + if (uap->port.state == NULL) goto ack_tx_int; - xmit = &uap->port.info->xmit; + xmit = &uap->port.state->xmit; if (uart_circ_empty(xmit)) { uart_write_wakeup(&uap->port); goto ack_tx_int; @@ -655,7 +655,7 @@ static void pmz_start_tx(struct uart_port *port) port->icount.tx++; port->x_char = 0; } else { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; write_zsdata(uap, xmit->buf[xmit->tail]); zssync(uap); diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/serial/pnx8xxx_uart.c index 1bb8f1b4576..2da74763527 100644 --- a/drivers/serial/pnx8xxx_uart.c +++ b/drivers/serial/pnx8xxx_uart.c @@ -100,7 +100,7 @@ static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport) if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); - wake_up_interruptible(&sport->port.info->delta_msr_wait); + wake_up_interruptible(&sport->port.state->delta_msr_wait); } /* @@ -112,7 +112,7 @@ static void pnx8xxx_timeout(unsigned long data) struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data; unsigned long flags; - if (sport->port.info) { + if (sport->port.state) { spin_lock_irqsave(&sport->port.lock, flags); pnx8xxx_mctrl_check(sport); spin_unlock_irqrestore(&sport->port.lock, flags); @@ -181,7 +181,7 @@ static void pnx8xxx_enable_ms(struct uart_port *port) static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport) { - struct tty_struct *tty = 
sport->port.info->port.tty; + struct tty_struct *tty = sport->port.state->port.tty; unsigned int status, ch, flg; status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) | @@ -243,7 +243,7 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport) static void pnx8xxx_tx_chars(struct pnx8xxx_port *sport) { - struct circ_buf *xmit = &sport->port.info->xmit; + struct circ_buf *xmit = &sport->port.state->xmit; if (sport->port.x_char) { serial_out(sport, PNX8XXX_FIFO, sport->port.x_char); diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c index a48a8a13d87..ad48919c041 100644 --- a/drivers/serial/pxa.c +++ b/drivers/serial/pxa.c @@ -96,7 +96,7 @@ static void serial_pxa_stop_rx(struct uart_port *port) static inline void receive_chars(struct uart_pxa_port *up, int *status) { - struct tty_struct *tty = up->port.info->port.tty; + struct tty_struct *tty = up->port.state->port.tty; unsigned int ch, flag; int max_count = 256; @@ -161,7 +161,7 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status) static void transmit_chars(struct uart_pxa_port *up) { - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { @@ -220,7 +220,7 @@ static inline void check_modem_status(struct uart_pxa_port *up) if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); - wake_up_interruptible(&up->port.info->delta_msr_wait); + wake_up_interruptible(&up->port.state->delta_msr_wait); } /* diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c index 94530f01521..61ef3ae2492 100644 --- a/drivers/serial/sa1100.c +++ b/drivers/serial/sa1100.c @@ -117,7 +117,7 @@ static void sa1100_mctrl_check(struct sa1100_port *sport) if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); - wake_up_interruptible(&sport->port.info->delta_msr_wait); + wake_up_interruptible(&sport->port.state->delta_msr_wait); } /* @@ -129,7 +129,7 @@ static void sa1100_timeout(unsigned long data) struct sa1100_port *sport = (struct sa1100_port *)data; unsigned long flags; - if (sport->port.info) { + if (sport->port.state) { spin_lock_irqsave(&sport->port.lock, flags); sa1100_mctrl_check(sport); spin_unlock_irqrestore(&sport->port.lock, flags); @@ -189,7 +189,7 @@ static void sa1100_enable_ms(struct uart_port *port) static void sa1100_rx_chars(struct sa1100_port *sport) { - struct tty_struct *tty = sport->port.info->port.tty; + struct tty_struct *tty = sport->port.state->port.tty; unsigned int status, ch, flg; status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | @@ -239,7 +239,7 @@ sa1100_rx_chars(struct sa1100_port *sport) static void sa1100_tx_chars(struct sa1100_port *sport) { - struct circ_buf *xmit = &sport->port.info->xmit; + struct circ_buf *xmit = &sport->port.state->xmit; if (sport->port.x_char) { UART_PUT_CHAR(sport, sport->port.x_char); diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c index c8851a0db63..1523e8d9ae7 100644 --- a/drivers/serial/samsung.c +++ b/drivers/serial/samsung.c @@ -196,7 +196,7 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id) { struct s3c24xx_uart_port *ourport = dev_id; struct uart_port *port = &ourport->port; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; unsigned int ufcon, ch, flag, ufstat, uerstat; int max_count = 64; @@ -281,7 +281,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id) { struct s3c24xx_uart_port *ourport = id; struct uart_port *port = &ourport->port; - struct circ_buf *xmit = 
&port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; int count = 256; if (port->x_char) { @@ -992,10 +992,10 @@ static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb, struct ktermios *termios; struct tty_struct *tty; - if (uport->info == NULL) + if (uport->state == NULL) goto exit; - tty = uport->info->port.tty; + tty = uport->state->port.tty; if (tty == NULL) goto exit; diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c index 319e8b83f6b..fa5f303b36d 100644 --- a/drivers/serial/sb1250-duart.c +++ b/drivers/serial/sb1250-duart.c @@ -384,13 +384,13 @@ static void sbd_receive_chars(struct sbd_port *sport) uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag); } - tty_flip_buffer_push(uport->info->port.tty); + tty_flip_buffer_push(uport->state->port.tty); } static void sbd_transmit_chars(struct sbd_port *sport) { struct uart_port *uport = &sport->port; - struct circ_buf *xmit = &sport->port.info->xmit; + struct circ_buf *xmit = &sport->port.state->xmit; unsigned int mask; int stop_tx; @@ -440,7 +440,7 @@ static void sbd_status_handle(struct sbd_port *sport) if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) << S_DUART_IN_PIN_CHNG)) - wake_up_interruptible(&uport->info->delta_msr_wait); + wake_up_interruptible(&uport->state->delta_msr_wait); } static irqreturn_t sbd_interrupt(int irq, void *dev_id) diff --git a/drivers/serial/sc26xx.c b/drivers/serial/sc26xx.c index e0be11ceaa2..75038ad2b24 100644 --- a/drivers/serial/sc26xx.c +++ b/drivers/serial/sc26xx.c @@ -140,8 +140,8 @@ static struct tty_struct *receive_chars(struct uart_port *port) char flag; u8 status; - if (port->info != NULL) /* Unopened serial console */ - tty = port->info->port.tty; + if (port->state != NULL) /* Unopened serial console */ + tty = port->state->port.tty; while (limit-- > 0) { status = READ_SC_PORT(port, SR); @@ -190,10 +190,10 @@ static void transmit_chars(struct uart_port *port) { struct circ_buf *xmit; - if (!port->info) + if (!port->state) return; - xmit = &port->info->xmit; + xmit = &port->state->xmit; if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { sc26xx_disable_irq(port, IMR_TXRDY); return; @@ -316,7 +316,7 @@ static void sc26xx_stop_tx(struct uart_port *port) /* port->lock held by caller. */ static void sc26xx_start_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; while (!uart_circ_empty(xmit)) { if (!(READ_SC_PORT(port, SR) & SR_TXRDY)) { diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index b0bb29d804a..ea53b6f224b 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -52,7 +52,7 @@ static struct lock_class_key port_lock_key; #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) -#define uart_users(state) ((state)->count + (state)->info.port.blocked_open) +#define uart_users(state) ((state)->count + (state)->port.blocked_open) #ifdef CONFIG_SERIAL_CORE_CONSOLE #define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) @@ -71,19 +71,19 @@ static void uart_change_pm(struct uart_state *state, int pm_state); */ void uart_write_wakeup(struct uart_port *port) { - struct uart_info *info = port->info; + struct uart_state *state = port->state; /* * This means you called this function _after_ the port was * closed. No cookie for you. 
*/ - BUG_ON(!info); - tasklet_schedule(&info->tlet); + BUG_ON(!state); + tasklet_schedule(&state->tlet); } static void uart_stop(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned long flags; spin_lock_irqsave(&port->lock, flags); @@ -94,9 +94,9 @@ static void uart_stop(struct tty_struct *tty) static void __uart_start(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; - if (!uart_circ_empty(&state->info.xmit) && state->info.xmit.buf && + if (!uart_circ_empty(&state->xmit) && state->xmit.buf && !tty->stopped && !tty->hw_stopped) port->ops->start_tx(port); } @@ -104,7 +104,7 @@ static void __uart_start(struct tty_struct *tty) static void uart_start(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned long flags; spin_lock_irqsave(&port->lock, flags); @@ -115,7 +115,7 @@ static void uart_start(struct tty_struct *tty) static void uart_tasklet_action(unsigned long data) { struct uart_state *state = (struct uart_state *)data; - tty_wakeup(state->info.port.tty); + tty_wakeup(state->port.tty); } static inline void @@ -141,12 +141,11 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) */ static int uart_startup(struct uart_state *state, int init_hw) { - struct uart_info *info = &state->info; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned long page; int retval = 0; - if (info->flags & UIF_INITIALIZED) + if (state->flags & UIF_INITIALIZED) return 0; /* @@ -154,7 +153,7 @@ static int uart_startup(struct uart_state *state, int init_hw) * once we have successfully opened the port. Also set * up the tty->alt_speed kludge */ - set_bit(TTY_IO_ERROR, &info->port.tty->flags); + set_bit(TTY_IO_ERROR, &state->port.tty->flags); if (port->type == PORT_UNKNOWN) return 0; @@ -163,14 +162,14 @@ static int uart_startup(struct uart_state *state, int init_hw) * Initialise and allocate the transmit and temporary * buffer. */ - if (!info->xmit.buf) { + if (!state->xmit.buf) { /* This is protected by the per port mutex */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; - info->xmit.buf = (unsigned char *) page; - uart_circ_clear(&info->xmit); + state->xmit.buf = (unsigned char *) page; + uart_circ_clear(&state->xmit); } retval = port->ops->startup(port); @@ -185,20 +184,20 @@ static int uart_startup(struct uart_state *state, int init_hw) * Setup the RTS and DTR signals once the * port is open and ready to respond. 
*/ - if (info->port.tty->termios->c_cflag & CBAUD) + if (state->port.tty->termios->c_cflag & CBAUD) uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); } - if (info->flags & UIF_CTS_FLOW) { + if (state->flags & UIF_CTS_FLOW) { spin_lock_irq(&port->lock); if (!(port->ops->get_mctrl(port) & TIOCM_CTS)) - info->port.tty->hw_stopped = 1; + state->port.tty->hw_stopped = 1; spin_unlock_irq(&port->lock); } - info->flags |= UIF_INITIALIZED; + state->flags |= UIF_INITIALIZED; - clear_bit(TTY_IO_ERROR, &info->port.tty->flags); + clear_bit(TTY_IO_ERROR, &state->port.tty->flags); } if (retval && capable(CAP_SYS_ADMIN)) @@ -214,9 +213,8 @@ static int uart_startup(struct uart_state *state, int init_hw) */ static void uart_shutdown(struct uart_state *state) { - struct uart_info *info = &state->info; - struct uart_port *port = state->port; - struct tty_struct *tty = info->port.tty; + struct uart_port *port = state->uart_port; + struct tty_struct *tty = state->port.tty; /* * Set the TTY IO error marker @@ -224,8 +222,8 @@ static void uart_shutdown(struct uart_state *state) if (tty) set_bit(TTY_IO_ERROR, &tty->flags); - if (info->flags & UIF_INITIALIZED) { - info->flags &= ~UIF_INITIALIZED; + if (state->flags & UIF_INITIALIZED) { + state->flags &= ~UIF_INITIALIZED; /* * Turn off DTR and RTS early. @@ -240,7 +238,7 @@ static void uart_shutdown(struct uart_state *state) * any outstanding file descriptors should be pointing at * hung_up_tty_fops now. */ - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&state->delta_msr_wait); /* * Free the IRQ and disable the port. @@ -256,14 +254,14 @@ static void uart_shutdown(struct uart_state *state) /* * kill off our tasklet */ - tasklet_kill(&info->tlet); + tasklet_kill(&state->tlet); /* * Free the transmit buffer page. 
*/ - if (info->xmit.buf) { - free_page((unsigned long)info->xmit.buf); - info->xmit.buf = NULL; + if (state->xmit.buf) { + free_page((unsigned long)state->xmit.buf); + state->xmit.buf = NULL; } } @@ -430,8 +428,8 @@ EXPORT_SYMBOL(uart_get_divisor); static void uart_change_speed(struct uart_state *state, struct ktermios *old_termios) { - struct tty_struct *tty = state->info.port.tty; - struct uart_port *port = state->port; + struct tty_struct *tty = state->port.tty; + struct uart_port *port = state->uart_port; struct ktermios *termios; /* @@ -447,14 +445,14 @@ uart_change_speed(struct uart_state *state, struct ktermios *old_termios) * Set flags based on termios cflag */ if (termios->c_cflag & CRTSCTS) - state->info.flags |= UIF_CTS_FLOW; + state->flags |= UIF_CTS_FLOW; else - state->info.flags &= ~UIF_CTS_FLOW; + state->flags &= ~UIF_CTS_FLOW; if (termios->c_cflag & CLOCAL) - state->info.flags &= ~UIF_CHECK_CD; + state->flags &= ~UIF_CHECK_CD; else - state->info.flags |= UIF_CHECK_CD; + state->flags |= UIF_CHECK_CD; port->ops->set_termios(port, termios, old_termios); } @@ -482,7 +480,7 @@ static int uart_put_char(struct tty_struct *tty, unsigned char ch) { struct uart_state *state = tty->driver_data; - return __uart_put_char(state->port, &state->info.xmit, ch); + return __uart_put_char(state->uart_port, &state->xmit, ch); } static void uart_flush_chars(struct tty_struct *tty) @@ -508,8 +506,8 @@ uart_write(struct tty_struct *tty, const unsigned char *buf, int count) return -EL3HLT; } - port = state->port; - circ = &state->info.xmit; + port = state->uart_port; + circ = &state->xmit; if (!circ->buf) return 0; @@ -539,9 +537,9 @@ static int uart_write_room(struct tty_struct *tty) unsigned long flags; int ret; - spin_lock_irqsave(&state->port->lock, flags); - ret = uart_circ_chars_free(&state->info.xmit); - spin_unlock_irqrestore(&state->port->lock, flags); + spin_lock_irqsave(&state->uart_port->lock, flags); + ret = uart_circ_chars_free(&state->xmit); + spin_unlock_irqrestore(&state->uart_port->lock, flags); return ret; } @@ -551,9 +549,9 @@ static int uart_chars_in_buffer(struct tty_struct *tty) unsigned long flags; int ret; - spin_lock_irqsave(&state->port->lock, flags); - ret = uart_circ_chars_pending(&state->info.xmit); - spin_unlock_irqrestore(&state->port->lock, flags); + spin_lock_irqsave(&state->uart_port->lock, flags); + ret = uart_circ_chars_pending(&state->xmit); + spin_unlock_irqrestore(&state->uart_port->lock, flags); return ret; } @@ -572,11 +570,11 @@ static void uart_flush_buffer(struct tty_struct *tty) return; } - port = state->port; + port = state->uart_port; pr_debug("uart_flush_buffer(%d) called\n", tty->index); spin_lock_irqsave(&port->lock, flags); - uart_circ_clear(&state->info.xmit); + uart_circ_clear(&state->xmit); if (port->ops->flush_buffer) port->ops->flush_buffer(port); spin_unlock_irqrestore(&port->lock, flags); @@ -590,7 +588,7 @@ static void uart_flush_buffer(struct tty_struct *tty) static void uart_send_xchar(struct tty_struct *tty, char ch) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned long flags; if (port->ops->send_xchar) @@ -613,13 +611,13 @@ static void uart_throttle(struct tty_struct *tty) uart_send_xchar(tty, STOP_CHAR(tty)); if (tty->termios->c_cflag & CRTSCTS) - uart_clear_mctrl(state->port, TIOCM_RTS); + uart_clear_mctrl(state->uart_port, TIOCM_RTS); } static void uart_unthrottle(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; - struct 
uart_port *port = state->port; + struct uart_port *port = state->uart_port; if (I_IXOFF(tty)) { if (port->x_char) @@ -635,7 +633,7 @@ static void uart_unthrottle(struct tty_struct *tty) static int uart_get_info(struct uart_state *state, struct serial_struct __user *retinfo) { - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; struct serial_struct tmp; memset(&tmp, 0, sizeof(tmp)); @@ -674,7 +672,7 @@ static int uart_set_info(struct uart_state *state, struct serial_struct __user *newinfo) { struct serial_struct new_serial; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned long new_port; unsigned int change_irq, change_port, closing_wait; unsigned int old_custom_divisor, close_delay; @@ -840,15 +838,15 @@ static int uart_set_info(struct uart_state *state, state->closing_wait = closing_wait; if (new_serial.xmit_fifo_size) port->fifosize = new_serial.xmit_fifo_size; - if (state->info.port.tty) - state->info.port.tty->low_latency = + if (state->port.tty) + state->port.tty->low_latency = (port->flags & UPF_LOW_LATENCY) ? 1 : 0; check_and_exit: retval = 0; if (port->type == PORT_UNKNOWN) goto exit; - if (state->info.flags & UIF_INITIALIZED) { + if (state->flags & UIF_INITIALIZED) { if (((old_flags ^ port->flags) & UPF_SPD_MASK) || old_custom_divisor != port->custom_divisor) { /* @@ -861,7 +859,7 @@ static int uart_set_info(struct uart_state *state, printk(KERN_NOTICE "%s sets custom speed on %s. This " "is deprecated.\n", current->comm, - tty_name(state->info.port.tty, buf)); + tty_name(state->port.tty, buf)); } uart_change_speed(state, NULL); } @@ -880,7 +878,7 @@ static int uart_set_info(struct uart_state *state, static int uart_get_lsr_info(struct uart_state *state, unsigned int __user *value) { - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned int result; result = port->ops->tx_empty(port); @@ -892,8 +890,8 @@ static int uart_get_lsr_info(struct uart_state *state, * interrupt happens). 
*/ if (port->x_char || - ((uart_circ_chars_pending(&state->info.xmit) > 0) && - !state->info.port.tty->stopped && !state->info.port.tty->hw_stopped)) + ((uart_circ_chars_pending(&state->xmit) > 0) && + !state->port.tty->stopped && !state->port.tty->hw_stopped)) result &= ~TIOCSER_TEMT; return put_user(result, value); @@ -902,7 +900,7 @@ static int uart_get_lsr_info(struct uart_state *state, static int uart_tiocmget(struct tty_struct *tty, struct file *file) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; int result = -EIO; mutex_lock(&state->mutex); @@ -924,7 +922,7 @@ uart_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; int ret = -EIO; mutex_lock(&state->mutex); @@ -940,7 +938,7 @@ uart_tiocmset(struct tty_struct *tty, struct file *file, static int uart_break_ctl(struct tty_struct *tty, int break_state) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; mutex_lock(&state->mutex); @@ -953,7 +951,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state) static int uart_do_autoconfig(struct uart_state *state) { - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; int flags, ret; if (!capable(CAP_SYS_ADMIN)) @@ -1003,7 +1001,7 @@ static int uart_do_autoconfig(struct uart_state *state) static int uart_wait_modem_status(struct uart_state *state, unsigned long arg) { - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; DECLARE_WAITQUEUE(wait, current); struct uart_icount cprev, cnow; int ret; @@ -1020,7 +1018,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg) port->ops->enable_ms(port); spin_unlock_irq(&port->lock); - add_wait_queue(&state->info.delta_msr_wait, &wait); + add_wait_queue(&state->delta_msr_wait, &wait); for (;;) { spin_lock_irq(&port->lock); memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); @@ -1048,7 +1046,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg) } current->state = TASK_RUNNING; - remove_wait_queue(&state->info.delta_msr_wait, &wait); + remove_wait_queue(&state->delta_msr_wait, &wait); return ret; } @@ -1064,7 +1062,7 @@ static int uart_get_count(struct uart_state *state, { struct serial_icounter_struct icount; struct uart_icount cnow; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; spin_lock_irq(&port->lock); memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); @@ -1160,7 +1158,7 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, break; default: { - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; if (port->ops->ioctl) ret = port->ops->ioctl(port, cmd, arg); break; @@ -1175,7 +1173,7 @@ out: static void uart_set_ldisc(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; if (port->ops->set_ldisc) port->ops->set_ldisc(port); @@ -1207,7 +1205,7 @@ static void uart_set_termios(struct tty_struct *tty, /* Handle transition to B0 status */ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) - uart_clear_mctrl(state->port, TIOCM_RTS | TIOCM_DTR); + uart_clear_mctrl(state->uart_port, TIOCM_RTS | TIOCM_DTR); /* 
Handle transition away from B0 status */ if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { @@ -1215,25 +1213,25 @@ static void uart_set_termios(struct tty_struct *tty, if (!(cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags)) mask |= TIOCM_RTS; - uart_set_mctrl(state->port, mask); + uart_set_mctrl(state->uart_port, mask); } /* Handle turning off CRTSCTS */ if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) { - spin_lock_irqsave(&state->port->lock, flags); + spin_lock_irqsave(&state->uart_port->lock, flags); tty->hw_stopped = 0; __uart_start(tty); - spin_unlock_irqrestore(&state->port->lock, flags); + spin_unlock_irqrestore(&state->uart_port->lock, flags); } /* Handle turning on CRTSCTS */ if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) { - spin_lock_irqsave(&state->port->lock, flags); - if (!(state->port->ops->get_mctrl(state->port) & TIOCM_CTS)) { + spin_lock_irqsave(&state->uart_port->lock, flags); + if (!(state->uart_port->ops->get_mctrl(state->uart_port) & TIOCM_CTS)) { tty->hw_stopped = 1; - state->port->ops->stop_tx(state->port); + state->uart_port->ops->stop_tx(state->uart_port); } - spin_unlock_irqrestore(&state->port->lock, flags); + spin_unlock_irqrestore(&state->uart_port->lock, flags); } #if 0 /* @@ -1244,7 +1242,7 @@ static void uart_set_termios(struct tty_struct *tty, */ if (!(old_termios->c_cflag & CLOCAL) && (tty->termios->c_cflag & CLOCAL)) - wake_up_interruptible(&info->port.open_wait); + wake_up_interruptible(&state->uart_port.open_wait); #endif } @@ -1260,10 +1258,10 @@ static void uart_close(struct tty_struct *tty, struct file *filp) BUG_ON(!kernel_locked()); - if (!state || !state->port) + if (!state || !state->uart_port) return; - port = state->port; + port = state->uart_port; pr_debug("uart_close(%d) called\n", port->line); @@ -1306,7 +1304,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) * At this point, we stop accepting input. To do this, we * disable the receive line status interrupts. */ - if (state->info.flags & UIF_INITIALIZED) { + if (state->flags & UIF_INITIALIZED) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); port->ops->stop_rx(port); @@ -1325,9 +1323,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp) tty_ldisc_flush(tty); tty->closing = 0; - state->info.port.tty = NULL; + state->port.tty = NULL; - if (state->info.port.blocked_open) { + if (state->port.blocked_open) { if (state->close_delay) msleep_interruptible(state->close_delay); } else if (!uart_console(port)) { @@ -1337,8 +1335,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp) /* * Wake up anyone trying to open this port. 
*/ - state->info.flags &= ~UIF_NORMAL_ACTIVE; - wake_up_interruptible(&state->info.port.open_wait); + state->flags &= ~UIF_NORMAL_ACTIVE; + wake_up_interruptible(&state->port.open_wait); done: mutex_unlock(&state->mutex); @@ -1347,7 +1345,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) static void uart_wait_until_sent(struct tty_struct *tty, int timeout) { struct uart_state *state = tty->driver_data; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned long char_time, expire; if (port->type == PORT_UNKNOWN || port->fifosize == 0) @@ -1412,20 +1410,19 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout) static void uart_hangup(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; - struct uart_info *info = &state->info; BUG_ON(!kernel_locked()); - pr_debug("uart_hangup(%d)\n", state->port->line); + pr_debug("uart_hangup(%d)\n", state->uart_port->line); mutex_lock(&state->mutex); - if (info->flags & UIF_NORMAL_ACTIVE) { + if (state->flags & UIF_NORMAL_ACTIVE) { uart_flush_buffer(tty); uart_shutdown(state); state->count = 0; - info->flags &= ~UIF_NORMAL_ACTIVE; - info->port.tty = NULL; - wake_up_interruptible(&info->port.open_wait); - wake_up_interruptible(&info->delta_msr_wait); + state->flags &= ~UIF_NORMAL_ACTIVE; + state->port.tty = NULL; + wake_up_interruptible(&state->port.open_wait); + wake_up_interruptible(&state->delta_msr_wait); } mutex_unlock(&state->mutex); } @@ -1438,8 +1435,8 @@ static void uart_hangup(struct tty_struct *tty) */ static void uart_update_termios(struct uart_state *state) { - struct tty_struct *tty = state->info.port.tty; - struct uart_port *port = state->port; + struct tty_struct *tty = state->port.tty; + struct uart_port *port = state->uart_port; if (uart_console(port) && port->cons->cflag) { tty->termios->c_cflag = port->cons->cflag; @@ -1473,27 +1470,26 @@ static int uart_block_til_ready(struct file *filp, struct uart_state *state) { DECLARE_WAITQUEUE(wait, current); - struct uart_info *info = &state->info; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; unsigned int mctrl; - info->port.blocked_open++; + state->port.blocked_open++; state->count--; - add_wait_queue(&info->port.open_wait, &wait); + add_wait_queue(&state->port.open_wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); /* * If we have been hung up, tell userspace/restart open. */ - if (tty_hung_up_p(filp) || info->port.tty == NULL) + if (tty_hung_up_p(filp) || state->port.tty == NULL) break; /* * If the port has been closed, tell userspace/restart open. */ - if (!(info->flags & UIF_INITIALIZED)) + if (!(state->flags & UIF_INITIALIZED)) break; /* @@ -1506,8 +1502,8 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) * have set TTY_IO_ERROR for a non-existant port. */ if ((filp->f_flags & O_NONBLOCK) || - (info->port.tty->termios->c_cflag & CLOCAL) || - (info->port.tty->flags & (1 << TTY_IO_ERROR))) + (state->port.tty->termios->c_cflag & CLOCAL) || + (state->port.tty->flags & (1 << TTY_IO_ERROR))) break; /* @@ -1515,7 +1511,7 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) * not set RTS here - we want to make sure we catch * the data from the modem. 
*/ - if (info->port.tty->termios->c_cflag & CBAUD) + if (state->port.tty->termios->c_cflag & CBAUD) uart_set_mctrl(port, TIOCM_DTR); /* @@ -1537,15 +1533,15 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) break; } set_current_state(TASK_RUNNING); - remove_wait_queue(&info->port.open_wait, &wait); + remove_wait_queue(&state->port.open_wait, &wait); state->count++; - info->port.blocked_open--; + state->port.blocked_open--; if (signal_pending(current)) return -ERESTARTSYS; - if (!info->port.tty || tty_hung_up_p(filp)) + if (!state->port.tty || tty_hung_up_p(filp)) return -EAGAIN; return 0; @@ -1563,7 +1559,7 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line) } state->count++; - if (!state->port || state->port->flags & UPF_DEAD) { + if (!state->uart_port || state->uart_port->flags & UPF_DEAD) { ret = -ENXIO; goto err_unlock; } @@ -1606,10 +1602,11 @@ static int uart_open(struct tty_struct *tty, struct file *filp) /* * We take the semaphore inside uart_get to guarantee that we won't - * be re-entered while allocating the info structure, or while we + * be re-entered while allocating the state structure, or while we * request any IRQs that the driver may need. This also has the nice * side-effect that it delays the action of uart_hangup, so we can - * guarantee that info->port.tty will always contain something reasonable. + * guarantee that state->port.tty will always contain something + * reasonable. */ state = uart_get(drv, line); if (IS_ERR(state)) { @@ -1623,10 +1620,10 @@ static int uart_open(struct tty_struct *tty, struct file *filp) * Any failures from here onwards should not touch the count. */ tty->driver_data = state; - state->port->info = &state->info; - tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0; + state->uart_port->state = state; + tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0; tty->alt_speed = 0; - state->info.port.tty = tty; + state->port.tty = tty; /* * If the port is in the middle of closing, bail out now. @@ -1659,8 +1656,8 @@ static int uart_open(struct tty_struct *tty, struct file *filp) /* * If this is the first open to succeed, adjust things to suit. */ - if (retval == 0 && !(state->info.flags & UIF_NORMAL_ACTIVE)) { - state->info.flags |= UIF_NORMAL_ACTIVE; + if (retval == 0 && !(state->flags & UIF_NORMAL_ACTIVE)) { + state->flags |= UIF_NORMAL_ACTIVE; uart_update_termios(state); } @@ -1688,7 +1685,7 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) { struct uart_state *state = drv->state + i; int pm_state; - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; char stat_buf[32]; unsigned int status; int mmio; @@ -1958,7 +1955,7 @@ EXPORT_SYMBOL_GPL(uart_set_options); static void uart_change_pm(struct uart_state *state, int pm_state) { - struct uart_port *port = state->port; + struct uart_port *port = state->uart_port; if (state->pm_state != pm_state) { if (port->ops->pm) @@ -2005,11 +2002,11 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) } port->suspended = 1; - if (state->info.flags & UIF_INITIALIZED) { + if (state->flags & UIF_INITIALIZED) { const struct uart_ops *ops = port->ops; int tries; - state->info.flags = (state->info.flags & ~UIF_INITIALIZED) + state->flags = (state->flags & ~UIF_INITIALIZED) | UIF_SUSPENDED; spin_lock_irq(&port->lock); @@ -2084,15 +2081,15 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) /* * If that's unset, use the tty termios setting. 
*/ - if (state->info.port.tty && termios.c_cflag == 0) - termios = *state->info.port.tty->termios; + if (state->port.tty && termios.c_cflag == 0) + termios = *state->port.tty->termios; uart_change_pm(state, 0); port->ops->set_termios(port, &termios, NULL); console_start(port->cons); } - if (state->info.flags & UIF_SUSPENDED) { + if (state->flags & UIF_SUSPENDED) { const struct uart_ops *ops = port->ops; int ret; @@ -2107,7 +2104,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) ops->set_mctrl(port, port->mctrl); ops->start_tx(port); spin_unlock_irq(&port->lock); - state->info.flags |= UIF_INITIALIZED; + state->flags |= UIF_INITIALIZED; } else { /* * Failed to resume - maybe hardware went away? @@ -2117,7 +2114,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) uart_shutdown(state); } - state->info.flags &= ~UIF_SUSPENDED; + state->flags &= ~UIF_SUSPENDED; } mutex_unlock(&state->mutex); @@ -2232,10 +2229,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) int parity = 'n'; int flow = 'n'; - if (!state || !state->port) + if (!state || !state->uart_port) return -1; - port = state->port; + port = state->uart_port; if (!(port->ops->poll_get_char && port->ops->poll_put_char)) return -1; @@ -2253,10 +2250,10 @@ static int uart_poll_get_char(struct tty_driver *driver, int line) struct uart_state *state = drv->state + line; struct uart_port *port; - if (!state || !state->port) + if (!state || !state->uart_port) return -1; - port = state->port; + port = state->uart_port; return port->ops->poll_get_char(port); } @@ -2266,10 +2263,10 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch) struct uart_state *state = drv->state + line; struct uart_port *port; - if (!state || !state->port) + if (!state || !state->uart_port) return; - port = state->port; + port = state->uart_port; port->ops->poll_put_char(port, ch); } #endif @@ -2365,9 +2362,9 @@ int uart_register_driver(struct uart_driver *drv) state->closing_wait = 30000; /* 30 seconds */ mutex_init(&state->mutex); - tty_port_init(&state->info.port); - init_waitqueue_head(&state->info.delta_msr_wait); - tasklet_init(&state->info.tlet, uart_tasklet_action, + tty_port_init(&state->port); + init_waitqueue_head(&state->delta_msr_wait); + tasklet_init(&state->tlet, uart_tasklet_action, (unsigned long)state); } @@ -2430,16 +2427,16 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) mutex_lock(&port_mutex); mutex_lock(&state->mutex); - if (state->port) { + if (state->uart_port) { ret = -EINVAL; goto out; } - state->port = port; + state->uart_port = port; state->pm_state = -1; port->cons = drv->cons; - port->info = &state->info; + port->state = state; /* * If this port is a console, then the spinlock is already @@ -2488,13 +2485,12 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) { struct uart_state *state = drv->state + port->line; - struct uart_info *info; BUG_ON(in_interrupt()); - if (state->port != port) + if (state->uart_port != port) printk(KERN_ALERT "Removing wrong port: %p != %p\n", - state->port, port); + state->uart_port, port); mutex_lock(&port_mutex); @@ -2511,9 +2507,8 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) */ tty_unregister_device(drv->tty_driver, port->line); - info = &state->info; - if (info && info->port.tty) - tty_vhangup(info->port.tty); + if (state->port.tty) + 
tty_vhangup(state->port.tty); /* * Free the port IO and memory resources, if any. @@ -2529,10 +2524,9 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) /* * Kill the tasklet, and free resources. */ - if (info) - tasklet_kill(&info->tlet); + tasklet_kill(&state->tlet); - state->port = NULL; + state->uart_port = NULL; mutex_unlock(&port_mutex); return 0; diff --git a/drivers/serial/serial_ks8695.c b/drivers/serial/serial_ks8695.c index 52db5cc3f90..4560b2e7068 100644 --- a/drivers/serial/serial_ks8695.c +++ b/drivers/serial/serial_ks8695.c @@ -154,7 +154,7 @@ static void ks8695uart_disable_ms(struct uart_port *port) static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id) { struct uart_port *port = dev_id; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; unsigned int status, ch, lsr, flg, max_count = 256; status = UART_GET_LSR(port); /* clears pending LSR interrupts */ @@ -210,7 +210,7 @@ ignore_char: static irqreturn_t ks8695uart_tx_chars(int irq, void *dev_id) { struct uart_port *port = dev_id; - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; unsigned int count; if (port->x_char) { @@ -266,7 +266,7 @@ static irqreturn_t ks8695uart_modem_status(int irq, void *dev_id) if (status & URMS_URTERI) port->icount.rng++; - wake_up_interruptible(&port->info->delta_msr_wait); + wake_up_interruptible(&port->state->delta_msr_wait); return IRQ_HANDLED; } diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c index a7bf024a828..057fc5e8cc8 100644 --- a/drivers/serial/serial_lh7a40x.c +++ b/drivers/serial/serial_lh7a40x.c @@ -138,7 +138,7 @@ static void lh7a40xuart_enable_ms (struct uart_port* port) static void lh7a40xuart_rx_chars (struct uart_port* port) { - struct tty_struct* tty = port->info->port.tty; + struct tty_struct* tty = port->state->port.tty; int cbRxMax = 256; /* (Gross) limit on receive */ unsigned int data; /* Received data and status */ unsigned int flag; @@ -184,7 +184,7 @@ static void lh7a40xuart_rx_chars (struct uart_port* port) static void lh7a40xuart_tx_chars (struct uart_port* port) { - struct circ_buf* xmit = &port->info->xmit; + struct circ_buf* xmit = &port->state->xmit; int cbTxMax = port->fifosize; if (port->x_char) { @@ -241,7 +241,7 @@ static void lh7a40xuart_modem_status (struct uart_port* port) if (delta & CTS) uart_handle_cts_change (port, status & CTS); - wake_up_interruptible (&port->info->delta_msr_wait); + wake_up_interruptible (&port->state->delta_msr_wait); } static irqreturn_t lh7a40xuart_int (int irq, void* dev_id) diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c index 54dd16d66a4..0f7cf4c453e 100644 --- a/drivers/serial/serial_txx9.c +++ b/drivers/serial/serial_txx9.c @@ -272,7 +272,7 @@ static void serial_txx9_initialize(struct uart_port *port) static inline void receive_chars(struct uart_txx9_port *up, unsigned int *status) { - struct tty_struct *tty = up->port.info->port.tty; + struct tty_struct *tty = up->port.state->port.tty; unsigned char ch; unsigned int disr = *status; int max_count = 256; @@ -348,7 +348,7 @@ receive_chars(struct uart_txx9_port *up, unsigned int *status) static inline void transmit_chars(struct uart_txx9_port *up) { - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 32dc2fc50e6..85119fb7cb5 100644 --- 
a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -361,7 +361,7 @@ static inline int sci_rxroom(struct uart_port *port) static void sci_transmit_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; unsigned int stopped = uart_tx_stopped(port); unsigned short status; unsigned short ctrl; @@ -426,7 +426,7 @@ static void sci_transmit_chars(struct uart_port *port) static inline void sci_receive_chars(struct uart_port *port) { struct sci_port *sci_port = to_sci_port(port); - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; int i, count, copied = 0; unsigned short status; unsigned char flag; @@ -546,7 +546,7 @@ static inline int sci_handle_errors(struct uart_port *port) { int copied = 0; unsigned short status = sci_in(port, SCxSR); - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; if (status & SCxSR_ORER(port)) { /* overrun error */ @@ -600,7 +600,7 @@ static inline int sci_handle_errors(struct uart_port *port) static inline int sci_handle_fifo_overrun(struct uart_port *port) { - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; int copied = 0; if (port->type != PORT_SCIF) @@ -623,7 +623,7 @@ static inline int sci_handle_breaks(struct uart_port *port) { int copied = 0; unsigned short status = sci_in(port, SCxSR); - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; struct sci_port *s = to_sci_port(port); if (uart_handle_break(port)) diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c index d5276c012f7..9794e0cd3dc 100644 --- a/drivers/serial/sn_console.c +++ b/drivers/serial/sn_console.c @@ -469,9 +469,9 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags) return; } - if (port->sc_port.info) { + if (port->sc_port.state) { /* The serial_core stuffs are initilized, use them */ - tty = port->sc_port.info->port.tty; + tty = port->sc_port.state->port.tty; } else { /* Not registered yet - can't pass to tty layer. */ @@ -550,9 +550,9 @@ static void sn_transmit_chars(struct sn_cons_port *port, int raw) BUG_ON(!port->sc_is_asynch); - if (port->sc_port.info) { + if (port->sc_port.state) { /* We're initilized, using serial core infrastructure */ - xmit = &port->sc_port.info->xmit; + xmit = &port->sc_port.state->xmit; } else { /* Probably sn_sal_switch_to_asynch has been run but serial core isn't * initilized yet. Just return. Writes are going through @@ -927,7 +927,7 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count) /* We can't look at the xmit buffer if we're not registered with serial core * yet. So only do the fancy recovery after registering */ - if (!port->sc_port.info) { + if (!port->sc_port.state) { /* Not yet registered with serial core - simple case */ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); return; @@ -936,8 +936,8 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count) /* somebody really wants this output, might be an * oops, kdb, panic, etc. make sure they get it. 
*/ if (spin_is_locked(&port->sc_port.lock)) { - int lhead = port->sc_port.info->xmit.head; - int ltail = port->sc_port.info->xmit.tail; + int lhead = port->sc_port.state->xmit.head; + int ltail = port->sc_port.state->xmit.tail; int counter, got_lock = 0; /* @@ -962,13 +962,13 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count) break; } else { /* still locked */ - if ((lhead != port->sc_port.info->xmit.head) + if ((lhead != port->sc_port.state->xmit.head) || (ltail != - port->sc_port.info->xmit.tail)) { + port->sc_port.state->xmit.tail)) { lhead = - port->sc_port.info->xmit.head; + port->sc_port.state->xmit.head; ltail = - port->sc_port.info->xmit.tail; + port->sc_port.state->xmit.tail; counter = 0; } } diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c index 1df5325faab..d548652dee5 100644 --- a/drivers/serial/sunhv.c +++ b/drivers/serial/sunhv.c @@ -184,8 +184,8 @@ static struct tty_struct *receive_chars(struct uart_port *port) { struct tty_struct *tty = NULL; - if (port->info != NULL) /* Unopened serial console */ - tty = port->info->port.tty; + if (port->state != NULL) /* Unopened serial console */ + tty = port->state->port.tty; if (sunhv_ops->receive_chars(port, tty)) sun_do_break(); @@ -197,10 +197,10 @@ static void transmit_chars(struct uart_port *port) { struct circ_buf *xmit; - if (!port->info) + if (!port->state) return; - xmit = &port->info->xmit; + xmit = &port->state->xmit; if (uart_circ_empty(xmit) || uart_tx_stopped(port)) return; diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index 0355efe115d..7c4f2fe8e24 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -117,8 +117,8 @@ receive_chars(struct uart_sunsab_port *up, int count = 0; int i; - if (up->port.info != NULL) /* Unopened serial console */ - tty = up->port.info->port.tty; + if (up->port.state != NULL) /* Unopened serial console */ + tty = up->port.state->port.tty; /* Read number of BYTES (Character + Status) available. 
*/ if (stat->sreg.isr0 & SAB82532_ISR0_RPF) { @@ -229,7 +229,7 @@ static void sunsab_tx_idle(struct uart_sunsab_port *); static void transmit_chars(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; int i; if (stat->sreg.isr1 & SAB82532_ISR1_ALLS) { @@ -297,7 +297,7 @@ static void check_status(struct uart_sunsab_port *up, up->port.icount.dsr++; } - wake_up_interruptible(&up->port.info->delta_msr_wait); + wake_up_interruptible(&up->port.state->delta_msr_wait); } static irqreturn_t sunsab_interrupt(int irq, void *dev_id) @@ -429,7 +429,7 @@ static void sunsab_tx_idle(struct uart_sunsab_port *up) static void sunsab_start_tx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; int i; up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 47c6837850b..5a32365b58a 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -311,7 +311,7 @@ static void sunsu_enable_ms(struct uart_port *port) static struct tty_struct * receive_chars(struct uart_sunsu_port *up, unsigned char *status) { - struct tty_struct *tty = up->port.info->port.tty; + struct tty_struct *tty = up->port.state->port.tty; unsigned char ch, flag; int max_count = 256; int saw_console_brk = 0; @@ -389,7 +389,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status) static void transmit_chars(struct uart_sunsu_port *up) { - struct circ_buf *xmit = &up->port.info->xmit; + struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { @@ -441,7 +441,7 @@ static void check_modem_status(struct uart_sunsu_port *up) if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); - wake_up_interruptible(&up->port.info->delta_msr_wait); + wake_up_interruptible(&up->port.state->delta_msr_wait); } static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id) diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index e09d3cebb4f..055034d12b1 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -328,9 +328,9 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up, unsigned char ch, r1, flag; tty = NULL; - if (up->port.info != NULL && /* Unopened serial console */ - up->port.info->port.tty != NULL) /* Keyboard || mouse */ - tty = up->port.info->port.tty; + if (up->port.state != NULL && /* Unopened serial console */ + up->port.state->port.tty != NULL) /* Keyboard || mouse */ + tty = up->port.state->port.tty; for (;;) { @@ -451,7 +451,7 @@ static void sunzilog_status_handle(struct uart_sunzilog_port *up, uart_handle_cts_change(&up->port, (status & CTS)); - wake_up_interruptible(&up->port.info->delta_msr_wait); + wake_up_interruptible(&up->port.state->delta_msr_wait); } up->prev_status = status; @@ -501,9 +501,9 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up, return; } - if (up->port.info == NULL) + if (up->port.state == NULL) goto ack_tx_int; - xmit = &up->port.info->xmit; + xmit = &up->port.state->xmit; if (uart_circ_empty(xmit)) goto ack_tx_int; @@ -705,7 +705,7 @@ static void sunzilog_start_tx(struct uart_port *port) port->icount.tx++; port->x_char = 0; } else { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; writeb(xmit->buf[xmit->tail], &channel->data); ZSDELAY(); diff 
--git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c index 063a313b755..3d40be6f389 100644 --- a/drivers/serial/timbuart.c +++ b/drivers/serial/timbuart.c @@ -77,7 +77,7 @@ static void timbuart_flush_buffer(struct uart_port *port) static void timbuart_rx_chars(struct uart_port *port) { - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; while (ioread32(port->membase + TIMBUART_ISR) & RXDP) { u8 ch = ioread8(port->membase + TIMBUART_RXFIFO); @@ -86,7 +86,7 @@ static void timbuart_rx_chars(struct uart_port *port) } spin_unlock(&port->lock); - tty_flip_buffer_push(port->info->port.tty); + tty_flip_buffer_push(port->state->port.tty); spin_lock(&port->lock); dev_dbg(port->dev, "%s - total read %d bytes\n", @@ -95,7 +95,7 @@ static void timbuart_rx_chars(struct uart_port *port) static void timbuart_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) && !uart_circ_empty(xmit)) { @@ -118,7 +118,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier) { struct timbuart_port *uart = container_of(port, struct timbuart_port, port); - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; if (uart_circ_empty(xmit) || uart_tx_stopped(port)) return; @@ -231,7 +231,7 @@ static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier) iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR); cts = timbuart_get_mctrl(port); uart_handle_cts_change(port, cts & TIOCM_CTS); - wake_up_interruptible(&port->info->delta_msr_wait); + wake_up_interruptible(&port->state->delta_msr_wait); } *ier |= CTS_DELTA; diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index 3317148a4b9..5d3a573d699 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c @@ -75,7 +75,7 @@ static struct uart_port ulite_ports[ULITE_NR_UARTS]; static int ulite_receive(struct uart_port *port, int stat) { - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; unsigned char ch = 0; char flag = TTY_NORMAL; @@ -125,7 +125,7 @@ static int ulite_receive(struct uart_port *port, int stat) static int ulite_transmit(struct uart_port *port, int stat) { - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; if (stat & ULITE_STATUS_TXFULL) return 0; @@ -162,7 +162,7 @@ static irqreturn_t ulite_isr(int irq, void *dev_id) busy |= ulite_transmit(port, stat); } while (busy); - tty_flip_buffer_push(port->info->port.tty); + tty_flip_buffer_push(port->state->port.tty); return IRQ_HANDLED; } diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c index e945e780b5c..0c08f286a2e 100644 --- a/drivers/serial/ucc_uart.c +++ b/drivers/serial/ucc_uart.c @@ -327,7 +327,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) unsigned char *p; unsigned int count; struct uart_port *port = &qe_port->port; - struct circ_buf *xmit = &port->info->xmit; + struct circ_buf *xmit = &port->state->xmit; bdp = qe_port->rx_cur; @@ -466,7 +466,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port) int i; unsigned char ch, *cp; struct uart_port *port = &qe_port->port; - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; struct qe_bd *bdp; u16 status; unsigned int flg; diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c index dac550e57c2..cf4410e6d53 
100644 --- a/drivers/serial/vr41xx_siu.c +++ b/drivers/serial/vr41xx_siu.c @@ -318,7 +318,7 @@ static inline void receive_chars(struct uart_port *port, uint8_t *status) char flag; int max_count = RX_MAX_COUNT; - tty = port->info->port.tty; + tty = port->state->port.tty; lsr = *status; do { @@ -386,7 +386,7 @@ static inline void check_modem_status(struct uart_port *port) if (msr & UART_MSR_DCTS) uart_handle_cts_change(port, msr & UART_MSR_CTS); - wake_up_interruptible(&port->info->delta_msr_wait); + wake_up_interruptible(&port->state->delta_msr_wait); } static inline void transmit_chars(struct uart_port *port) @@ -394,7 +394,7 @@ static inline void transmit_chars(struct uart_port *port) struct circ_buf *xmit; int max_count = TX_MAX_COUNT; - xmit = &port->info->xmit; + xmit = &port->state->xmit; if (port->x_char) { siu_write(port, UART_TX, port->x_char); diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c index d8c2809b1ab..b9c9fb9198d 100644 --- a/drivers/serial/zs.c +++ b/drivers/serial/zs.c @@ -602,12 +602,12 @@ static void zs_receive_chars(struct zs_port *zport) uart_insert_char(uport, status, Rx_OVR, ch, flag); } - tty_flip_buffer_push(uport->info->port.tty); + tty_flip_buffer_push(uport->state->port.tty); } static void zs_raw_transmit_chars(struct zs_port *zport) { - struct circ_buf *xmit = &zport->port.info->xmit; + struct circ_buf *xmit = &zport->port.state->xmit; /* XON/XOFF chars. */ if (zport->port.x_char) { @@ -686,7 +686,7 @@ static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a) uport->icount.rng++; if (delta) - wake_up_interruptible(&uport->info->delta_msr_wait); + wake_up_interruptible(&uport->state->delta_msr_wait); spin_lock(&scc->zlock); } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 3cd255f0b21..c1542703fba 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -186,7 +186,6 @@ #include struct uart_port; -struct uart_info; struct serial_struct; struct device; @@ -284,7 +283,7 @@ struct uart_port { unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ - struct uart_info *info; /* pointer to parent info */ + struct uart_state *state; /* pointer to parent state */ struct uart_icount icount; /* statistics */ struct console *cons; /* struct console, if any */ @@ -343,8 +342,22 @@ struct uart_port { */ typedef unsigned int __bitwise__ uif_t; -struct uart_info { + +/* + * This is the state information which is persistent across opens. + * The low level driver must not to touch any elements contained + * within. + */ +struct uart_state { struct tty_port port; + unsigned int close_delay; /* msec */ + unsigned int closing_wait; /* msec */ + +#define USF_CLOSING_WAIT_INF (0) +#define USF_CLOSING_WAIT_NONE (~0U) + + int count; + int pm_state; struct circ_buf xmit; uif_t flags; @@ -362,24 +375,7 @@ struct uart_info { struct tasklet_struct tlet; wait_queue_head_t delta_msr_wait; -}; - -/* - * This is the state information which is persistent across opens. - * The low level driver must not to touch any elements contained - * within. 
- */ -struct uart_state { - unsigned int close_delay; /* msec */ - unsigned int closing_wait; /* msec */ - -#define USF_CLOSING_WAIT_INF (0) -#define USF_CLOSING_WAIT_NONE (~0U) - - int count; - int pm_state; - struct uart_info info; - struct uart_port *port; + struct uart_port *uart_port; struct mutex mutex; }; @@ -462,7 +458,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); static inline int uart_tx_stopped(struct uart_port *port) { - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; if(tty->stopped || tty->hw_stopped) return 1; return 0; @@ -477,7 +473,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) #ifdef SUPPORT_SYSRQ if (port->sysrq) { if (ch && time_before(jiffies, port->sysrq)) { - handle_sysrq(ch, port->info->port.tty); + handle_sysrq(ch, port->state->port.tty); port->sysrq = 0; return 1; } @@ -495,7 +491,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) */ static inline int uart_handle_break(struct uart_port *port) { - struct uart_info *info = port->info; + struct uart_state *state = port->state; #ifdef SUPPORT_SYSRQ if (port->cons && port->cons->index == port->line) { if (!port->sysrq) { @@ -506,7 +502,7 @@ static inline int uart_handle_break(struct uart_port *port) } #endif if (port->flags & UPF_SAK) - do_SAK(info->port.tty); + do_SAK(state->port.tty); return 0; } @@ -518,7 +514,7 @@ static inline int uart_handle_break(struct uart_port *port) static inline void uart_handle_dcd_change(struct uart_port *port, unsigned int status) { - struct uart_info *info = port->info; + struct uart_state *state = port->state; port->icount.dcd++; @@ -527,11 +523,11 @@ uart_handle_dcd_change(struct uart_port *port, unsigned int status) hardpps(); #endif - if (info->flags & UIF_CHECK_CD) { + if (state->flags & UIF_CHECK_CD) { if (status) - wake_up_interruptible(&info->port.open_wait); - else if (info->port.tty) - tty_hangup(info->port.tty); + wake_up_interruptible(&state->port.open_wait); + else if (state->port.tty) + tty_hangup(state->port.tty); } } @@ -543,12 +539,12 @@ uart_handle_dcd_change(struct uart_port *port, unsigned int status) static inline void uart_handle_cts_change(struct uart_port *port, unsigned int status) { - struct uart_info *info = port->info; - struct tty_struct *tty = info->port.tty; + struct uart_state *state = port->state; + struct tty_struct *tty = state->port.tty; port->icount.cts++; - if (info->flags & UIF_CTS_FLOW) { + if (state->flags & UIF_CTS_FLOW) { if (tty->hw_stopped) { if (status) { tty->hw_stopped = 0; @@ -570,7 +566,7 @@ static inline void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag) { - struct tty_struct *tty = port->info->port.tty; + struct tty_struct *tty = port->state->port.tty; if ((status & port->ignore_status_mask & ~overrun) == 0) tty_insert_flip_char(tty, ch, flag); -- cgit v1.2.3-70-g09d2 From 5e99df561fc830730d63672d795a0b02ef8cdd6f Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:28 -0700 Subject: serial: Fold closing_* fields into the tty_port ones Remove some more serial specific use Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/serial/serial_core.c | 26 +++++++++++++------------- include/linux/serial_core.h | 2 -- include/linux/tty.h | 4 ++-- 3 files changed, 15 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index ea53b6f224b..2d63b13e2f7 
100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -651,10 +651,10 @@ static int uart_get_info(struct uart_state *state, tmp.flags = port->flags; tmp.xmit_fifo_size = port->fifosize; tmp.baud_base = port->uartclk / 16; - tmp.close_delay = state->close_delay / 10; - tmp.closing_wait = state->closing_wait == USF_CLOSING_WAIT_NONE ? + tmp.close_delay = state->port.close_delay / 10; + tmp.closing_wait = state->port.closing_wait == USF_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : - state->closing_wait / 10; + state->port.closing_wait / 10; tmp.custom_divisor = port->custom_divisor; tmp.hub6 = port->hub6; tmp.io_type = port->iotype; @@ -724,8 +724,8 @@ static int uart_set_info(struct uart_state *state, retval = -EPERM; if (change_irq || change_port || (new_serial.baud_base != port->uartclk / 16) || - (close_delay != state->close_delay) || - (closing_wait != state->closing_wait) || + (close_delay != state->port.close_delay) || + (closing_wait != state->port.closing_wait) || (new_serial.xmit_fifo_size && new_serial.xmit_fifo_size != port->fifosize) || (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0)) @@ -834,8 +834,8 @@ static int uart_set_info(struct uart_state *state, port->flags = (port->flags & ~UPF_CHANGE_MASK) | (new_flags & UPF_CHANGE_MASK); port->custom_divisor = new_serial.custom_divisor; - state->close_delay = close_delay; - state->closing_wait = closing_wait; + state->port.close_delay = close_delay; + state->port.closing_wait = closing_wait; if (new_serial.xmit_fifo_size) port->fifosize = new_serial.xmit_fifo_size; if (state->port.tty) @@ -1297,8 +1297,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp) */ tty->closing = 1; - if (state->closing_wait != USF_CLOSING_WAIT_NONE) - tty_wait_until_sent(tty, msecs_to_jiffies(state->closing_wait)); + if (state->port.closing_wait != USF_CLOSING_WAIT_NONE) + tty_wait_until_sent(tty, msecs_to_jiffies(state->port.closing_wait)); /* * At this point, we stop accepting input. 
To do this, we @@ -1326,8 +1326,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp) state->port.tty = NULL; if (state->port.blocked_open) { - if (state->close_delay) - msleep_interruptible(state->close_delay); + if (state->port.close_delay) + msleep_interruptible(state->port.close_delay); } else if (!uart_console(port)) { uart_change_pm(state, 3); } @@ -2358,11 +2358,11 @@ int uart_register_driver(struct uart_driver *drv) for (i = 0; i < drv->nr; i++) { struct uart_state *state = drv->state + i; - state->close_delay = 500; /* .5 seconds */ - state->closing_wait = 30000; /* 30 seconds */ mutex_init(&state->mutex); tty_port_init(&state->port); + state->port.close_delay = 500; /* .5 seconds */ + state->port.closing_wait = 30000; /* 30 seconds */ init_waitqueue_head(&state->delta_msr_wait); tasklet_init(&state->tlet, uart_tasklet_action, (unsigned long)state); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index c1542703fba..fd11d4d5a57 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -350,8 +350,6 @@ typedef unsigned int __bitwise__ uif_t; */ struct uart_state { struct tty_port port; - unsigned int close_delay; /* msec */ - unsigned int closing_wait; /* msec */ #define USF_CLOSING_WAIT_INF (0) #define USF_CLOSING_WAIT_NONE (~0U) diff --git a/include/linux/tty.h b/include/linux/tty.h index ecb3d1ba301..9fdc3d84baa 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -206,8 +206,8 @@ struct tty_port { unsigned long flags; /* TTY flags ASY_*/ struct mutex mutex; /* Locking */ unsigned char *xmit_buf; /* Optional buffer */ - int close_delay; /* Close port delay */ - int closing_wait; /* Delay for output */ + unsigned int close_delay; /* Close port delay */ + unsigned int closing_wait; /* Delay for output */ int drain_delay; /* Set to zero if no pure time based drain is needed else set to size of fifo */ -- cgit v1.2.3-70-g09d2 From 91312cdb4fcd832341e425f74f49938e0503c929 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:29 -0700 Subject: serial: move count into the tty_port version Remove more stuff from the serial special case code Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/serial/serial_core.c | 38 ++++++++++++++++++++------------------ include/linux/serial_core.h | 1 - 2 files changed, 20 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 0ffefb3f1d8..4af3364dd81 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -52,7 +52,7 @@ static struct lock_class_key port_lock_key; #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) -#define uart_users(state) ((state)->count + (state)->port.blocked_open) +#define uart_users(state) ((state)->port.count + (state)->port.blocked_open) #ifdef CONFIG_SERIAL_CORE_CONSOLE #define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) @@ -694,7 +694,7 @@ static int uart_set_info(struct uart_state *state, USF_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; /* - * This semaphore protects state->count. It is also + * This semaphore protects port->count. It is also * very useful to prevent opens. 
Also, take the * port configuration semaphore to make sure that a * module insertion/removal doesn't change anything @@ -1272,24 +1272,24 @@ static void uart_close(struct tty_struct *tty, struct file *filp) if (tty_hung_up_p(filp)) goto done; - if ((tty->count == 1) && (state->count != 1)) { + if ((tty->count == 1) && (port->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty - * structure will be freed. state->count should always + * structure will be freed. port->count should always * be one in these conditions. If it's greater than * one, we've got real problems, since it means the * serial port won't be shutdown. */ printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, " - "state->count is %d\n", state->count); - state->count = 1; + "port->count is %d\n", port->count); + port->count = 1; } - if (--state->count < 0) { + if (--port->count < 0) { printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n", - tty->name, state->count); - state->count = 0; + tty->name, port->count); + port->count = 0; } - if (state->count) + if (port->count) goto done; /* @@ -1421,7 +1421,7 @@ static void uart_hangup(struct tty_struct *tty) if (state->flags & UIF_NORMAL_ACTIVE) { uart_flush_buffer(tty); uart_shutdown(state); - state->count = 0; + port->count = 0; state->flags &= ~UIF_NORMAL_ACTIVE; port->tty = NULL; wake_up_interruptible(&port->open_wait); @@ -1478,7 +1478,7 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) unsigned int mctrl; port->blocked_open++; - state->count--; + port->count--; add_wait_queue(&port->open_wait, &wait); while (1) { @@ -1539,7 +1539,7 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) set_current_state(TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); - state->count++; + port->count++; port->blocked_open--; if (signal_pending(current)) @@ -1562,7 +1562,7 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line) goto err; } - state->count++; + state->port.count++; if (!state->uart_port || state->uart_port->flags & UPF_DEAD) { ret = -ENXIO; goto err_unlock; @@ -1570,7 +1570,7 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line) return state; err_unlock: - state->count--; + state->port.count--; mutex_unlock(&state->mutex); err: return ERR_PTR(ret); @@ -1590,6 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) { struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state; struct uart_state *state; + struct tty_port *port; int retval, line = tty->index; BUG_ON(!kernel_locked()); @@ -1617,6 +1618,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) retval = PTR_ERR(state); goto fail; } + port = &state->port; /* * Once we set tty->driver_data here, we are guaranteed that @@ -1627,14 +1629,14 @@ static int uart_open(struct tty_struct *tty, struct file *filp) state->uart_port->state = state; tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0; tty->alt_speed = 0; - state->port.tty = tty; + port->tty = tty; /* * If the port is in the middle of closing, bail out now. */ if (tty_hung_up_p(filp)) { retval = -EAGAIN; - state->count--; + port->count--; mutex_unlock(&state->mutex); goto fail; } @@ -1642,7 +1644,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) /* * Make sure the device is in D0 state. 
*/ - if (state->count == 1) + if (port->count == 1) uart_change_pm(state, 0); /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index fd11d4d5a57..63ad90966db 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -354,7 +354,6 @@ struct uart_state { #define USF_CLOSING_WAIT_INF (0) #define USF_CLOSING_WAIT_NONE (~0U) - int count; int pm_state; struct circ_buf xmit; uif_t flags; -- cgit v1.2.3-70-g09d2 From ccce6debb62d94964e3878f978a56b0f3e32d94f Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:30 -0700 Subject: serial: move the flags into the tty_port field Fortunately the serial layer was designed to use the same flag values but with different names. It has its own SUSPENDED flag which is a free slot in the ASYNC flags so we allocate it in the ASYNC flags instead. Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/serial/serial_core.c | 153 ++++++++++++++++++++++--------------------- include/linux/serial.h | 2 + include/linux/serial_core.h | 48 ++++++-------- 3 files changed, 98 insertions(+), 105 deletions(-) (limited to 'include') diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 4af3364dd81..744dec9301f 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -29,10 +29,10 @@ #include #include #include -#include #include #include #include /* for serial_state and serial_icounter_struct */ +#include #include #include @@ -146,7 +146,7 @@ static int uart_startup(struct uart_state *state, int init_hw) unsigned long page; int retval = 0; - if (state->flags & UIF_INITIALIZED) + if (port->flags & ASYNC_INITIALIZED) return 0; /* @@ -189,14 +189,14 @@ static int uart_startup(struct uart_state *state, int init_hw) uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR); } - if (state->flags & UIF_CTS_FLOW) { + if (port->flags & ASYNC_CTS_FLOW) { spin_lock_irq(&uport->lock); if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS)) port->tty->hw_stopped = 1; spin_unlock_irq(&uport->lock); } - state->flags |= UIF_INITIALIZED; + set_bit(ASYNCB_INITIALIZED, &port->flags); clear_bit(TTY_IO_ERROR, &port->tty->flags); } @@ -214,7 +214,7 @@ static int uart_startup(struct uart_state *state, int init_hw) */ static void uart_shutdown(struct uart_state *state) { - struct uart_port *port = state->uart_port; + struct uart_port *uport = state->uart_port; struct tty_struct *tty = state->port.tty; /* @@ -223,14 +223,12 @@ static void uart_shutdown(struct uart_state *state) if (tty) set_bit(TTY_IO_ERROR, &tty->flags); - if (state->flags & UIF_INITIALIZED) { - state->flags &= ~UIF_INITIALIZED; - + if (test_and_clear_bit(ASYNCB_INITIALIZED, &state->port.flags)) { /* * Turn off DTR and RTS early. */ if (!tty || (tty->termios->c_cflag & HUPCL)) - uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); + uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS); /* * clear delta_msr_wait queue to avoid mem leaks: we may free @@ -244,12 +242,12 @@ static void uart_shutdown(struct uart_state *state) /* * Free the IRQ and disable the port. */ - port->ops->shutdown(port); + uport->ops->shutdown(uport); /* * Ensure that the IRQ handler isn't running on another CPU. 
*/ - synchronize_irq(port->irq); + synchronize_irq(uport->irq); } /* @@ -429,15 +427,16 @@ EXPORT_SYMBOL(uart_get_divisor); static void uart_change_speed(struct uart_state *state, struct ktermios *old_termios) { - struct tty_struct *tty = state->port.tty; - struct uart_port *port = state->uart_port; + struct tty_port *port = &state->port; + struct tty_struct *tty = port->tty; + struct uart_port *uport = state->uart_port; struct ktermios *termios; /* * If we have no tty, termios, or the port does not exist, * then we can't set the parameters for this port. */ - if (!tty || !tty->termios || port->type == PORT_UNKNOWN) + if (!tty || !tty->termios || uport->type == PORT_UNKNOWN) return; termios = tty->termios; @@ -446,16 +445,16 @@ uart_change_speed(struct uart_state *state, struct ktermios *old_termios) * Set flags based on termios cflag */ if (termios->c_cflag & CRTSCTS) - state->flags |= UIF_CTS_FLOW; + set_bit(ASYNCB_CTS_FLOW, &port->flags); else - state->flags &= ~UIF_CTS_FLOW; + clear_bit(ASYNCB_CTS_FLOW, &port->flags); if (termios->c_cflag & CLOCAL) - state->flags &= ~UIF_CHECK_CD; + clear_bit(ASYNCB_CHECK_CD, &port->flags); else - state->flags |= UIF_CHECK_CD; + set_bit(ASYNCB_CHECK_CD, &port->flags); - port->ops->set_termios(port, termios, old_termios); + uport->ops->set_termios(uport, termios, old_termios); } static inline int @@ -848,7 +847,7 @@ static int uart_set_info(struct uart_state *state, retval = 0; if (uport->type == PORT_UNKNOWN) goto exit; - if (state->flags & UIF_INITIALIZED) { + if (port->flags & ASYNC_INITIALIZED) { if (((old_flags ^ uport->flags) & UPF_SPD_MASK) || old_custom_divisor != uport->custom_divisor) { /* @@ -1306,7 +1305,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) * At this point, we stop accepting input. To do this, we * disable the receive line status interrupts. */ - if (state->flags & UIF_INITIALIZED) { + if (port->flags & ASYNC_INITIALIZED) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); uport->ops->stop_rx(uport); @@ -1337,7 +1336,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) /* * Wake up anyone trying to open this port. */ - state->flags &= ~UIF_NORMAL_ACTIVE; + clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags); wake_up_interruptible(&port->open_wait); done: @@ -1418,11 +1417,11 @@ static void uart_hangup(struct tty_struct *tty) pr_debug("uart_hangup(%d)\n", state->uart_port->line); mutex_lock(&state->mutex); - if (state->flags & UIF_NORMAL_ACTIVE) { + if (port->flags & ASYNC_NORMAL_ACTIVE) { uart_flush_buffer(tty); uart_shutdown(state); port->count = 0; - state->flags &= ~UIF_NORMAL_ACTIVE; + clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags); port->tty = NULL; wake_up_interruptible(&port->open_wait); wake_up_interruptible(&state->delta_msr_wait); @@ -1493,7 +1492,7 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) /* * If the port has been closed, tell userspace/restart open. */ - if (!(state->flags & UIF_INITIALIZED)) + if (!(port->flags & ASYNC_INITIALIZED)) break; /* @@ -1662,8 +1661,8 @@ static int uart_open(struct tty_struct *tty, struct file *filp) /* * If this is the first open to succeed, adjust things to suit. 
*/ - if (retval == 0 && !(state->flags & UIF_NORMAL_ACTIVE)) { - state->flags |= UIF_NORMAL_ACTIVE; + if (retval == 0 && !(port->flags & ASYNC_NORMAL_ACTIVE)) { + set_bit(ASYNCB_NORMAL_ACTIVE, &port->flags); uart_update_termios(state); } @@ -1985,63 +1984,64 @@ static int serial_match_port(struct device *dev, void *data) return dev->devt == devt; /* Actually, only one tty per port */ } -int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) +int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) { - struct uart_state *state = drv->state + port->line; + struct uart_state *state = drv->state + uport->line; + struct tty_port *port = &state->port; struct device *tty_dev; - struct uart_match match = {port, drv}; + struct uart_match match = {uport, drv}; mutex_lock(&state->mutex); - if (!console_suspend_enabled && uart_console(port)) { + if (!console_suspend_enabled && uart_console(uport)) { /* we're going to avoid suspending serial console */ mutex_unlock(&state->mutex); return 0; } - tty_dev = device_find_child(port->dev, &match, serial_match_port); + tty_dev = device_find_child(uport->dev, &match, serial_match_port); if (device_may_wakeup(tty_dev)) { - enable_irq_wake(port->irq); + enable_irq_wake(uport->irq); put_device(tty_dev); mutex_unlock(&state->mutex); return 0; } - port->suspended = 1; + uport->suspended = 1; - if (state->flags & UIF_INITIALIZED) { - const struct uart_ops *ops = port->ops; + if (port->flags & ASYNC_INITIALIZED) { + const struct uart_ops *ops = uport->ops; int tries; - state->flags = (state->flags & ~UIF_INITIALIZED) - | UIF_SUSPENDED; + set_bit(ASYNCB_SUSPENDED, &port->flags); + clear_bit(ASYNCB_INITIALIZED, &port->flags); - spin_lock_irq(&port->lock); - ops->stop_tx(port); - ops->set_mctrl(port, 0); - ops->stop_rx(port); - spin_unlock_irq(&port->lock); + spin_lock_irq(&uport->lock); + ops->stop_tx(uport); + ops->set_mctrl(uport, 0); + ops->stop_rx(uport); + spin_unlock_irq(&uport->lock); /* * Wait for the transmitter to empty. */ - for (tries = 3; !ops->tx_empty(port) && tries; tries--) + for (tries = 3; !ops->tx_empty(uport) && tries; tries--) msleep(10); if (!tries) printk(KERN_ERR "%s%s%s%d: Unable to drain " "transmitter\n", - port->dev ? dev_name(port->dev) : "", - port->dev ? ": " : "", + uport->dev ? dev_name(uport->dev) : "", + uport->dev ? ": " : "", drv->dev_name, - drv->tty_driver->name_base + port->line); + drv->tty_driver->name_base + uport->line); - ops->shutdown(port); + ops->shutdown(uport); } /* * Disable the console device before suspending. 
*/ - if (uart_console(port)) - console_stop(port->cons); + if (uart_console(uport)) + console_stop(uport->cons); uart_change_pm(state, 3); @@ -2050,67 +2050,68 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) return 0; } -int uart_resume_port(struct uart_driver *drv, struct uart_port *port) +int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) { - struct uart_state *state = drv->state + port->line; + struct uart_state *state = drv->state + uport->line; + struct tty_port *port = &state->port; struct device *tty_dev; - struct uart_match match = {port, drv}; + struct uart_match match = {uport, drv}; mutex_lock(&state->mutex); - if (!console_suspend_enabled && uart_console(port)) { + if (!console_suspend_enabled && uart_console(uport)) { /* no need to resume serial console, it wasn't suspended */ mutex_unlock(&state->mutex); return 0; } - tty_dev = device_find_child(port->dev, &match, serial_match_port); - if (!port->suspended && device_may_wakeup(tty_dev)) { - disable_irq_wake(port->irq); + tty_dev = device_find_child(uport->dev, &match, serial_match_port); + if (!uport->suspended && device_may_wakeup(tty_dev)) { + disable_irq_wake(uport->irq); mutex_unlock(&state->mutex); return 0; } - port->suspended = 0; + uport->suspended = 0; /* * Re-enable the console device after suspending. */ - if (uart_console(port)) { + if (uart_console(uport)) { struct ktermios termios; /* * First try to use the console cflag setting. */ memset(&termios, 0, sizeof(struct ktermios)); - termios.c_cflag = port->cons->cflag; + termios.c_cflag = uport->cons->cflag; /* * If that's unset, use the tty termios setting. */ - if (state->port.tty && termios.c_cflag == 0) - termios = *state->port.tty->termios; + if (port->tty && termios.c_cflag == 0) + termios = *port->tty->termios; uart_change_pm(state, 0); - port->ops->set_termios(port, &termios, NULL); - console_start(port->cons); + uport->ops->set_termios(uport, &termios, NULL); + console_start(uport->cons); } - if (state->flags & UIF_SUSPENDED) { - const struct uart_ops *ops = port->ops; + if (port->flags & ASYNC_SUSPENDED) { + const struct uart_ops *ops = uport->ops; int ret; uart_change_pm(state, 0); - spin_lock_irq(&port->lock); - ops->set_mctrl(port, 0); - spin_unlock_irq(&port->lock); - ret = ops->startup(port); + spin_lock_irq(&uport->lock); + ops->set_mctrl(uport, 0); + spin_unlock_irq(&uport->lock); + ret = ops->startup(uport); if (ret == 0) { uart_change_speed(state, NULL); - spin_lock_irq(&port->lock); - ops->set_mctrl(port, port->mctrl); - ops->start_tx(port); - spin_unlock_irq(&port->lock); - state->flags |= UIF_INITIALIZED; + spin_lock_irq(&uport->lock); + ops->set_mctrl(uport, uport->mctrl); + ops->start_tx(uport); + spin_unlock_irq(&uport->lock); + set_bit(ASYNCB_INITIALIZED, &port->flags); } else { /* * Failed to resume - maybe hardware went away? 
@@ -2120,7 +2121,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) uart_shutdown(state); } - state->flags &= ~UIF_SUSPENDED; + clear_bit(ASYNCB_SUSPENDED, &port->flags); } mutex_unlock(&state->mutex); diff --git a/include/linux/serial.h b/include/linux/serial.h index e5bb75a6380..c8613c3ff9d 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -122,6 +122,7 @@ struct serial_uart_config { /* Internal flags used only by kernel */ #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ +#define ASYNCB_SUSPENDED 30 /* Serial port is suspended */ #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ #define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */ #define ASYNCB_CLOSING 27 /* Serial port is closing */ @@ -133,6 +134,7 @@ struct serial_uart_config { #define ASYNCB_FIRST_KERNEL 22 #define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY) +#define ASYNC_SUSPENDED (1U << ASYNCB_SUSPENDED) #define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT) #define ASYNC_SAK (1U << ASYNCB_SAK) #define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS) diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 63ad90966db..284ef7f5489 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -20,6 +20,8 @@ #ifndef LINUX_SERIAL_CORE_H #define LINUX_SERIAL_CORE_H +#include + /* * The type definitions. These are from Ted Ts'o's serial.h */ @@ -356,19 +358,6 @@ struct uart_state { int pm_state; struct circ_buf xmit; - uif_t flags; - -/* - * Definitions for info->flags. These are _private_ to serial_core, and - * are specific to this structure. They may be queried by low level drivers. - * - * FIXME: use the ASY_ definitions - */ -#define UIF_CHECK_CD ((__force uif_t) (1 << 25)) -#define UIF_CTS_FLOW ((__force uif_t) (1 << 26)) -#define UIF_NORMAL_ACTIVE ((__force uif_t) (1 << 29)) -#define UIF_INITIALIZED ((__force uif_t) (1 << 31)) -#define UIF_SUSPENDED ((__force uif_t) (1 << 30)) struct tasklet_struct tlet; wait_queue_head_t delta_msr_wait; @@ -509,22 +498,23 @@ static inline int uart_handle_break(struct uart_port *port) * @status: new carrier detect status, nonzero if active */ static inline void -uart_handle_dcd_change(struct uart_port *port, unsigned int status) +uart_handle_dcd_change(struct uart_port *uport, unsigned int status) { - struct uart_state *state = port->state; + struct uart_state *state = uport->state; + struct tty_port *port = &state->port; - port->icount.dcd++; + uport->icount.dcd++; #ifdef CONFIG_HARD_PPS - if ((port->flags & UPF_HARDPPS_CD) && status) + if ((uport->flags & UPF_HARDPPS_CD) && status) hardpps(); #endif - if (state->flags & UIF_CHECK_CD) { + if (port->flags & ASYNC_CHECK_CD) { if (status) - wake_up_interruptible(&state->port.open_wait); - else if (state->port.tty) - tty_hangup(state->port.tty); + wake_up_interruptible(&port->open_wait); + else if (port->tty) + tty_hangup(port->tty); } } @@ -534,24 +524,24 @@ uart_handle_dcd_change(struct uart_port *port, unsigned int status) * @status: new clear to send status, nonzero if active */ static inline void -uart_handle_cts_change(struct uart_port *port, unsigned int status) +uart_handle_cts_change(struct uart_port *uport, unsigned int status) { - struct uart_state *state = port->state; - struct tty_struct *tty = state->port.tty; + struct tty_port *port = &uport->state->port; + struct tty_struct *tty = port->tty; - port->icount.cts++; + uport->icount.cts++; - if (state->flags & UIF_CTS_FLOW) { + if (port->flags & ASYNC_CTS_FLOW) { if 
(tty->hw_stopped) { if (status) { tty->hw_stopped = 0; - port->ops->start_tx(port); - uart_write_wakeup(port); + uport->ops->start_tx(uport); + uart_write_wakeup(uport); } } else { if (!status) { tty->hw_stopped = 1; - port->ops->stop_tx(port); + uport->ops->stop_tx(uport); } } } -- cgit v1.2.3-70-g09d2 From a03006860d272eac5a8ebf23f04f54c7e1e783a5 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:30 -0700 Subject: serial: kill off uif_t This typedef is now extinct Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- include/linux/serial_core.h | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'include') diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 284ef7f5489..f98dc78abb2 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -336,19 +336,8 @@ struct uart_port { void *private_data; /* generic platform data pointer */ }; -/* - * This is the state information which is only valid when the port - * is open; it may be cleared the core driver once the device has - * been closed. Either the low level driver or the core can modify - * stuff here. - */ -typedef unsigned int __bitwise__ uif_t; - - /* * This is the state information which is persistent across opens. - * The low level driver must not to touch any elements contained - * within. */ struct uart_state { struct tty_port port; -- cgit v1.2.3-70-g09d2 From a2bceae065ed8c4f552b35c4dde4cc2db05ce9e3 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:31 -0700 Subject: serial: replace the state mutex with the tty port mutex They cover essentially the same stuff and we can therefore fold it into the tty_port one. Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/serial/pmac_zilog.c | 8 +- drivers/serial/serial_core.c | 227 +++++++++++++++++++++++-------------------- include/linux/serial_core.h | 2 - 3 files changed, 123 insertions(+), 114 deletions(-) (limited to 'include') diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index ab4c85ba354..0dc786835dc 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c @@ -1645,7 +1645,7 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state) state = pmz_uart_reg.state + uap->port.line; mutex_lock(&pmz_irq_mutex); - mutex_lock(&state->mutex); + mutex_lock(&state->port.mutex); spin_lock_irqsave(&uap->port.lock, flags); @@ -1676,7 +1676,7 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state) /* Shut the chip down */ pmz_set_scc_power(uap, 0); - mutex_unlock(&state->mutex); + mutex_unlock(&state->port.mutex); mutex_unlock(&pmz_irq_mutex); pmz_debug("suspend, switching complete\n"); @@ -1705,7 +1705,7 @@ static int pmz_resume(struct macio_dev *mdev) state = pmz_uart_reg.state + uap->port.line; mutex_lock(&pmz_irq_mutex); - mutex_lock(&state->mutex); + mutex_lock(&state->port.mutex); spin_lock_irqsave(&uap->port.lock, flags); if (!ZS_IS_OPEN(uap) && !ZS_IS_CONS(uap)) { @@ -1737,7 +1737,7 @@ static int pmz_resume(struct macio_dev *mdev) } bail: - mutex_unlock(&state->mutex); + mutex_unlock(&state->port.mutex); mutex_unlock(&pmz_irq_mutex); /* Right now, we deal with delay by blocking here, I'll be diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 744dec9301f..9d42e57e197 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -633,35 +633,36 @@ static void uart_unthrottle(struct tty_struct *tty) static int uart_get_info(struct uart_state *state, struct 
serial_struct __user *retinfo) { - struct uart_port *port = state->uart_port; + struct uart_port *uport = state->uart_port; + struct tty_port *port = &state->port; struct serial_struct tmp; memset(&tmp, 0, sizeof(tmp)); /* Ensure the state we copy is consistent and no hardware changes occur as we go */ - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); - tmp.type = port->type; - tmp.line = port->line; - tmp.port = port->iobase; + tmp.type = uport->type; + tmp.line = uport->line; + tmp.port = uport->iobase; if (HIGH_BITS_OFFSET) - tmp.port_high = (long) port->iobase >> HIGH_BITS_OFFSET; - tmp.irq = port->irq; - tmp.flags = port->flags; - tmp.xmit_fifo_size = port->fifosize; - tmp.baud_base = port->uartclk / 16; - tmp.close_delay = state->port.close_delay / 10; - tmp.closing_wait = state->port.closing_wait == USF_CLOSING_WAIT_NONE ? + tmp.port_high = (long) uport->iobase >> HIGH_BITS_OFFSET; + tmp.irq = uport->irq; + tmp.flags = uport->flags; + tmp.xmit_fifo_size = uport->fifosize; + tmp.baud_base = uport->uartclk / 16; + tmp.close_delay = port->close_delay / 10; + tmp.closing_wait = port->closing_wait == USF_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : - state->port.closing_wait / 10; - tmp.custom_divisor = port->custom_divisor; - tmp.hub6 = port->hub6; - tmp.io_type = port->iotype; - tmp.iomem_reg_shift = port->regshift; - tmp.iomem_base = (void *)(unsigned long)port->mapbase; + port->closing_wait / 10; + tmp.custom_divisor = uport->custom_divisor; + tmp.hub6 = uport->hub6; + tmp.io_type = uport->iotype; + tmp.iomem_reg_shift = uport->regshift; + tmp.iomem_base = (void *)(unsigned long)uport->mapbase; - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; @@ -699,7 +700,7 @@ static int uart_set_info(struct uart_state *state, * module insertion/removal doesn't change anything * under us. 
*/ - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); change_irq = !(uport->flags & UPF_FIXED_PORT) && new_serial.irq != uport->irq; @@ -867,7 +868,7 @@ static int uart_set_info(struct uart_state *state, } else retval = uart_startup(state, 1); exit: - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return retval; } @@ -902,10 +903,11 @@ static int uart_get_lsr_info(struct uart_state *state, static int uart_tiocmget(struct tty_struct *tty, struct file *file) { struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; struct uart_port *uport = state->uart_port; int result = -EIO; - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if ((!file || !tty_hung_up_p(file)) && !(tty->flags & (1 << TTY_IO_ERROR))) { result = uport->mctrl; @@ -914,7 +916,7 @@ static int uart_tiocmget(struct tty_struct *tty, struct file *file) result |= uport->ops->get_mctrl(uport); spin_unlock_irq(&uport->lock); } - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return result; } @@ -925,35 +927,38 @@ uart_tiocmset(struct tty_struct *tty, struct file *file, { struct uart_state *state = tty->driver_data; struct uart_port *uport = state->uart_port; + struct tty_port *port = &state->port; int ret = -EIO; - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if ((!file || !tty_hung_up_p(file)) && !(tty->flags & (1 << TTY_IO_ERROR))) { uart_update_mctrl(uport, set, clear); ret = 0; } - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return ret; } static int uart_break_ctl(struct tty_struct *tty, int break_state) { struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; struct uart_port *uport = state->uart_port; - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (uport->type != PORT_UNKNOWN) uport->ops->break_ctl(uport, break_state); - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return 0; } static int uart_do_autoconfig(struct uart_state *state) { struct uart_port *uport = state->uart_port; + struct tty_port *port = &state->port; int flags, ret; if (!capable(CAP_SYS_ADMIN)) @@ -964,7 +969,7 @@ static int uart_do_autoconfig(struct uart_state *state) * changing, and hence any extra opens of the port while * we're auto-configuring. 
*/ - if (mutex_lock_interruptible(&state->mutex)) + if (mutex_lock_interruptible(&port->mutex)) return -ERESTARTSYS; ret = -EBUSY; @@ -990,7 +995,7 @@ static int uart_do_autoconfig(struct uart_state *state) ret = uart_startup(state, 1); } - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return ret; } @@ -1093,6 +1098,7 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg) { struct uart_state *state = tty->driver_data; + struct tty_port *port = &state->port; void __user *uarg = (void __user *)arg; int ret = -ENOIOCTLCMD; @@ -1143,7 +1149,7 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, if (ret != -ENOIOCTLCMD) goto out; - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (tty_hung_up_p(filp)) { ret = -EIO; @@ -1167,7 +1173,7 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, } } out_up: - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); out: return ret; } @@ -1266,7 +1272,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) pr_debug("uart_close(%d) called\n", uport->line); - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (tty_hung_up_p(filp)) goto done; @@ -1340,7 +1346,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) wake_up_interruptible(&port->open_wait); done: - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) @@ -1416,7 +1422,7 @@ static void uart_hangup(struct tty_struct *tty) BUG_ON(!kernel_locked()); pr_debug("uart_hangup(%d)\n", state->uart_port->line); - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (port->flags & ASYNC_NORMAL_ACTIVE) { uart_flush_buffer(tty); uart_shutdown(state); @@ -1426,7 +1432,7 @@ static void uart_hangup(struct tty_struct *tty) wake_up_interruptible(&port->open_wait); wake_up_interruptible(&state->delta_msr_wait); } - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); } /* @@ -1528,9 +1534,9 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) if (mctrl & TIOCM_CAR) break; - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); schedule(); - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (signal_pending(current)) break; @@ -1553,15 +1559,17 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) static struct uart_state *uart_get(struct uart_driver *drv, int line) { struct uart_state *state; + struct tty_port *port; int ret = 0; state = drv->state + line; - if (mutex_lock_interruptible(&state->mutex)) { + port = &state->port; + if (mutex_lock_interruptible(&port->mutex)) { ret = -ERESTARTSYS; goto err; } - state->port.count++; + port->count++; if (!state->uart_port || state->uart_port->flags & UPF_DEAD) { ret = -ENXIO; goto err_unlock; @@ -1569,8 +1577,8 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line) return state; err_unlock: - state->port.count--; - mutex_unlock(&state->mutex); + port->count--; + mutex_unlock(&port->mutex); err: return ERR_PTR(ret); } @@ -1636,7 +1644,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) if (tty_hung_up_p(filp)) { retval = -EAGAIN; port->count--; - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); goto fail; } @@ -1656,7 +1664,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) */ if (retval == 0) retval = uart_block_til_ready(filp, state); - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); /* * If this is the first open to succeed, 
adjust things to suit. @@ -1667,7 +1675,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) uart_update_termios(state); } - fail: +fail: return retval; } @@ -1689,57 +1697,58 @@ static const char *uart_type(struct uart_port *port) static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) { struct uart_state *state = drv->state + i; + struct tty_port *port = &state->port; int pm_state; - struct uart_port *port = state->uart_port; + struct uart_port *uport = state->uart_port; char stat_buf[32]; unsigned int status; int mmio; - if (!port) + if (!uport) return; - mmio = port->iotype >= UPIO_MEM; + mmio = uport->iotype >= UPIO_MEM; seq_printf(m, "%d: uart:%s %s%08llX irq:%d", - port->line, uart_type(port), + uport->line, uart_type(uport), mmio ? "mmio:0x" : "port:", - mmio ? (unsigned long long)port->mapbase - : (unsigned long long) port->iobase, - port->irq); + mmio ? (unsigned long long)uport->mapbase + : (unsigned long long)uport->iobase, + uport->irq); - if (port->type == PORT_UNKNOWN) { + if (uport->type == PORT_UNKNOWN) { seq_putc(m, '\n'); return; } if (capable(CAP_SYS_ADMIN)) { - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); pm_state = state->pm_state; if (pm_state) uart_change_pm(state, 0); - spin_lock_irq(&port->lock); - status = port->ops->get_mctrl(port); - spin_unlock_irq(&port->lock); + spin_lock_irq(&uport->lock); + status = uport->ops->get_mctrl(uport); + spin_unlock_irq(&uport->lock); if (pm_state) uart_change_pm(state, pm_state); - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); seq_printf(m, " tx:%d rx:%d", - port->icount.tx, port->icount.rx); - if (port->icount.frame) + uport->icount.tx, uport->icount.rx); + if (uport->icount.frame) seq_printf(m, " fe:%d", - port->icount.frame); - if (port->icount.parity) + uport->icount.frame); + if (uport->icount.parity) seq_printf(m, " pe:%d", - port->icount.parity); - if (port->icount.brk) + uport->icount.parity); + if (uport->icount.brk) seq_printf(m, " brk:%d", - port->icount.brk); - if (port->icount.overrun) + uport->icount.brk); + if (uport->icount.overrun) seq_printf(m, " oe:%d", - port->icount.overrun); + uport->icount.overrun); #define INFOBIT(bit, str) \ - if (port->mctrl & (bit)) \ + if (uport->mctrl & (bit)) \ strncat(stat_buf, (str), sizeof(stat_buf) - \ strlen(stat_buf) - 2) #define STATBIT(bit, str) \ @@ -1991,11 +2000,11 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) struct device *tty_dev; struct uart_match match = {uport, drv}; - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (!console_suspend_enabled && uart_console(uport)) { /* we're going to avoid suspending serial console */ - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return 0; } @@ -2003,7 +2012,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) if (device_may_wakeup(tty_dev)) { enable_irq_wake(uport->irq); put_device(tty_dev); - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return 0; } uport->suspended = 1; @@ -2045,7 +2054,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) uart_change_pm(state, 3); - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return 0; } @@ -2057,18 +2066,18 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) struct device *tty_dev; struct uart_match match = {uport, drv}; - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (!console_suspend_enabled && uart_console(uport)) { /* no need to resume serial console, it wasn't 
suspended */ - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return 0; } tty_dev = device_find_child(uport->dev, &match, serial_match_port); if (!uport->suspended && device_may_wakeup(tty_dev)) { disable_irq_wake(uport->irq); - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return 0; } uport->suspended = 0; @@ -2124,7 +2133,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) clear_bit(ASYNCB_SUSPENDED, &port->flags); } - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); return 0; } @@ -2364,12 +2373,11 @@ int uart_register_driver(struct uart_driver *drv) */ for (i = 0; i < drv->nr; i++) { struct uart_state *state = drv->state + i; + struct tty_port *port = &state->port; - mutex_init(&state->mutex); - - tty_port_init(&state->port); - state->port.close_delay = 500; /* .5 seconds */ - state->port.closing_wait = 30000; /* 30 seconds */ + tty_port_init(port); + port->close_delay = 500; /* .5 seconds */ + port->closing_wait = 30000; /* 30 seconds */ init_waitqueue_head(&state->delta_msr_wait); tasklet_init(&state->tlet, uart_tasklet_action, (unsigned long)state); @@ -2419,62 +2427,64 @@ struct tty_driver *uart_console_device(struct console *co, int *index) * level uart drivers to expand uart_port, rather than having yet * more levels of structures. */ -int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) +int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) { struct uart_state *state; + struct tty_port *port; int ret = 0; struct device *tty_dev; BUG_ON(in_interrupt()); - if (port->line >= drv->nr) + if (uport->line >= drv->nr) return -EINVAL; - state = drv->state + port->line; + state = drv->state + uport->line; + port = &state->port; mutex_lock(&port_mutex); - mutex_lock(&state->mutex); + mutex_lock(&port->mutex); if (state->uart_port) { ret = -EINVAL; goto out; } - state->uart_port = port; + state->uart_port = uport; state->pm_state = -1; - port->cons = drv->cons; - port->state = state; + uport->cons = drv->cons; + uport->state = state; /* * If this port is a console, then the spinlock is already * initialised. */ - if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) { - spin_lock_init(&port->lock); - lockdep_set_class(&port->lock, &port_lock_key); + if (!(uart_console(uport) && (uport->cons->flags & CON_ENABLED))) { + spin_lock_init(&uport->lock); + lockdep_set_class(&uport->lock, &port_lock_key); } - uart_configure_port(drv, state, port); + uart_configure_port(drv, state, uport); /* * Register the port whether it's detected or not. This allows * setserial to be used to alter this ports parameters. */ - tty_dev = tty_register_device(drv->tty_driver, port->line, port->dev); + tty_dev = tty_register_device(drv->tty_driver, uport->line, uport->dev); if (likely(!IS_ERR(tty_dev))) { device_init_wakeup(tty_dev, 1); device_set_wakeup_enable(tty_dev, 0); } else printk(KERN_ERR "Cannot register tty device on line %d\n", - port->line); + uport->line); /* * Ensure UPF_DEAD is not set. */ - port->flags &= ~UPF_DEAD; + uport->flags &= ~UPF_DEAD; out: - mutex_unlock(&state->mutex); + mutex_unlock(&port->mutex); mutex_unlock(&port_mutex); return ret; @@ -2489,15 +2499,16 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) * core driver. No further calls will be made to the low-level code * for this port. 
*/ -int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) +int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport) { - struct uart_state *state = drv->state + port->line; + struct uart_state *state = drv->state + uport->line; + struct tty_port *port = &state->port; BUG_ON(in_interrupt()); - if (state->uart_port != port) + if (state->uart_port != uport) printk(KERN_ALERT "Removing wrong port: %p != %p\n", - state->uart_port, port); + state->uart_port, uport); mutex_lock(&port_mutex); @@ -2505,28 +2516,28 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) * Mark the port "dead" - this prevents any opens from * succeeding while we shut down the port. */ - mutex_lock(&state->mutex); - port->flags |= UPF_DEAD; - mutex_unlock(&state->mutex); + mutex_lock(&port->mutex); + uport->flags |= UPF_DEAD; + mutex_unlock(&port->mutex); /* * Remove the devices from the tty layer */ - tty_unregister_device(drv->tty_driver, port->line); + tty_unregister_device(drv->tty_driver, uport->line); - if (state->port.tty) - tty_vhangup(state->port.tty); + if (port->tty) + tty_vhangup(port->tty); /* * Free the port IO and memory resources, if any. */ - if (port->type != PORT_UNKNOWN) - port->ops->release_port(port); + if (uport->type != PORT_UNKNOWN) + uport->ops->release_port(uport); /* * Indicate that there isn't a port here anymore. */ - port->type = PORT_UNKNOWN; + uport->type = PORT_UNKNOWN; /* * Kill the tasklet, and free resources. diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index f98dc78abb2..27767ea5fa2 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -351,8 +351,6 @@ struct uart_state { struct tasklet_struct tlet; wait_queue_head_t delta_msr_wait; struct uart_port *uart_port; - - struct mutex mutex; }; #define UART_XMIT_SIZE PAGE_SIZE -- cgit v1.2.3-70-g09d2 From bdc04e3174e18f475289fa8f4144f66686326b7e Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:31 -0700 Subject: serial: move delta_msr_wait into the tty_port This is used by various drivers not just serial and can be extracted as commonality Signed-off-by: Alan Cox --- drivers/char/cyclades.c | 9 ++++----- drivers/char/esp.c | 7 +++---- drivers/char/mxser.c | 8 +++----- drivers/char/tty_port.c | 2 ++ drivers/serial/8250.c | 2 +- drivers/serial/amba-pl010.c | 2 +- drivers/serial/amba-pl011.c | 2 +- drivers/serial/atmel_serial.c | 2 +- drivers/serial/icom.c | 2 +- drivers/serial/imx.c | 4 ++-- drivers/serial/ioc3_serial.c | 8 ++++---- drivers/serial/ioc4_serial.c | 8 ++++---- drivers/serial/ip22zilog.c | 2 +- drivers/serial/msm_serial.c | 2 +- drivers/serial/pmac_zilog.c | 2 +- drivers/serial/pnx8xxx_uart.c | 2 +- drivers/serial/pxa.c | 2 +- drivers/serial/sa1100.c | 2 +- drivers/serial/sb1250-duart.c | 2 +- drivers/serial/serial_core.c | 18 +++++++++++------- drivers/serial/serial_ks8695.c | 2 +- drivers/serial/serial_lh7a40x.c | 2 +- drivers/serial/sunsab.c | 2 +- drivers/serial/sunsu.c | 2 +- drivers/serial/sunzilog.c | 2 +- drivers/serial/timbuart.c | 2 +- drivers/serial/vr41xx_siu.c | 2 +- drivers/serial/zs.c | 2 +- include/linux/cyclades.h | 1 - include/linux/hayesesp.h | 1 - include/linux/serial_core.h | 1 - include/linux/tty.h | 1 + 32 files changed, 54 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 70bd61b2a7d..df5038bbcbc 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -729,7 +729,7 @@ static void 
cyy_chip_modem(struct cyclades_card *cinfo, int chip, if (mdm_change & CyRI) info->icount.rng++; - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&info->port.delta_msr_wait); } if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) { @@ -1197,7 +1197,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) break; } if (delta_count) - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&info->port.delta_msr_wait); if (special_count) tty_schedule_flip(tty); tty_kref_put(tty); @@ -1464,7 +1464,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) spin_lock_irqsave(&card->card_lock, flags); /* Clear delta_msr_wait queue to avoid mem leaks. */ - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&info->port.delta_msr_wait); if (info->port.xmit_buf) { unsigned char *temp; @@ -2788,7 +2788,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file, /* note the counters on entry */ cnow = info->icount; spin_unlock_irqrestore(&info->card->card_lock, flags); - ret_val = wait_event_interruptible(info->delta_msr_wait, + ret_val = wait_event_interruptible(info->port.delta_msr_wait, cy_cflags_changed(info, arg, &cnow)); break; @@ -3153,7 +3153,6 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo) info->port.close_delay = 5 * HZ / 10; info->port.flags = STD_COM_FLAGS; init_completion(&info->shutdown_wait); - init_waitqueue_head(&info->delta_msr_wait); if (cy_is_Z(cinfo)) { struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS; diff --git a/drivers/char/esp.c b/drivers/char/esp.c index a5c59fc2b0f..b19d43cd954 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c @@ -572,7 +572,7 @@ static void check_modem_status(struct esp_struct *info) info->icount.dcd++; if (status & UART_MSR_DCTS) info->icount.cts++; - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&info->port.delta_msr_wait); } if ((info->port.flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) { @@ -927,7 +927,7 @@ static void shutdown(struct esp_struct *info) * clear delta_msr_wait queue to avoid mem leaks: we may free the irq * here so the queue might never be waken up */ - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&info->port.delta_msr_wait); wake_up_interruptible(&info->break_wait); /* stop a DMA transfer on the port being closed */ @@ -1800,7 +1800,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file *file, spin_unlock_irqrestore(&info->lock, flags); while (1) { /* FIXME: convert to new style wakeup */ - interruptible_sleep_on(&info->delta_msr_wait); + interruptible_sleep_on(&info->port.delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; @@ -2452,7 +2452,6 @@ static int __init espserial_init(void) info->config.flow_off = flow_off; info->config.pio_threshold = pio_threshold; info->next_port = ports; - init_waitqueue_head(&info->delta_msr_wait); init_waitqueue_head(&info->break_wait); ports = info; printk(KERN_INFO "ttyP%d at 0x%04x (irq = %d) is an ESP ", diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 30544ca5e95..37058ff7da7 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c @@ -258,7 +258,6 @@ struct mxser_port { struct mxser_mon mon_data; spinlock_t slock; - wait_queue_head_t delta_msr_wait; }; struct mxser_board { @@ -818,7 +817,7 @@ static void mxser_check_modem_status(struct tty_struct *tty, if (status & UART_MSR_DCTS) port->icount.cts++; port->mon_data.modem_status = status; - 
wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&port->port.delta_msr_wait); if ((port->port.flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) { if (status & UART_MSR_DCD) @@ -973,7 +972,7 @@ static void mxser_shutdown(struct tty_struct *tty) * clear delta_msr_wait queue to avoid mem leaks: we may free the irq * here so the queue might never be waken up */ - wake_up_interruptible(&info->delta_msr_wait); + wake_up_interruptible(&info->port.delta_msr_wait); /* * Free the IRQ, if necessary @@ -1762,7 +1761,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, cnow = info->icount; /* note the counters on entry */ spin_unlock_irqrestore(&info->slock, flags); - return wait_event_interruptible(info->delta_msr_wait, + return wait_event_interruptible(info->port.delta_msr_wait, mxser_cflags_changed(info, arg, &cnow)); /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) @@ -2414,7 +2413,6 @@ static int __devinit mxser_initbrd(struct mxser_board *brd, info->port.close_delay = 5 * HZ / 10; info->port.closing_wait = 30 * HZ; info->normal_termios = mxvar_sdriver->init_termios; - init_waitqueue_head(&info->delta_msr_wait); memset(&info->mon_data, 0, sizeof(struct mxser_mon)); info->err_shadow = 0; spin_lock_init(&info->slock); diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 549bd0fa8bb..c767e30a142 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -23,6 +23,7 @@ void tty_port_init(struct tty_port *port) memset(port, 0, sizeof(*port)); init_waitqueue_head(&port->open_wait); init_waitqueue_head(&port->close_wait); + init_waitqueue_head(&port->delta_msr_wait); mutex_init(&port->mutex); spin_lock_init(&port->lock); port->close_delay = (50 * HZ) / 100; @@ -124,6 +125,7 @@ void tty_port_hangup(struct tty_port *port) port->tty = NULL; spin_unlock_irqrestore(&port->lock, flags); wake_up_interruptible(&port->open_wait); + wake_up_interruptible(&port->delta_msr_wait); tty_port_shutdown(port); } EXPORT_SYMBOL(tty_port_hangup); diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index e415c5eca59..2209620d234 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -1510,7 +1510,7 @@ static unsigned int check_modem_status(struct uart_8250_port *up) if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); - wake_up_interruptible(&up->port.state->delta_msr_wait); + wake_up_interruptible(&up->port.state->port.delta_msr_wait); } return status; diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c index 39032413d4a..429a8ae8693 100644 --- a/drivers/serial/amba-pl010.c +++ b/drivers/serial/amba-pl010.c @@ -225,7 +225,7 @@ static void pl010_modem_status(struct uart_amba_port *uap) if (delta & UART01x_FR_CTS) uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS); - wake_up_interruptible(&uap->port.state->delta_msr_wait); + wake_up_interruptible(&uap->port.state->port.delta_msr_wait); } static irqreturn_t pl010_int(int irq, void *dev_id) diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index ef82a34baf0..ef7adc8135d 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -226,7 +226,7 @@ static void pl011_modem_status(struct uart_amba_port *uap) if (delta & UART01x_FR_CTS) uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS); - wake_up_interruptible(&uap->port.state->delta_msr_wait); + wake_up_interruptible(&uap->port.state->port.delta_msr_wait); } static irqreturn_t pl011_int(int irq, void *dev_id) diff --git 
a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 963e3c12af4..3551c5cb709 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -776,7 +776,7 @@ static void atmel_tasklet_func(unsigned long data) if (status_change & ATMEL_US_CTS) uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); - wake_up_interruptible(&port->state->delta_msr_wait); + wake_up_interruptible(&port->state->port.delta_msr_wait); atmel_port->irq_status_prev = status; } diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index f86c47e08a0..2d7feecaf49 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c @@ -695,7 +695,7 @@ static inline void check_modem_status(struct icom_port *icom_port) delta_status & ICOM_CTS); wake_up_interruptible(&icom_port->uart_port.state-> - delta_msr_wait); + port.delta_msr_wait); old_status = status; } spin_unlock(&icom_port->uart_port.lock); diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 1febeafcb97..18130f11238 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -224,7 +224,7 @@ static void imx_mctrl_check(struct imx_port *sport) if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); - wake_up_interruptible(&sport->port.state->delta_msr_wait); + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); } /* @@ -388,7 +388,7 @@ static irqreturn_t imx_rtsint(int irq, void *dev_id) writel(USR1_RTSD, sport->port.membase + USR1); uart_handle_cts_change(&sport->port, !!val); - wake_up_interruptible(&sport->port.state->delta_msr_wait); + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); spin_unlock_irqrestore(&sport->port.lock, flags); return IRQ_HANDLED; diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c index de4ab1bfee8..d8983dd5c4b 100644 --- a/drivers/serial/ioc3_serial.c +++ b/drivers/serial/ioc3_serial.c @@ -1287,7 +1287,7 @@ static inline int do_read(struct uart_port *the_port, char *buf, int len) (port->ip_port, 0); wake_up_interruptible (&the_port->state-> - delta_msr_wait); + port.delta_msr_wait); } /* If we had any data to return, we @@ -1491,7 +1491,7 @@ ioc3uart_intr_one(struct ioc3_submodule *is, uart_handle_dcd_change(the_port, shadow & SHADOW_DCD); wake_up_interruptible - (&the_port->state->delta_msr_wait); + (&the_port->state->port.delta_msr_wait); } else if ((port->ip_notify & N_DDCD) && !(shadow & SHADOW_DCD)) { /* Flag delta DCD/no DCD */ @@ -1511,7 +1511,7 @@ ioc3uart_intr_one(struct ioc3_submodule *is, uart_handle_cts_change(the_port, shadow & SHADOW_CTS); wake_up_interruptible - (&the_port->state->delta_msr_wait); + (&the_port->state->port.delta_msr_wait); } } @@ -1728,7 +1728,7 @@ static void ic3_shutdown(struct uart_port *the_port) return; state = the_port->state; - wake_up_interruptible(&state->delta_msr_wait); + wake_up_interruptible(&state->port.delta_msr_wait); spin_lock_irqsave(&the_port->lock, port_flags); set_notification(port, N_ALL, 0); diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c index 2055d323f15..2e02c3026d2 100644 --- a/drivers/serial/ioc4_serial.c +++ b/drivers/serial/ioc4_serial.c @@ -1882,7 +1882,7 @@ static void handle_intr(void *arg, uint32_t sio_ir) the_port = port->ip_port; the_port->icount.dcd = 1; wake_up_interruptible - (&the_port->state->delta_msr_wait); + (&the_port->state->port.delta_msr_wait); } else if ((port->ip_notify & N_DDCD) && !(shadow & IOC4_SHADOW_DCD)) { /* Flag delta DCD/no DCD */ @@ -1904,7 +1904,7 @@ static void handle_intr(void *arg, uint32_t 
sio_ir) the_port->icount.cts = (shadow & IOC4_SHADOW_CTS) ? 1 : 0; wake_up_interruptible - (&the_port->state->delta_msr_wait); + (&the_port->state->port.delta_msr_wait); } } @@ -2237,7 +2237,7 @@ static inline int do_read(struct uart_port *the_port, unsigned char *buf, the_port->icount.dcd = 0; wake_up_interruptible (&the_port->state-> - delta_msr_wait); + port.delta_msr_wait); } /* If we had any data to return, we @@ -2439,7 +2439,7 @@ static void ic4_shutdown(struct uart_port *the_port) state = the_port->state; port->ip_port = NULL; - wake_up_interruptible(&state->delta_msr_wait); + wake_up_interruptible(&state->port.delta_msr_wait); if (state->port.tty) set_bit(TTY_IO_ERROR, &state->port.tty->flags); diff --git a/drivers/serial/ip22zilog.c b/drivers/serial/ip22zilog.c index 2e847deb41d..ebff4a1d4bc 100644 --- a/drivers/serial/ip22zilog.c +++ b/drivers/serial/ip22zilog.c @@ -354,7 +354,7 @@ static void ip22zilog_status_handle(struct uart_ip22zilog_port *up, uart_handle_cts_change(&up->port, (status & CTS)); - wake_up_interruptible(&up->port.state->delta_msr_wait); + wake_up_interruptible(&up->port.state->port.delta_msr_wait); } up->prev_status = status; diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c index ff18d50c99c..b05c5aa02cb 100644 --- a/drivers/serial/msm_serial.c +++ b/drivers/serial/msm_serial.c @@ -169,7 +169,7 @@ static void handle_delta_cts(struct uart_port *port) { msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR); port->icount.cts++; - wake_up_interruptible(&port->state->delta_msr_wait); + wake_up_interruptible(&port->state->port.delta_msr_wait); } static irqreturn_t msm_irq(int irq, void *dev_id) diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index 0dc786835dc..0700cd10b97 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c @@ -369,7 +369,7 @@ static void pmz_status_handle(struct uart_pmac_port *uap) uart_handle_cts_change(&uap->port, !(status & CTS)); - wake_up_interruptible(&uap->port.state->delta_msr_wait); + wake_up_interruptible(&uap->port.state->port.delta_msr_wait); } if (status & BRK_ABRT) diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/serial/pnx8xxx_uart.c index 2da74763527..0aa75a97531 100644 --- a/drivers/serial/pnx8xxx_uart.c +++ b/drivers/serial/pnx8xxx_uart.c @@ -100,7 +100,7 @@ static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport) if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); - wake_up_interruptible(&sport->port.state->delta_msr_wait); + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); } /* diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c index ad48919c041..6443b7ff274 100644 --- a/drivers/serial/pxa.c +++ b/drivers/serial/pxa.c @@ -220,7 +220,7 @@ static inline void check_modem_status(struct uart_pxa_port *up) if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); - wake_up_interruptible(&up->port.state->delta_msr_wait); + wake_up_interruptible(&up->port.state->port.delta_msr_wait); } /* diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c index 61ef3ae2492..7f5e2687322 100644 --- a/drivers/serial/sa1100.c +++ b/drivers/serial/sa1100.c @@ -117,7 +117,7 @@ static void sa1100_mctrl_check(struct sa1100_port *sport) if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); - wake_up_interruptible(&sport->port.state->delta_msr_wait); + wake_up_interruptible(&sport->port.state->port.delta_msr_wait); } /* diff --git a/drivers/serial/sb1250-duart.c 
b/drivers/serial/sb1250-duart.c index fa5f303b36d..a2f2b325449 100644 --- a/drivers/serial/sb1250-duart.c +++ b/drivers/serial/sb1250-duart.c @@ -440,7 +440,7 @@ static void sbd_status_handle(struct sbd_port *sport) if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) << S_DUART_IN_PIN_CHNG)) - wake_up_interruptible(&uport->state->delta_msr_wait); + wake_up_interruptible(&uport->state->port.delta_msr_wait); } static irqreturn_t sbd_interrupt(int irq, void *dev_id) diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 9d42e57e197..e16d15343df 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -215,7 +215,8 @@ static int uart_startup(struct uart_state *state, int init_hw) static void uart_shutdown(struct uart_state *state) { struct uart_port *uport = state->uart_port; - struct tty_struct *tty = state->port.tty; + struct tty_port *port = &state->port; + struct tty_struct *tty = port->tty; /* * Set the TTY IO error marker @@ -223,7 +224,7 @@ static void uart_shutdown(struct uart_state *state) if (tty) set_bit(TTY_IO_ERROR, &tty->flags); - if (test_and_clear_bit(ASYNCB_INITIALIZED, &state->port.flags)) { + if (test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) { /* * Turn off DTR and RTS early. */ @@ -237,7 +238,7 @@ static void uart_shutdown(struct uart_state *state) * any outstanding file descriptors should be pointing at * hung_up_tty_fops now. */ - wake_up_interruptible(&state->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); /* * Free the IRQ and disable the port. @@ -1004,11 +1005,15 @@ static int uart_do_autoconfig(struct uart_state *state) * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was + * + * FIXME: This wants extracting into a common all driver implementation + * of TIOCMWAIT using tty_port. 
*/ static int uart_wait_modem_status(struct uart_state *state, unsigned long arg) { struct uart_port *uport = state->uart_port; + struct tty_port *port = &state->port; DECLARE_WAITQUEUE(wait, current); struct uart_icount cprev, cnow; int ret; @@ -1025,7 +1030,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg) uport->ops->enable_ms(uport); spin_unlock_irq(&uport->lock); - add_wait_queue(&state->delta_msr_wait, &wait); + add_wait_queue(&port->delta_msr_wait, &wait); for (;;) { spin_lock_irq(&uport->lock); memcpy(&cnow, &uport->icount, sizeof(struct uart_icount)); @@ -1053,7 +1058,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg) } current->state = TASK_RUNNING; - remove_wait_queue(&state->delta_msr_wait, &wait); + remove_wait_queue(&port->delta_msr_wait, &wait); return ret; } @@ -1430,7 +1435,7 @@ static void uart_hangup(struct tty_struct *tty) clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags); port->tty = NULL; wake_up_interruptible(&port->open_wait); - wake_up_interruptible(&state->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } mutex_unlock(&port->mutex); } @@ -2378,7 +2383,6 @@ int uart_register_driver(struct uart_driver *drv) tty_port_init(port); port->close_delay = 500; /* .5 seconds */ port->closing_wait = 30000; /* 30 seconds */ - init_waitqueue_head(&state->delta_msr_wait); tasklet_init(&state->tlet, uart_tasklet_action, (unsigned long)state); } diff --git a/drivers/serial/serial_ks8695.c b/drivers/serial/serial_ks8695.c index 4560b2e7068..2e71bbc04da 100644 --- a/drivers/serial/serial_ks8695.c +++ b/drivers/serial/serial_ks8695.c @@ -266,7 +266,7 @@ static irqreturn_t ks8695uart_modem_status(int irq, void *dev_id) if (status & URMS_URTERI) port->icount.rng++; - wake_up_interruptible(&port->state->delta_msr_wait); + wake_up_interruptible(&port->state->port.delta_msr_wait); return IRQ_HANDLED; } diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c index 057fc5e8cc8..ea744707c4d 100644 --- a/drivers/serial/serial_lh7a40x.c +++ b/drivers/serial/serial_lh7a40x.c @@ -241,7 +241,7 @@ static void lh7a40xuart_modem_status (struct uart_port* port) if (delta & CTS) uart_handle_cts_change (port, status & CTS); - wake_up_interruptible (&port->state->delta_msr_wait); + wake_up_interruptible (&port->state->port.delta_msr_wait); } static irqreturn_t lh7a40xuart_int (int irq, void* dev_id) diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index 7c4f2fe8e24..d1ad3412863 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -297,7 +297,7 @@ static void check_status(struct uart_sunsab_port *up, up->port.icount.dsr++; } - wake_up_interruptible(&up->port.state->delta_msr_wait); + wake_up_interruptible(&up->port.state->port.delta_msr_wait); } static irqreturn_t sunsab_interrupt(int irq, void *dev_id) diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 5a32365b58a..68d262b1574 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -441,7 +441,7 @@ static void check_modem_status(struct uart_sunsu_port *up) if (status & UART_MSR_DCTS) uart_handle_cts_change(&up->port, status & UART_MSR_CTS); - wake_up_interruptible(&up->port.state->delta_msr_wait); + wake_up_interruptible(&up->port.state->port.delta_msr_wait); } static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id) diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index 055034d12b1..ef693ae22e7 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -451,7 +451,7 @@ 
static void sunzilog_status_handle(struct uart_sunzilog_port *up, uart_handle_cts_change(&up->port, (status & CTS)); - wake_up_interruptible(&up->port.state->delta_msr_wait); + wake_up_interruptible(&up->port.state->port.delta_msr_wait); } up->prev_status = status; diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c index 3d40be6f389..34b31da01d0 100644 --- a/drivers/serial/timbuart.c +++ b/drivers/serial/timbuart.c @@ -231,7 +231,7 @@ static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier) iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR); cts = timbuart_get_mctrl(port); uart_handle_cts_change(port, cts & TIOCM_CTS); - wake_up_interruptible(&port->state->delta_msr_wait); + wake_up_interruptible(&port->state->port.delta_msr_wait); } *ier |= CTS_DELTA; diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c index cf4410e6d53..3beb6ab4fa6 100644 --- a/drivers/serial/vr41xx_siu.c +++ b/drivers/serial/vr41xx_siu.c @@ -386,7 +386,7 @@ static inline void check_modem_status(struct uart_port *port) if (msr & UART_MSR_DCTS) uart_handle_cts_change(port, msr & UART_MSR_CTS); - wake_up_interruptible(&port->state->delta_msr_wait); + wake_up_interruptible(&port->state->port.delta_msr_wait); } static inline void transmit_chars(struct uart_port *port) diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c index b9c9fb9198d..1a7fd3e7031 100644 --- a/drivers/serial/zs.c +++ b/drivers/serial/zs.c @@ -686,7 +686,7 @@ static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a) uport->icount.rng++; if (delta) - wake_up_interruptible(&uport->state->delta_msr_wait); + wake_up_interruptible(&uport->state->port.delta_msr_wait); spin_lock(&scc->zlock); } diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index bbebef7713b..a5049eaf782 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -578,7 +578,6 @@ struct cyclades_port { struct cyclades_idle_stats idle_stats; struct cyclades_icount icount; struct completion shutdown_wait; - wait_queue_head_t delta_msr_wait; int throttle; }; diff --git a/include/linux/hayesesp.h b/include/linux/hayesesp.h index 940aeb51d53..92b08cfe4a7 100644 --- a/include/linux/hayesesp.h +++ b/include/linux/hayesesp.h @@ -96,7 +96,6 @@ struct esp_struct { int xmit_head; int xmit_tail; int xmit_cnt; - wait_queue_head_t delta_msr_wait; wait_queue_head_t break_wait; struct async_icount icount; /* kernel counters for the 4 input interrupts */ struct hayes_esp_config config; /* port configuration */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 27767ea5fa2..bcafecd3b7c 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -349,7 +349,6 @@ struct uart_state { struct circ_buf xmit; struct tasklet_struct tlet; - wait_queue_head_t delta_msr_wait; struct uart_port *uart_port; }; diff --git a/include/linux/tty.h b/include/linux/tty.h index 9fdc3d84baa..0daa8a72b17 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -203,6 +203,7 @@ struct tty_port { int count; /* Usage count */ wait_queue_head_t open_wait; /* Open waiters */ wait_queue_head_t close_wait; /* Close waiters */ + wait_queue_head_t delta_msr_wait; /* Modem status change */ unsigned long flags; /* TTY flags ASY_*/ struct mutex mutex; /* Locking */ unsigned char *xmit_buf; /* Optional buffer */ -- cgit v1.2.3-70-g09d2 From b58d13a0216d4e0753668214f23e1d2c24c30f8c Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:32 -0700 Subject: serial: move port users helper 
This little helper is now tty_port specific and useful generally so move it Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/serial/serial_core.c | 6 ++---- include/linux/tty.h | 5 +++++ 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index e16d15343df..3c45a8d7eb7 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -52,8 +52,6 @@ static struct lock_class_key port_lock_key; #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) -#define uart_users(state) ((state)->port.count + (state)->port.blocked_open) - #ifdef CONFIG_SERIAL_CORE_CONSOLE #define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) #else @@ -758,7 +756,7 @@ static int uart_set_info(struct uart_state *state, /* * Make sure that we are the sole user of this port. */ - if (uart_users(state) > 1) + if (tty_port_users(port) > 1) goto exit; /* @@ -974,7 +972,7 @@ static int uart_do_autoconfig(struct uart_state *state) return -ERESTARTSYS; ret = -EBUSY; - if (uart_users(state) == 1) { + if (tty_port_users(port) == 1) { uart_shutdown(state); /* diff --git a/include/linux/tty.h b/include/linux/tty.h index 0daa8a72b17..a0e03309a37 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -467,6 +467,11 @@ extern int tty_port_close_start(struct tty_port *port, extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); extern void tty_port_close(struct tty_port *port, struct tty_struct *tty, struct file *filp); +extern inline int tty_port_users(struct tty_port *port) +{ + return port->count + port->blocked_open; +} + extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); extern int tty_unregister_ldisc(int disc); extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); -- cgit v1.2.3-70-g09d2 From 016af53a6de6837e5be3da68901083ea85ebb4da Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:32 -0700 Subject: serial: kill USF_CLOSING_* definitions The serial layer for some reason uses different defines for the special case close delays and then conditionally switches to/from the normal ones in the ioctls. Remove this rather pointless abstraction Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/serial/serial_core.c | 6 +++--- include/linux/serial_core.h | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 3c45a8d7eb7..69c4e20530f 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -652,7 +652,7 @@ static int uart_get_info(struct uart_state *state, tmp.xmit_fifo_size = uport->fifosize; tmp.baud_base = uport->uartclk / 16; tmp.close_delay = port->close_delay / 10; - tmp.closing_wait = port->closing_wait == USF_CLOSING_WAIT_NONE ? + tmp.closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : port->closing_wait / 10; tmp.custom_divisor = uport->custom_divisor; @@ -690,7 +690,7 @@ static int uart_set_info(struct uart_state *state, new_serial.irq = irq_canonicalize(new_serial.irq); close_delay = new_serial.close_delay * 10; closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ? - USF_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; + ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; /* * This semaphore protects port->count. 
It is also @@ -1307,7 +1307,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) */ tty->closing = 1; - if (port->closing_wait != USF_CLOSING_WAIT_NONE) + if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) tty_wait_until_sent(tty, msecs_to_jiffies(port->closing_wait)); /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index bcafecd3b7c..d58e460844d 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -342,9 +342,6 @@ struct uart_port { struct uart_state { struct tty_port port; -#define USF_CLOSING_WAIT_INF (0) -#define USF_CLOSING_WAIT_NONE (~0U) - int pm_state; struct circ_buf xmit; -- cgit v1.2.3-70-g09d2 From fe1ae7fdd2ee603f2d95f04e09a68f7f79045127 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Sat, 19 Sep 2009 13:13:33 -0700 Subject: tty: USB serial termios bits Various drivers have hacks to mangle termios structures. This stems from the fact there is no nice setup hook for configuring the termios settings when the port is created Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_io.c | 1 + drivers/usb/serial/ark3116.c | 46 ++++++++++------------------------------ drivers/usb/serial/cypress_m8.c | 12 +++-------- drivers/usb/serial/empeg.c | 12 +++-------- drivers/usb/serial/iuu_phoenix.c | 31 ++++++++++++--------------- drivers/usb/serial/kobil_sct.c | 22 +++++++++---------- drivers/usb/serial/oti6858.c | 21 +++++++++--------- drivers/usb/serial/spcp8x5.c | 21 +++++++++--------- drivers/usb/serial/usb-serial.c | 38 ++++++++++++++++++++++++++++++++- drivers/usb/serial/whiteheat.c | 6 +++--- include/linux/usb/serial.h | 3 +++ 11 files changed, 106 insertions(+), 107 deletions(-) (limited to 'include') diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 385cca7074d..05f443c47bb 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -1184,6 +1184,7 @@ int tty_init_termios(struct tty_struct *tty) tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios); return 0; } +EXPORT_SYMBOL_GPL(tty_init_termios); /** * tty_driver_install_tty() - install a tty entry in the driver diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 7ddde4ddfb4..5d25d3e52bf 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -35,11 +35,6 @@ static struct usb_device_id id_table [] = { }; MODULE_DEVICE_TABLE(usb, id_table); -struct ark3116_private { - spinlock_t lock; - u8 termios_initialized; -}; - static inline void ARK3116_SND(struct usb_serial *serial, int seq, __u8 request, __u8 requesttype, __u16 value, __u16 index) @@ -82,22 +77,11 @@ static inline void ARK3116_RCV_QUIET(struct usb_serial *serial, static int ark3116_attach(struct usb_serial *serial) { char *buf; - struct ark3116_private *priv; - int i; - - for (i = 0; i < serial->num_ports; ++i) { - priv = kzalloc(sizeof(struct ark3116_private), GFP_KERNEL); - if (!priv) - goto cleanup; - spin_lock_init(&priv->lock); - - usb_set_serial_port_data(serial->port[i], priv); - } buf = kmalloc(1, GFP_KERNEL); if (!buf) { dbg("error kmalloc -> out of mem?"); - goto cleanup; + return -ENOMEM; } /* 3 */ @@ -149,13 +133,16 @@ static int ark3116_attach(struct usb_serial *serial) kfree(buf); return 0; +} -cleanup: - for (--i; i >= 0; --i) { - kfree(usb_get_serial_port_data(serial->port[i])); - usb_set_serial_port_data(serial->port[i], NULL); - } - return -ENOMEM; +static void ark3116_init_termios(struct tty_struct *tty) +{ + struct ktermios *termios = tty->termios; + *termios = tty_std_termios; + 
termios->c_cflag = B9600 | CS8 + | CREAD | HUPCL | CLOCAL; + termios->c_ispeed = 9600; + termios->c_ospeed = 9600; } static void ark3116_set_termios(struct tty_struct *tty, @@ -163,10 +150,8 @@ static void ark3116_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct usb_serial *serial = port->serial; - struct ark3116_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = tty->termios; unsigned int cflag = termios->c_cflag; - unsigned long flags; int baud; int ark3116_baud; char *buf; @@ -176,16 +161,6 @@ static void ark3116_set_termios(struct tty_struct *tty, dbg("%s - port %d", __func__, port->number); - spin_lock_irqsave(&priv->lock, flags); - if (!priv->termios_initialized) { - *termios = tty_std_termios; - termios->c_cflag = B9600 | CS8 - | CREAD | HUPCL | CLOCAL; - termios->c_ispeed = 9600; - termios->c_ospeed = 9600; - priv->termios_initialized = 1; - } - spin_unlock_irqrestore(&priv->lock, flags); cflag = termios->c_cflag; termios->c_cflag &= ~(CMSPAR|CRTSCTS); @@ -454,6 +429,7 @@ static struct usb_serial_driver ark3116_device = { .num_ports = 1, .attach = ark3116_attach, .set_termios = ark3116_set_termios, + .init_termios = ark3116_init_termios, .ioctl = ark3116_ioctl, .tiocmget = ark3116_tiocmget, .open = ark3116_open, diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index df1adb9a436..e0a8b715f2f 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -657,15 +657,7 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port) spin_unlock_irqrestore(&priv->lock, flags); /* Set termios */ - result = cypress_write(tty, port, NULL, 0); - - if (result) { - dev_err(&port->dev, - "%s - failed setting the control lines - error %d\n", - __func__, result); - return result; - } else - dbg("%s - success setting the control lines", __func__); + cypress_send(port); if (tty) cypress_set_termios(tty, port, &priv->tmp_termios); @@ -1003,6 +995,8 @@ static void cypress_set_termios(struct tty_struct *tty, dbg("%s - port %d", __func__, port->number); spin_lock_irqsave(&priv->lock, flags); + /* We can't clean this one up as we don't know the device type + early enough */ if (!priv->termios_initialized) { if (priv->chiptype == CT_EARTHMATE) { *(tty->termios) = tty_std_termios; diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c index da559a773b5..33c9e9cf9eb 100644 --- a/drivers/usb/serial/empeg.c +++ b/drivers/usb/serial/empeg.c @@ -89,8 +89,7 @@ static int empeg_chars_in_buffer(struct tty_struct *tty); static void empeg_throttle(struct tty_struct *tty); static void empeg_unthrottle(struct tty_struct *tty); static int empeg_startup(struct usb_serial *serial); -static void empeg_set_termios(struct tty_struct *tty, - struct usb_serial_port *port, struct ktermios *old_termios); +static void empeg_init_termios(struct tty_struct *tty); static void empeg_write_bulk_callback(struct urb *urb); static void empeg_read_bulk_callback(struct urb *urb); @@ -122,7 +121,7 @@ static struct usb_serial_driver empeg_device = { .throttle = empeg_throttle, .unthrottle = empeg_unthrottle, .attach = empeg_startup, - .set_termios = empeg_set_termios, + .init_termios = empeg_init_termios, .write = empeg_write, .write_room = empeg_write_room, .chars_in_buffer = empeg_chars_in_buffer, @@ -148,9 +147,6 @@ static int empeg_open(struct tty_struct *tty,struct usb_serial_port *port) dbg("%s - port %d", __func__, port->number); - /* Force default termio settings */ - empeg_set_termios(tty, port, 
NULL); - bytes_in = 0; bytes_out = 0; @@ -423,11 +419,9 @@ static int empeg_startup(struct usb_serial *serial) } -static void empeg_set_termios(struct tty_struct *tty, - struct usb_serial_port *port, struct ktermios *old_termios) +static void empeg_init_termios(struct tty_struct *tty) { struct ktermios *termios = tty->termios; - dbg("%s - port %d", __func__, port->number); /* * The empeg-car player wants these particular tty settings. diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index f3f9be0469c..6138c1cda35 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -71,7 +71,6 @@ struct iuu_private { spinlock_t lock; /* store irq state */ wait_queue_head_t delta_msr_wait; u8 line_status; - u8 termios_initialized; int tiostatus; /* store IUART SIGNAL for tiocmget call */ u8 reset; /* if 1 reset is needed */ int poll; /* number of poll */ @@ -1018,13 +1017,24 @@ static void iuu_close(struct usb_serial_port *port) } } +static void iuu_init_termios(struct tty_struct *tty) +{ + *(tty->termios) = tty_std_termios; + tty->termios->c_cflag = CLOCAL | CREAD | CS8 | B9600 + | TIOCM_CTS | CSTOPB | PARENB; + tty->termios->c_ispeed = 9600; + tty->termios->c_ospeed = 9600; + tty->termios->c_lflag = 0; + tty->termios->c_oflag = 0; + tty->termios->c_iflag = 0; +} + static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; u8 *buf; int result; u32 actual; - unsigned long flags; struct iuu_private *priv = usb_get_serial_port_data(port); dbg("%s - port %d", __func__, port->number); @@ -1063,21 +1073,7 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port) port->bulk_in_buffer, 512, NULL, NULL); - /* set the termios structure */ - spin_lock_irqsave(&priv->lock, flags); - if (tty && !priv->termios_initialized) { - *(tty->termios) = tty_std_termios; - tty->termios->c_cflag = CLOCAL | CREAD | CS8 | B9600 - | TIOCM_CTS | CSTOPB | PARENB; - tty->termios->c_ispeed = 9600; - tty->termios->c_ospeed = 9600; - tty->termios->c_lflag = 0; - tty->termios->c_oflag = 0; - tty->termios->c_iflag = 0; - priv->termios_initialized = 1; - priv->poll = 0; - } - spin_unlock_irqrestore(&priv->lock, flags); + priv->poll = 0; /* initialize writebuf */ #define FISH(a, b, c, d) do { \ @@ -1200,6 +1196,7 @@ static struct usb_serial_driver iuu_device = { .tiocmget = iuu_tiocmget, .tiocmset = iuu_tiocmset, .set_termios = iuu_set_termios, + .init_termios = iuu_init_termios, .attach = iuu_startup, .release = iuu_release, }; diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 97901578343..45ea694b3ae 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -84,7 +84,7 @@ static void kobil_read_int_callback(struct urb *urb); static void kobil_write_callback(struct urb *purb); static void kobil_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); - +static void kobil_init_termios(struct tty_struct *tty); static struct usb_device_id id_table [] = { { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) }, @@ -119,6 +119,7 @@ static struct usb_serial_driver kobil_device = { .release = kobil_release, .ioctl = kobil_ioctl, .set_termios = kobil_set_termios, + .init_termios = kobil_init_termios, .tiocmget = kobil_tiocmget, .tiocmset = kobil_tiocmset, .open = kobil_open, @@ -209,6 +210,15 @@ static void kobil_release(struct usb_serial *serial) kfree(usb_get_serial_port_data(serial->port[i])); } +static void 
kobil_init_termios(struct tty_struct *tty) +{ + /* Default to echo off and other sane device settings */ + tty->termios->c_lflag = 0; + tty->termios->c_lflag &= ~(ISIG | ICANON | ECHO | IEXTEN | XCASE); + tty->termios->c_iflag = IGNBRK | IGNPAR | IXOFF; + /* do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D) */ + tty->termios->c_oflag &= ~ONLCR; +} static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port) { @@ -224,16 +234,6 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port) /* someone sets the dev to 0 if the close method has been called */ port->interrupt_in_urb->dev = port->serial->dev; - if (tty) { - - /* Default to echo off and other sane device settings */ - tty->termios->c_lflag = 0; - tty->termios->c_lflag &= ~(ISIG | ICANON | ECHO | IEXTEN | - XCASE); - tty->termios->c_iflag = IGNBRK | IGNPAR | IXOFF; - /* do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D) */ - tty->termios->c_oflag &= ~ONLCR; - } /* allocate memory for transfer buffer */ transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL); if (!transfer_buffer) diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index e90f88a3b04..0f4a70ce382 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -145,6 +145,7 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port); static void oti6858_close(struct usb_serial_port *port); static void oti6858_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); +static void oti6858_init_termios(struct tty_struct *tty); static int oti6858_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); static void oti6858_read_int_callback(struct urb *urb); @@ -185,6 +186,7 @@ static struct usb_serial_driver oti6858_device = { .write = oti6858_write, .ioctl = oti6858_ioctl, .set_termios = oti6858_set_termios, + .init_termios = oti6858_init_termios, .tiocmget = oti6858_tiocmget, .tiocmset = oti6858_tiocmset, .read_bulk_callback = oti6858_read_bulk_callback, @@ -205,7 +207,6 @@ struct oti6858_private { struct { u8 read_urb_in_use; u8 write_urb_in_use; - u8 termios_initialized; } flags; struct delayed_work delayed_write_work; @@ -446,6 +447,14 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty) return chars; } +static void oti6858_init_termios(struct tty_struct *tty) +{ + *(tty->termios) = tty_std_termios; + tty->termios->c_cflag = B38400 | CS8 | CREAD | HUPCL | CLOCAL; + tty->termios->c_ispeed = 38400; + tty->termios->c_ospeed = 38400; +} + static void oti6858_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { @@ -463,16 +472,6 @@ static void oti6858_set_termios(struct tty_struct *tty, return; } - spin_lock_irqsave(&priv->lock, flags); - if (!priv->flags.termios_initialized) { - *(tty->termios) = tty_std_termios; - tty->termios->c_cflag = B38400 | CS8 | CREAD | HUPCL | CLOCAL; - tty->termios->c_ispeed = 38400; - tty->termios->c_ospeed = 38400; - priv->flags.termios_initialized = 1; - } - spin_unlock_irqrestore(&priv->lock, flags); - cflag = tty->termios->c_cflag; spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 8b312a05a35..61e7c40b94f 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -299,7 +299,6 @@ struct spcp8x5_private { wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; - u8 termios_initialized; }; /* desc : when device plug in,this function would 
be called. @@ -498,6 +497,15 @@ static void spcp8x5_close(struct usb_serial_port *port) dev_dbg(&port->dev, "usb_unlink_urb(read_urb) = %d\n", result); } +static void spcp8x5_init_termios(struct tty_struct *tty) +{ + /* for the 1st time call this function */ + *(tty->termios) = tty_std_termios; + tty->termios->c_cflag = B115200 | CS8 | CREAD | HUPCL | CLOCAL; + tty->termios->c_ispeed = 115200; + tty->termios->c_ospeed = 115200; +} + /* set the serial param for transfer. we should check if we really need to * transfer. if we set flow control we should do this too. */ static void spcp8x5_set_termios(struct tty_struct *tty, @@ -514,16 +522,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty, int i; u8 control; - /* for the 1st time call this function */ - spin_lock_irqsave(&priv->lock, flags); - if (!priv->termios_initialized) { - *(tty->termios) = tty_std_termios; - tty->termios->c_cflag = B115200 | CS8 | CREAD | HUPCL | CLOCAL; - tty->termios->c_ispeed = 115200; - tty->termios->c_ospeed = 115200; - priv->termios_initialized = 1; - } - spin_unlock_irqrestore(&priv->lock, flags); /* check that they really want us to change something */ if (!tty_termios_hw_change(tty->termios, old_termios)) @@ -1008,6 +1006,7 @@ static struct usb_serial_driver spcp8x5_device = { .carrier_raised = spcp8x5_carrier_raised, .write = spcp8x5_write, .set_termios = spcp8x5_set_termios, + .init_termios = spcp8x5_init_termios, .ioctl = spcp8x5_ioctl, .tiocmget = spcp8x5_tiocmget, .tiocmset = spcp8x5_tiocmset, diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 3dda6841e72..80c1f4d8e91 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -721,6 +721,41 @@ static const struct tty_port_operations serial_port_ops = { .dtr_rts = serial_dtr_rts, }; +/** + * serial_install - install tty + * @driver: the driver (USB in our case) + * @tty: the tty being created + * + * Create the termios objects for this tty. We use the default USB + * serial ones but permit them to be overriddenby serial->type->termios. 
+ * This lets us remove all the ugly hackery + */ + +static int serial_install(struct tty_driver *driver, struct tty_struct *tty) +{ + int idx = tty->index; + struct usb_serial *serial; + int retval; + + /* If the termios setup has yet to be done */ + if (tty->driver->termios[idx] == NULL) { + /* perform the standard setup */ + retval = tty_init_termios(tty); + if (retval) + return retval; + /* allow the driver to update it */ + serial = usb_serial_get_by_index(tty->index); + if (serial->type->init_termios) + serial->type->init_termios(tty); + usb_serial_put(serial); + } + /* Final install (we use the default method) */ + tty_driver_kref_get(driver); + tty->count++; + driver->ttys[idx] = tty; + return 0; +} + int usb_serial_probe(struct usb_interface *interface, const struct usb_device_id *id) { @@ -1228,7 +1263,8 @@ static const struct tty_operations serial_ops = { .chars_in_buffer = serial_chars_in_buffer, .tiocmget = serial_tiocmget, .tiocmset = serial_tiocmset, - .shutdown = serial_do_free, + .shutdown = serial_do_free, + .install = serial_install, .proc_fops = &serial_proc_fops, }; diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 81f2ae50596..62424eec33e 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -259,7 +259,7 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); static int firm_open(struct usb_serial_port *port); static int firm_close(struct usb_serial_port *port); -static int firm_setup_port(struct tty_struct *tty); +static void firm_setup_port(struct tty_struct *tty); static int firm_set_rts(struct usb_serial_port *port, __u8 onoff); static int firm_set_dtr(struct usb_serial_port *port, __u8 onoff); static int firm_set_break(struct usb_serial_port *port, __u8 onoff); @@ -1210,7 +1210,7 @@ static int firm_close(struct usb_serial_port *port) } -static int firm_setup_port(struct tty_struct *tty) +static void firm_setup_port(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct whiteheat_port_settings port_settings; @@ -1285,7 +1285,7 @@ static int firm_setup_port(struct tty_struct *tty) port_settings.lloop = 0; /* now send the message to the device */ - return firm_send_command(port, WHITEHEAT_SETUP_PORT, + firm_send_command(port, WHITEHEAT_SETUP_PORT, (__u8 *)&port_settings, sizeof(port_settings)); } diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 1cbaf4a6b44..7b85e327af9 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -260,6 +260,9 @@ struct usb_serial_driver { be an attached tty at this point */ void (*dtr_rts)(struct usb_serial_port *port, int on); int (*carrier_raised)(struct usb_serial_port *port); + /* Called by the usb serial hooks to allow the user to rework the + termios state */ + void (*init_termios)(struct tty_struct *tty); /* USB events */ void (*read_int_callback)(struct urb *urb); void (*write_int_callback)(struct urb *urb); -- cgit v1.2.3-70-g09d2 From e92166517e3ca9bfb416f91e69cf0373b55b6ede Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 6 Aug 2009 15:09:28 +0200 Subject: tty: handle VT specific compat ioctls in vt driver The VT specific compat_ioctl handlers are the only ones in common code that require the BKL. Moving them into the vt driver lets us remove the BKL from the other handlers and cleans up the code. 
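For orientation before the long vt_ioctl.c hunk that follows, the shape of a driver-private compat handler in this style can be sketched in a few lines. This is a hedged illustration only: example_compat_ioctl(), do_native_ioctl() and EXAMPLE_INT_CMD are placeholders invented for the example, not symbols from this patch.

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/tty.h>

#define EXAMPLE_INT_CMD	0x5601	/* hypothetical command: arg is a plain integer */

/* the existing native ioctl handler this sketch defers to (placeholder) */
extern long do_native_ioctl(struct tty_struct *tty, struct file *file,
			    unsigned int cmd, unsigned long arg);

static long example_compat_ioctl(struct tty_struct *tty, struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_INT_CMD:
		/* commands that treat 'arg' as an integer need no
		 * conversion and can reuse the native handler unchanged */
		return do_native_ioctl(tty, file, cmd, arg);
	default:
		/* commands with a layout-compatible structure behind 'arg'
		 * only need the 32-bit user pointer widened first */
		return do_native_ioctl(tty, file, cmd,
				       (unsigned long)compat_ptr(arg));
	}
}

/* wired up like the vt change below: .compat_ioctl = example_compat_ioctl,
 * in the driver's struct tty_operations, under #ifdef CONFIG_COMPAT */

Commands whose structures really differ between 32-bit and 64-bit userspace (PIO_FONTX, KDFONTOP, PIO_UNIMAP and friends in the hunk below) still need the dedicated conversion helpers the patch adds.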
Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- drivers/char/vt.c | 3 + drivers/char/vt_ioctl.c | 203 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/tty.h | 3 + 3 files changed, 209 insertions(+) (limited to 'include') diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 33214d92ca4..5dfbfa7d760 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -2910,6 +2910,9 @@ static const struct tty_operations con_ops = { .flush_chars = con_flush_chars, .chars_in_buffer = con_chars_in_buffer, .ioctl = vt_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vt_compat_ioctl, +#endif .stop = con_stop, .start = con_start, .throttle = con_throttle, diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 0fceb8f4d6f..30b5d128037 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -1376,6 +1377,208 @@ void vc_SAK(struct work_struct *work) release_console_sem(); } +#ifdef CONFIG_COMPAT + +struct compat_consolefontdesc { + unsigned short charcount; /* characters in font (256 or 512) */ + unsigned short charheight; /* scan lines per character (1-32) */ + compat_caddr_t chardata; /* font data in expanded form */ +}; + +static inline int +compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, + int perm, struct console_font_op *op) +{ + struct compat_consolefontdesc cfdarg; + int i; + + if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc))) + return -EFAULT; + + switch (cmd) { + case PIO_FONTX: + if (!perm) + return -EPERM; + op->op = KD_FONT_OP_SET; + op->flags = KD_FONT_FLAG_OLD; + op->width = 8; + op->height = cfdarg.charheight; + op->charcount = cfdarg.charcount; + op->data = compat_ptr(cfdarg.chardata); + return con_font_op(vc_cons[fg_console].d, op); + case GIO_FONTX: + op->op = KD_FONT_OP_GET; + op->flags = KD_FONT_FLAG_OLD; + op->width = 8; + op->height = cfdarg.charheight; + op->charcount = cfdarg.charcount; + op->data = compat_ptr(cfdarg.chardata); + i = con_font_op(vc_cons[fg_console].d, op); + if (i) + return i; + cfdarg.charheight = op->height; + cfdarg.charcount = op->charcount; + if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc))) + return -EFAULT; + return 0; + } + return -EINVAL; +} + +struct compat_console_font_op { + compat_uint_t op; /* operation code KD_FONT_OP_* */ + compat_uint_t flags; /* KD_FONT_FLAG_* */ + compat_uint_t width, height; /* font size */ + compat_uint_t charcount; + compat_caddr_t data; /* font data with height fixed to 32 */ +}; + +static inline int +compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop, + int perm, struct console_font_op *op, struct vc_data *vc) +{ + int i; + + if (copy_from_user(op, fontop, sizeof(struct compat_console_font_op))) + return -EFAULT; + if (!perm && op->op != KD_FONT_OP_GET) + return -EPERM; + op->data = compat_ptr(((struct compat_console_font_op *)op)->data); + op->flags |= KD_FONT_FLAG_OLD; + i = con_font_op(vc, op); + if (i) + return i; + ((struct compat_console_font_op *)op)->data = (unsigned long)op->data; + if (copy_to_user(fontop, op, sizeof(struct compat_console_font_op))) + return -EFAULT; + return 0; +} + +struct compat_unimapdesc { + unsigned short entry_ct; + compat_caddr_t entries; +}; + +static inline int +compat_unimap_ioctl(unsigned int cmd, struct compat_unimapdesc __user *user_ud, + int perm, struct vc_data *vc) +{ + struct compat_unimapdesc tmp; + struct unipair __user *tmp_entries; + + if 
(copy_from_user(&tmp, user_ud, sizeof tmp)) + return -EFAULT; + tmp_entries = compat_ptr(tmp.entries); + if (tmp_entries) + if (!access_ok(VERIFY_WRITE, tmp_entries, + tmp.entry_ct*sizeof(struct unipair))) + return -EFAULT; + switch (cmd) { + case PIO_UNIMAP: + if (!perm) + return -EPERM; + return con_set_unimap(vc, tmp.entry_ct, tmp_entries); + case GIO_UNIMAP: + if (!perm && fg_console != vc->vc_num) + return -EPERM; + return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp_entries); + } + return 0; +} + +long vt_compat_ioctl(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg) +{ + struct vc_data *vc = tty->driver_data; + struct console_font_op op; /* used in multiple places here */ + struct kbd_struct *kbd; + unsigned int console; + void __user *up = (void __user *)arg; + int perm; + int ret = 0; + + console = vc->vc_num; + + lock_kernel(); + + if (!vc_cons_allocated(console)) { /* impossible? */ + ret = -ENOIOCTLCMD; + goto out; + } + + /* + * To have permissions to do most of the vt ioctls, we either have + * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. + */ + perm = 0; + if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG)) + perm = 1; + + kbd = kbd_table + console; + switch (cmd) { + /* + * these need special handlers for incompatible data structures + */ + case PIO_FONTX: + case GIO_FONTX: + ret = compat_fontx_ioctl(cmd, up, perm, &op); + break; + + case KDFONTOP: + ret = compat_kdfontop_ioctl(up, perm, &op, vc); + break; + + case PIO_UNIMAP: + case GIO_UNIMAP: + ret = do_unimap_ioctl(cmd, up, perm, vc); + break; + + /* + * all these treat 'arg' as an integer + */ + case KIOCSOUND: + case KDMKTONE: +#ifdef CONFIG_X86 + case KDADDIO: + case KDDELIO: +#endif + case KDSETMODE: + case KDMAPDISP: + case KDUNMAPDISP: + case KDSKBMODE: + case KDSKBMETA: + case KDSKBLED: + case KDSETLED: + case KDSIGACCEPT: + case VT_ACTIVATE: + case VT_WAITACTIVE: + case VT_RELDISP: + case VT_DISALLOCATE: + case VT_RESIZE: + case VT_RESIZEX: + goto fallback; + + /* + * the rest has a compatible data structure behind arg, + * but we have to convert it to a proper 64 bit pointer. + */ + default: + arg = (unsigned long)compat_ptr(arg); + goto fallback; + } +out: + unlock_kernel(); + return ret; + +fallback: + unlock_kernel(); + return vt_ioctl(tty, file, cmd, arg); +} + + +#endif /* CONFIG_COMPAT */ + + /* * Performs the back end of a vt switch. Called under the console * semaphore. diff --git a/include/linux/tty.h b/include/linux/tty.h index a0e03309a37..f0f43d08d8b 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -536,5 +536,8 @@ extern int pcxe_open(struct tty_struct *tty, struct file *filp); extern int vt_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); +extern long vt_compat_ioctl(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg); + #endif /* __KERNEL__ */ #endif -- cgit v1.2.3-70-g09d2 From be2f092bfc4f6a415bb4c3e2dcbf521a1f2a0fe5 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Sat, 5 Sep 2009 01:20:43 +0900 Subject: mtd: nand: add __nand_correct_data helper function Split nand_correct_data() into two part, a pure calculation function and a wrapper for mtd interface. The tmio_nand driver can implement its ecc.correct function easily using this __nand_correct_data helper. 
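Since the point of the split is to let controller drivers reuse the calculation without an mtd_info, a minimal sketch of such a driver hook may help. The function name and the 512-byte ECC step below are assumptions made for the illustration; they are not taken from tmio_nand or from this patch.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>

/* Sketch of a controller driver's ecc.correct hook built on the new helper. */
static int example_nand_correct(struct mtd_info *mtd, unsigned char *buf,
				unsigned char *read_ecc,
				unsigned char *calc_ecc)
{
	/* the helper needs no mtd_info: the driver passes its own ECC
	 * step size instead of having it dug out of mtd->priv */
	return __nand_correct_data(buf, read_ecc, calc_ecc, 512);
}

During probe the driver would then point chip->ecc.correct at such a function, while drivers that keep calling nand_correct_data() see no change, because the retained wrapper preserves the old behaviour.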
Signed-off-by: Atsushi Nemoto Acked-by: Dmitry Eremin-Solenikov Acked-by: Vimal Singh Signed-off-by: David Woodhouse --- drivers/mtd/nand/nand_ecc.c | 31 ++++++++++++++++++++++++------- include/linux/mtd/nand_ecc.h | 6 ++++++ 2 files changed, 30 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index c0cb87d6d16..db7ae9d6a29 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -417,22 +417,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, EXPORT_SYMBOL(nand_calculate_ecc); /** - * nand_correct_data - [NAND Interface] Detect and correct bit error(s) - * @mtd: MTD block structure + * __nand_correct_data - [NAND Interface] Detect and correct bit error(s) * @buf: raw data read from the chip * @read_ecc: ECC from the chip * @calc_ecc: the ECC calculated from raw data + * @eccsize: data bytes per ecc step (256 or 512) * - * Detect and correct a 1 bit error for 256/512 byte block + * Detect and correct a 1 bit error for eccsize byte block */ -int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc) +int __nand_correct_data(unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc, + unsigned int eccsize) { unsigned char b0, b1, b2, bit_addr; unsigned int byte_addr; /* 256 or 512 bytes/ecc */ - const uint32_t eccsize_mult = - (((struct nand_chip *)mtd->priv)->ecc.size) >> 8; + const uint32_t eccsize_mult = eccsize >> 8; /* * b0 to b2 indicate which bit is faulty (if any) @@ -495,6 +495,23 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, printk(KERN_ERR "uncorrectable error : "); return -1; } +EXPORT_SYMBOL(__nand_correct_data); + +/** + * nand_correct_data - [NAND Interface] Detect and correct bit error(s) + * @mtd: MTD block structure + * @buf: raw data read from the chip + * @read_ecc: ECC from the chip + * @calc_ecc: the ECC calculated from raw data + * + * Detect and correct a 1 bit error for 256/512 byte block + */ +int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + return __nand_correct_data(buf, read_ecc, calc_ecc, + ((struct nand_chip *)mtd->priv)->ecc.size); +} EXPORT_SYMBOL(nand_correct_data); MODULE_LICENSE("GPL"); diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 090da505425..052ea8ca243 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -20,6 +20,12 @@ struct mtd_info; */ int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); +/* + * Detect and correct a 1 bit error for eccsize byte block + */ +int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, + unsigned int eccsize); + /* * Detect and correct a 1 bit error for 256 byte block */ -- cgit v1.2.3-70-g09d2 From b21495a03e20514eacd788a6b5d160667177cd94 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 20 Sep 2009 15:48:42 +0530 Subject: includecheck fix: include/acpi, acpi_bus.h fix the following 'make includecheck' warning: include/acpi/acpi_bus.h: linux/device.h is included more than once. 
Signed-off-by: Jaswinder Singh Rajput Cc: Sam Ravnborg LKML-Reference: <1247068058.4382.96.camel@ht.satnam> Acked-by: Len Brown --- include/acpi/acpi_bus.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 1fa3ffb7c93..1b3b36068ca 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -356,7 +356,6 @@ void acpi_remove_dir(struct acpi_device *); /* * Bind physical devices with ACPI devices */ -#include struct acpi_bus_type { struct list_head list; struct bus_type *bus; -- cgit v1.2.3-70-g09d2 From 067006a5f5b956fbdd183f9b799e7b8059b53f6c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 20 Sep 2009 15:53:25 +0530 Subject: includecheck fix: include/drm, drm_memory.h fix the following 'make includecheck' warning: include/drm/drm_memory.h: linux/vmalloc.h is included more than once. Signed-off-by: Jaswinder Singh Rajput Cc: Sam Ravnborg LKML-Reference: <1247068169.4382.99.camel@ht.satnam> Acked-by: Dave Airlie --- include/drm/drm_memory.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/drm/drm_memory.h b/include/drm/drm_memory.h index 63e425b5ea8..15af9b32ae4 100644 --- a/include/drm/drm_memory.h +++ b/include/drm/drm_memory.h @@ -44,8 +44,6 @@ #if __OS_HAS_AGP -#include - #ifdef HAVE_PAGE_AGP #include #else -- cgit v1.2.3-70-g09d2 From 43cc960980760f58d1f49ecd39e8cbfb063d63e1 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 20 Sep 2009 16:09:20 +0530 Subject: includecheck fix: include/linux, aio.h fix the following 'make includecheck' warning: include/linux/aio.h: linux/aio_abi.h is included more than once. Signed-off-by: Jaswinder Singh Rajput Cc: bcrl@kvack.org Cc: Sam Ravnborg LKML-Reference: <1247068254.4382.101.camel@ht.satnam> --- include/linux/aio.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/aio.h b/include/linux/aio.h index 47f7d932a01..aea219d7d8d 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -225,8 +225,6 @@ static inline void exit_aio(struct mm_struct *mm) { } #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) -#include - static inline struct kiocb *list_kiocb(struct list_head *h) { return list_entry(h, struct kiocb, ki_list); -- cgit v1.2.3-70-g09d2 From 97572751d78133cf9a5f7165b252bf975f9dd17d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 20 Sep 2009 16:20:44 +0530 Subject: includecheck fix: include/linux, page_cgroup.h fix the following 'make includecheck' warning: include/linux/page_cgroup.h: linux/swap.h is included more than once. 
Signed-off-by: Jaswinder Singh Rajput Cc: Li Zefan Acked-by: Paul Menage --- include/linux/page_cgroup.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 13f126c89ae..ada779f2417 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -105,14 +105,14 @@ static inline void __init page_cgroup_init_flatmem(void) #endif -#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP #include + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); extern unsigned short lookup_swap_cgroup(swp_entry_t ent); extern int swap_cgroup_swapon(int type, unsigned long max_pages); extern void swap_cgroup_swapoff(int type); #else -#include static inline unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -- cgit v1.2.3-70-g09d2 From 83ba7c34d2b82dc608647f629616df393ab883f9 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 20 Sep 2009 16:24:00 +0530 Subject: includecheck fix: include/linux, ftrace.h fix the following 'make includecheck' warning: include/linux/ftrace.h: linux/sched.h is included more than once. Signed-off-by: Jaswinder Singh Rajput Cc: Steven Rostedt Cc: Ingo Molnar Cc: Sam Ravnborg LKML-Reference: <1247068321.4382.102.camel@ht.satnam> --- include/linux/ftrace.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index dc3b1328aae..3c0924a18da 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -446,7 +446,6 @@ static inline void unpause_graph_tracing(void) { } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_TRACING -#include /* flags for current->trace */ enum { -- cgit v1.2.3-70-g09d2 From 42f29a25207dc7b3051d299cc028d4b395d1328d Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Sun, 20 Sep 2009 18:14:12 -0400 Subject: kbuild: Don't define ALIGN and ENTRY when preprocessing linker scripts. Adding a reference to to x86's causes the x86 linker script to have syntax errors, because the ALIGN and ENTRY keywords get redefined to the assembly implementations of those. One could fix this by adjusting the include structure, but I think any solution based on that approach would be fragile. Currently, it is impossible when writing a header to do something different for assembly files and linker scripts, even though there are clearly cases where one wants them to define macros differently for the two (ENTRY being an excellent example). So I think the right solution here is to introduce a new preprocessor definition, called LINKER_SCRIPT that is set along with __ASSEMBLY__ for linker scripts, and to use that to not define ALIGN and ENTRY in linker scripts. I suspect we'll find other uses for this mechanism in the future. 
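To see what the new define enables, here is a deliberately hypothetical header fragment: EXAMPLE_ALIGN is invented for the illustration, and only the __ASSEMBLY__/LINKER_SCRIPT distinction itself comes from this patch.

/* Hypothetical header: give linker scripts, assembly files and C files
 * different definitions of the same macro. */
#ifdef __ASSEMBLY__
# ifdef LINKER_SCRIPT
   /* preprocessing a *.lds.S file: expand to linker-script syntax and
    * leave the linker's own ALIGN()/ENTRY() keywords untouched */
#  define EXAMPLE_ALIGN	ALIGN(8)
# else
   /* a real assembly file */
#  define EXAMPLE_ALIGN	.balign 8
# endif
#else
   /* a C file */
#  define EXAMPLE_ALIGN	__attribute__((aligned(8)))
#endif

The -DLINKER_SCRIPT added to cmd_cpp_lds_S in scripts/Makefile.build below is what selects the first branch whenever a linker script is preprocessed.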
Signed-off-by: Tim Abbott Signed-off-by: Sam Ravnborg --- include/linux/linkage.h | 2 ++ scripts/Makefile.build | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 691f59171c6..5126cceb6ae 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -57,6 +57,7 @@ #ifdef __ASSEMBLY__ +#ifndef LINKER_SCRIPT #define ALIGN __ALIGN #define ALIGN_STR __ALIGN_STR @@ -66,6 +67,7 @@ ALIGN; \ name: #endif +#endif /* LINKER_SCRIPT */ #ifndef WEAK #define WEAK(name) \ diff --git a/scripts/Makefile.build b/scripts/Makefile.build index d5425660a3d..341b58902ff 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -271,7 +271,7 @@ targets += $(extra-y) $(MAKECMDGOALS) $(always) # --------------------------------------------------------------------------- quiet_cmd_cpp_lds_S = LDS $@ cmd_cpp_lds_S = $(CPP) $(cpp_flags) -P -C -U$(ARCH) \ - -D__ASSEMBLY__ -o $@ $< + -D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $< $(obj)/%.lds: $(src)/%.lds.S FORCE $(call if_changed_dep,cpp_lds_S) -- cgit v1.2.3-70-g09d2 From 28d520433b6375740990ab99d69b0d0067fd656b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 21 Sep 2009 14:33:58 +1000 Subject: drm/vgaarb: add VGA arbitration support to the drm and kms. VGA arb requires DRM support for non-kms drivers, to turn on/off irqs when disabling the mem/io regions. VGA arb requires KMS support for GPUs where we can turn off VGA decoding. Currently we know how to do this for intel and radeon kms drivers, which allows them to be removed from the arbiter. This patch comes from Fedora rawhide kernel. Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_irq.c | 27 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_dma.c | 20 ++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 17 +++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/radeon/r100.c | 14 ++++++++++++++ drivers/gpu/drm/radeon/r600.c | 14 ++++++++++++++ drivers/gpu/drm/radeon/r600d.h | 1 + drivers/gpu/drm/radeon/radeon.h | 2 ++ drivers/gpu/drm/radeon/radeon_asic.h | 12 ++++++++++++ drivers/gpu/drm/radeon/radeon_device.c | 21 ++++++++++++++++++++- include/drm/drmP.h | 3 +++ 13 files changed, 133 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index f85aaf21e78..0a6f0b3bdc7 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -37,6 +37,7 @@ #include /* For task queue support */ +#include /** * Get interrupt from bus id. * @@ -171,6 +172,26 @@ err: } EXPORT_SYMBOL(drm_vblank_init); +static void drm_irq_vgaarb_nokms(void *cookie, bool state) +{ + struct drm_device *dev = cookie; + + if (dev->driver->vgaarb_irq) { + dev->driver->vgaarb_irq(dev, state); + return; + } + + if (!dev->irq_enabled) + return; + + if (state) + dev->driver->irq_uninstall(dev); + else { + dev->driver->irq_preinstall(dev); + dev->driver->irq_postinstall(dev); + } +} + /** * Install IRQ handler. 
* @@ -231,6 +252,9 @@ int drm_irq_install(struct drm_device *dev) return ret; } + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); + /* After installing handler */ ret = dev->driver->irq_postinstall(dev); if (ret < 0) { @@ -279,6 +303,9 @@ int drm_irq_uninstall(struct drm_device * dev) DRM_DEBUG("irq=%d\n", dev->pdev->irq); + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + vga_client_register(dev->pdev, NULL, NULL, NULL); + dev->driver->irq_uninstall(dev); free_irq(dev->pdev->irq, dev); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9909505d070..5a49a1867b3 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -33,6 +33,7 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include /* Really want an OS-independent resettable timer. Would like to have * this loop run for (eg) 3 sec, but have the timer reset every time @@ -1012,6 +1013,19 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, return 0; } +/* true = enable decode, false = disable decoder */ +static unsigned int i915_vga_set_decode(void *cookie, bool state) +{ + struct drm_device *dev = cookie; + + intel_modeset_vga_set_state(dev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + static int i915_load_modeset_init(struct drm_device *dev, unsigned long prealloc_size, unsigned long agp_size) @@ -1057,6 +1071,11 @@ static int i915_load_modeset_init(struct drm_device *dev, if (ret) DRM_INFO("failed to find VBIOS tables\n"); + /* if we have > 1 VGA cards, then disable the radeon VGA resources */ + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); + if (ret) + goto destroy_ringbuffer; + ret = drm_irq_install(dev); if (ret) goto destroy_ringbuffer; @@ -1324,6 +1343,7 @@ int i915_driver_unload(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) { drm_irq_uninstall(dev); + vga_client_register(dev->pdev, NULL, NULL, NULL); } if (dev->pdev->msi_enabled) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 77ed060b429..a0632f8e76a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -766,6 +766,7 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; } /* modesetting */ extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); +extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); /** * Lock test for when it's just for synchronization of ring access. diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e38cd21161c..3f796355346 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -30,6 +30,7 @@ * fb aperture size and the amount of pre-reserved memory. 
*/ #define INTEL_GMCH_CTRL 0x52 +#define INTEL_GMCH_VGA_DISABLE (1 << 1) #define INTEL_GMCH_ENABLED 0x4 #define INTEL_GMCH_MEM_MASK 0x1 #define INTEL_GMCH_MEM_64M 0x1 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 155719ff99d..0227b165290 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3917,3 +3917,20 @@ struct drm_encoder *intel_best_encoder(struct drm_connector *connector) return &intel_output->enc; } + +/* + * set vga decode state - true == enable VGA decode + */ +int intel_modeset_vga_set_state(struct drm_device *dev, bool state) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 gmch_ctrl; + + pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); + if (state) + gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; + else + gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; + pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b9e47f1e1cc..3a0004f755d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -178,4 +178,5 @@ extern int intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, struct drm_framebuffer **fb, struct drm_gem_object *obj); + #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 737970b43ae..be51c5f7d0f 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1955,6 +1955,20 @@ void r100_vram_init_sizes(struct radeon_device *rdev) rdev->mc.real_vram_size = rdev->mc.aper_size; } +void r100_vga_set_state(struct radeon_device *rdev, bool state) +{ + uint32_t temp; + + temp = RREG32(RADEON_CONFIG_CNTL); + if (state == false) { + temp &= ~(1<<8); + temp |= (1<<9); + } else { + temp &= ~(1<<9); + } + WREG32(RADEON_CONFIG_CNTL, temp); +} + void r100_vram_info(struct radeon_device *rdev) { r100_vram_get_type(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 5f42fad1919..eab31c1d6df 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1499,6 +1499,20 @@ int r600_startup(struct radeon_device *rdev) return 0; } +void r600_vga_set_state(struct radeon_device *rdev, bool state) +{ + uint32_t temp; + + temp = RREG32(CONFIG_CNTL); + if (state == false) { + temp &= ~(1<<0); + temp |= (1<<1); + } else { + temp &= ~(1<<1); + } + WREG32(CONFIG_CNTL, temp); +} + int r600_resume(struct radeon_device *rdev) { int r; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 723295f5928..4a9028a85c9 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -78,6 +78,7 @@ #define CB_COLOR0_MASK 0x28100 #define CONFIG_MEMSIZE 0x5428 +#define CONFIG_CNTL 0x5424 #define CP_STAT 0x8680 #define CP_COHER_BASE 0x85F8 #define CP_DEBUG 0xC1FC diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 817af8ecff1..c839b608970 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -596,6 +596,7 @@ struct radeon_asic { int (*suspend)(struct radeon_device *rdev); void (*errata)(struct radeon_device *rdev); void (*vram_info)(struct radeon_device *rdev); + void (*vga_set_state)(struct radeon_device *rdev, bool state); int (*gpu_reset)(struct radeon_device *rdev); int (*mc_init)(struct radeon_device *rdev); void (*mc_fini)(struct radeon_device *rdev); @@ -954,6 +955,7 @@ 
static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) #define radeon_errata(rdev) (rdev)->asic->errata((rdev)) #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) +#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) #define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) #define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5f2a9e6f12c..8968f78fa1e 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -47,6 +47,7 @@ uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void r100_errata(struct radeon_device *rdev); void r100_vram_info(struct radeon_device *rdev); +void r100_vga_set_state(struct radeon_device *rdev, bool state); int r100_gpu_reset(struct radeon_device *rdev); int r100_mc_init(struct radeon_device *rdev); void r100_mc_fini(struct radeon_device *rdev); @@ -89,6 +90,7 @@ static struct radeon_asic r100_asic = { .init = &r100_init, .errata = &r100_errata, .vram_info = &r100_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r100_gpu_reset, .mc_init = &r100_mc_init, .mc_fini = &r100_mc_fini, @@ -158,6 +160,7 @@ static struct radeon_asic r300_asic = { .init = &r300_init, .errata = &r300_errata, .vram_info = &r300_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &r300_mc_init, .mc_fini = &r300_mc_fini, @@ -208,6 +211,7 @@ static struct radeon_asic r420_asic = { .resume = &r420_resume, .errata = NULL, .vram_info = NULL, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = NULL, .mc_fini = NULL, @@ -262,6 +266,7 @@ static struct radeon_asic rs400_asic = { .init = &r300_init, .errata = &rs400_errata, .vram_info = &rs400_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &rs400_mc_init, .mc_fini = &rs400_mc_fini, @@ -323,6 +328,7 @@ static struct radeon_asic rs600_asic = { .init = &rs600_init, .errata = &rs600_errata, .vram_info = &rs600_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &rs600_mc_init, .mc_fini = &rs600_mc_fini, @@ -372,6 +378,7 @@ static struct radeon_asic rs690_asic = { .init = &rs600_init, .errata = &rs690_errata, .vram_info = &rs690_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &rs690_mc_init, .mc_fini = &rs690_mc_fini, @@ -428,6 +435,7 @@ static struct radeon_asic rv515_asic = { .init = &rv515_init, .errata = &rv515_errata, .vram_info = &rv515_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, .mc_init = &rv515_mc_init, .mc_fini = &rv515_mc_fini, @@ -477,6 +485,7 @@ static struct radeon_asic r520_asic = { .init = &rv515_init, .errata = &r520_errata, .vram_info = &r520_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, .mc_init = &r520_mc_init, .mc_fini = &r520_mc_fini, @@ -520,6 +529,7 @@ int r600_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); int r600_suspend(struct radeon_device *rdev); int r600_resume(struct radeon_device *rdev); +void r600_vga_set_state(struct radeon_device *rdev, bool state); int r600_wb_init(struct radeon_device *rdev); void 
r600_wb_fini(struct radeon_device *rdev); void r600_cp_commit(struct radeon_device *rdev); @@ -556,6 +566,7 @@ static struct radeon_asic r600_asic = { .resume = &r600_resume, .cp_commit = &r600_cp_commit, .vram_info = NULL, + .vga_set_state = &r600_vga_set_state, .gpu_reset = &r600_gpu_reset, .mc_init = NULL, .mc_fini = NULL, @@ -606,6 +617,7 @@ static struct radeon_asic rv770_asic = { .cp_commit = &r600_cp_commit, .vram_info = NULL, .gpu_reset = &rv770_gpu_reset, + .vga_set_state = &r600_vga_set_state, .mc_init = NULL, .mc_fini = NULL, .wb_init = &r600_wb_init, diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8a40c616b53..daf5db78095 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "radeon_reg.h" #include "radeon.h" #include "radeon_asic.h" @@ -480,7 +481,18 @@ void radeon_combios_fini(struct radeon_device *rdev) { } +/* if we get transitioned to only one device, tak VGA back */ +static unsigned int radeon_vga_set_decode(void *cookie, bool state) +{ + struct radeon_device *rdev = cookie; + radeon_vga_set_state(rdev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} /* * Radeon device. */ @@ -578,6 +590,13 @@ int radeon_device_init(struct radeon_device *rdev, if (r) { return r; } + + /* if we have > 1 VGA cards, then disable the radeon VGA resources */ + r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); + if (r) { + return -EINVAL; + } + if (!rdev->new_init_path) { /* Setup errata flags */ radeon_errata(rdev); @@ -586,7 +605,6 @@ int radeon_device_init(struct radeon_device *rdev, /* Initialize surface registers */ radeon_surface_init(rdev); - /* TODO: disable VGA need to use VGA request */ /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) @@ -697,6 +715,7 @@ void radeon_device_fini(struct radeon_device *rdev) radeon_agp_fini(rdev); #endif radeon_irq_kms_fini(rdev); + vga_client_register(rdev->pdev, NULL, NULL, NULL); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); radeon_object_fini(rdev); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index eeefb6369e1..c8e64bbadbc 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -810,6 +810,9 @@ struct drm_driver { int (*gem_init_object) (struct drm_gem_object *obj); void (*gem_free_object) (struct drm_gem_object *obj); + /* vga arb irq handler */ + void (*vgaarb_irq)(struct drm_device *dev, bool state); + /* Driver private ops for this object */ struct vm_operations_struct *gem_vm_ops; -- cgit v1.2.3-70-g09d2 From cd74c86bdf705f824d494a2bbda393d1d562b40a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 21 Sep 2009 16:44:32 +1000 Subject: perf_counter, powerpc, sparc: Fix compilation after perf_counter_overflow() change Commit 5622f295 ("x86, perf_counter, bts: Optimize BTS overflow handling") removed the regs field from struct perf_sample_data and added a regs parameter to perf_counter_overflow(). This breaks the build on powerpc (and Sparc) as reported by Sachin Sant: arch/powerpc/kernel/perf_counter.c: In function 'record_and_restart': arch/powerpc/kernel/perf_counter.c:1165: error: unknown field 'regs' specified in initializer This adjusts arch/powerpc/kernel/perf_counter.c to correspond with the new struct perf_sample_data and perf_counter_overflow(). 
[ v2: also fix Sparc, Markus Metzger ] Reported-by: Sachin Sant Signed-off-by: Paul Mackerras Cc: Markus Metzger Cc: David S. Miller Cc: benh@kernel.crashing.org Cc: linuxppc-dev@ozlabs.org Cc: Peter Zijlstra LKML-Reference: <19127.8400.376239.586120@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_counter.c | 3 +-- arch/sparc/kernel/perf_counter.c | 3 +-- include/linux/perf_counter.h | 17 ----------------- 3 files changed, 2 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 7ceefaf3a7f..5ccf9bca96c 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c @@ -1162,7 +1162,6 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val, */ if (record) { struct perf_sample_data data = { - .regs = regs, .addr = 0, .period = counter->hw.last_period, }; @@ -1170,7 +1169,7 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val, if (counter->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); - if (perf_counter_overflow(counter, nmi, &data)) { + if (perf_counter_overflow(counter, nmi, &data, regs)) { /* * Interrupts are coming too fast - throttle them * by setting the counter to 0, so it will be diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_counter.c index 09de4035eaa..b1265ce8a05 100644 --- a/arch/sparc/kernel/perf_counter.c +++ b/arch/sparc/kernel/perf_counter.c @@ -493,7 +493,6 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, regs = args->regs; - data.regs = regs; data.addr = 0; cpuc = &__get_cpu_var(cpu_hw_counters); @@ -513,7 +512,7 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, if (!sparc_perf_counter_set_period(counter, hwc, idx)) continue; - if (perf_counter_overflow(counter, 1, &data)) + if (perf_counter_overflow(counter, 1, &data, regs)) sparc_pmu_disable_counter(hwc, idx); } diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index bd341007c4f..740caad09a4 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -849,23 +849,6 @@ static inline void perf_counter_comm(struct task_struct *tsk) { } static inline void perf_counter_fork(struct task_struct *tsk) { } static inline void perf_counter_init(void) { } -static inline int -perf_output_begin(struct perf_output_handle *handle, struct perf_counter *c, - unsigned int size, int nmi, int sample) { } -static inline void perf_output_end(struct perf_output_handle *handle) { } -static inline void -perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len) { } -static inline void -perf_output_sample(struct perf_output_handle *handle, - struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter) { } -static inline void -perf_prepare_sample(struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter, - struct pt_regs *regs) { } #endif #define perf_output_put(handle, x) \ -- cgit v1.2.3-70-g09d2 From 65abc8653c282ded3dbdb9ec1227784140ba28cd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 21 Sep 2009 10:18:27 +0200 Subject: perf_counter: Rename list_entry -> group_entry, counter_list -> group_list This is in preparation of the big rename, but also makes sense in a standalone way: 'list_entry' is a bad name as we already have a list_entry() in list.h. 
Also, the 'counter list' is too vague, it doesn't tell us the purpose of that list. Clarify these names to show that it's all about the group hierarchy. Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 4 +-- kernel/perf_counter.c | 71 ++++++++++++++++++++++---------------------- 2 files changed, 37 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 740caad09a4..f6486273267 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -543,7 +543,7 @@ struct perf_pending_entry { */ struct perf_counter { #ifdef CONFIG_PERF_COUNTERS - struct list_head list_entry; + struct list_head group_entry; struct list_head event_entry; struct list_head sibling_list; int nr_siblings; @@ -649,7 +649,7 @@ struct perf_counter_context { */ struct mutex mutex; - struct list_head counter_list; + struct list_head group_list; struct list_head event_list; int nr_counters; int nr_active; diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index cc768ab81ac..13ad73aed4c 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -258,9 +258,9 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) * leader's sibling list: */ if (group_leader == counter) - list_add_tail(&counter->list_entry, &ctx->counter_list); + list_add_tail(&counter->group_entry, &ctx->group_list); else { - list_add_tail(&counter->list_entry, &group_leader->sibling_list); + list_add_tail(&counter->group_entry, &group_leader->sibling_list); group_leader->nr_siblings++; } @@ -279,13 +279,13 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) { struct perf_counter *sibling, *tmp; - if (list_empty(&counter->list_entry)) + if (list_empty(&counter->group_entry)) return; ctx->nr_counters--; if (counter->attr.inherit_stat) ctx->nr_stat--; - list_del_init(&counter->list_entry); + list_del_init(&counter->group_entry); list_del_rcu(&counter->event_entry); if (counter->group_leader != counter) @@ -296,10 +296,9 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) * upgrade the siblings to singleton counters by adding them * to the context list directly: */ - list_for_each_entry_safe(sibling, tmp, - &counter->sibling_list, list_entry) { + list_for_each_entry_safe(sibling, tmp, &counter->sibling_list, group_entry) { - list_move_tail(&sibling->list_entry, &ctx->counter_list); + list_move_tail(&sibling->group_entry, &ctx->group_list); sibling->group_leader = sibling; } } @@ -343,7 +342,7 @@ group_sched_out(struct perf_counter *group_counter, /* * Schedule out siblings (if any): */ - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) + list_for_each_entry(counter, &group_counter->sibling_list, group_entry) counter_sched_out(counter, cpuctx, ctx); if (group_counter->attr.exclusive) @@ -435,7 +434,7 @@ retry: /* * If the context is active we need to retry the smp call. */ - if (ctx->nr_active && !list_empty(&counter->list_entry)) { + if (ctx->nr_active && !list_empty(&counter->group_entry)) { spin_unlock_irq(&ctx->lock); goto retry; } @@ -445,7 +444,7 @@ retry: * can remove the counter safely, if the call above did not * succeed.
*/ - if (!list_empty(&counter->list_entry)) { + if (!list_empty(&counter->group_entry)) { list_del_counter(counter, ctx); } spin_unlock_irq(&ctx->lock); @@ -497,7 +496,7 @@ static void update_group_times(struct perf_counter *leader) struct perf_counter *counter; update_counter_times(leader); - list_for_each_entry(counter, &leader->sibling_list, list_entry) + list_for_each_entry(counter, &leader->sibling_list, group_entry) update_counter_times(counter); } @@ -643,7 +642,7 @@ group_sched_in(struct perf_counter *group_counter, /* * Schedule in siblings as one group (if any): */ - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { + list_for_each_entry(counter, &group_counter->sibling_list, group_entry) { if (counter_sched_in(counter, cpuctx, ctx, cpu)) { partial_group = counter; goto group_error; @@ -657,7 +656,7 @@ group_error: * Groups can be scheduled in as one unit only, so undo any * partial group before returning: */ - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { + list_for_each_entry(counter, &group_counter->sibling_list, group_entry) { if (counter == partial_group) break; counter_sched_out(counter, cpuctx, ctx); @@ -678,7 +677,7 @@ static int is_software_only_group(struct perf_counter *leader) if (!is_software_counter(leader)) return 0; - list_for_each_entry(counter, &leader->sibling_list, list_entry) + list_for_each_entry(counter, &leader->sibling_list, group_entry) if (!is_software_counter(counter)) return 0; @@ -842,7 +841,7 @@ retry: /* * we need to retry the smp call. */ - if (ctx->is_active && list_empty(&counter->list_entry)) { + if (ctx->is_active && list_empty(&counter->group_entry)) { spin_unlock_irq(&ctx->lock); goto retry; } @@ -852,7 +851,7 @@ retry: * can add the counter safely, if it the call above did not * succeed. */ - if (list_empty(&counter->list_entry)) + if (list_empty(&counter->group_entry)) add_counter_to_ctx(counter, ctx); spin_unlock_irq(&ctx->lock); } @@ -872,7 +871,7 @@ static void __perf_counter_mark_enabled(struct perf_counter *counter, counter->state = PERF_COUNTER_STATE_INACTIVE; counter->tstamp_enabled = ctx->time - counter->total_time_enabled; - list_for_each_entry(sub, &counter->sibling_list, list_entry) + list_for_each_entry(sub, &counter->sibling_list, group_entry) if (sub->state >= PERF_COUNTER_STATE_INACTIVE) sub->tstamp_enabled = ctx->time - sub->total_time_enabled; @@ -1032,7 +1031,7 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx, perf_disable(); if (ctx->nr_active) { - list_for_each_entry(counter, &ctx->counter_list, list_entry) { + list_for_each_entry(counter, &ctx->group_list, group_entry) { if (counter != counter->group_leader) counter_sched_out(counter, cpuctx, ctx); else @@ -1252,7 +1251,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, * First go through the list and put on any pinned groups * in order to give them the best chance of going on. */ - list_for_each_entry(counter, &ctx->counter_list, list_entry) { + list_for_each_entry(counter, &ctx->group_list, group_entry) { if (counter->state <= PERF_COUNTER_STATE_OFF || !counter->attr.pinned) continue; @@ -1276,7 +1275,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, } } - list_for_each_entry(counter, &ctx->counter_list, list_entry) { + list_for_each_entry(counter, &ctx->group_list, group_entry) { /* * Ignore counters in OFF or ERROR state, and * ignore pinned counters since we did them already. 
@@ -1369,7 +1368,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) u64 interrupts, freq; spin_lock(&ctx->lock); - list_for_each_entry(counter, &ctx->counter_list, list_entry) { + list_for_each_entry(counter, &ctx->group_list, group_entry) { if (counter->state != PERF_COUNTER_STATE_ACTIVE) continue; @@ -1441,8 +1440,8 @@ static void rotate_ctx(struct perf_counter_context *ctx) * Rotate the first entry last (works just fine for group counters too): */ perf_disable(); - list_for_each_entry(counter, &ctx->counter_list, list_entry) { - list_move_tail(&counter->list_entry, &ctx->counter_list); + list_for_each_entry(counter, &ctx->group_list, group_entry) { + list_move_tail(&counter->group_entry, &ctx->group_list); break; } perf_enable(); @@ -1498,7 +1497,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task) spin_lock(&ctx->lock); - list_for_each_entry(counter, &ctx->counter_list, list_entry) { + list_for_each_entry(counter, &ctx->group_list, group_entry) { if (!counter->attr.enable_on_exec) continue; counter->attr.enable_on_exec = 0; @@ -1575,7 +1574,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx, memset(ctx, 0, sizeof(*ctx)); spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); - INIT_LIST_HEAD(&ctx->counter_list); + INIT_LIST_HEAD(&ctx->group_list); INIT_LIST_HEAD(&ctx->event_list); atomic_set(&ctx->refcount, 1); ctx->task = task; @@ -1818,7 +1817,7 @@ static int perf_counter_read_group(struct perf_counter *counter, size += err; - list_for_each_entry(sub, &leader->sibling_list, list_entry) { + list_for_each_entry(sub, &leader->sibling_list, group_entry) { err = perf_counter_read_entry(sub, read_format, buf + size); if (err < 0) @@ -1948,7 +1947,7 @@ static void perf_counter_for_each(struct perf_counter *counter, perf_counter_for_each_child(counter, func); func(counter); - list_for_each_entry(sibling, &counter->sibling_list, list_entry) + list_for_each_entry(sibling, &counter->sibling_list, group_entry) perf_counter_for_each_child(counter, func); mutex_unlock(&ctx->mutex); } @@ -2832,7 +2831,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, perf_output_copy(handle, values, n * sizeof(u64)); - list_for_each_entry(sub, &leader->sibling_list, list_entry) { + list_for_each_entry(sub, &leader->sibling_list, group_entry) { n = 0; if (sub != counter) @@ -4118,7 +4117,7 @@ perf_counter_alloc(struct perf_counter_attr *attr, mutex_init(&counter->child_mutex); INIT_LIST_HEAD(&counter->child_list); - INIT_LIST_HEAD(&counter->list_entry); + INIT_LIST_HEAD(&counter->group_entry); INIT_LIST_HEAD(&counter->event_entry); INIT_LIST_HEAD(&counter->sibling_list); init_waitqueue_head(&counter->waitq); @@ -4544,7 +4543,7 @@ static int inherit_group(struct perf_counter *parent_counter, child, NULL, child_ctx); if (IS_ERR(leader)) return PTR_ERR(leader); - list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { + list_for_each_entry(sub, &parent_counter->sibling_list, group_entry) { child_ctr = inherit_counter(sub, parent, parent_ctx, child, leader, child_ctx); if (IS_ERR(child_ctr)) @@ -4670,8 +4669,8 @@ void perf_counter_exit_task(struct task_struct *child) mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); again: - list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, - list_entry) + list_for_each_entry_safe(child_counter, tmp, &child_ctx->group_list, + group_entry) __perf_counter_exit_task(child_counter, child_ctx, child); /* @@ -4679,7 +4678,7 @@ again: * its siblings to the list, but we 
obtained 'tmp' before that which * will still point to the list head terminating the iteration. */ - if (!list_empty(&child_ctx->counter_list)) + if (!list_empty(&child_ctx->group_list)) goto again; mutex_unlock(&child_ctx->mutex); @@ -4701,7 +4700,7 @@ void perf_counter_free_task(struct task_struct *task) mutex_lock(&ctx->mutex); again: - list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { + list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) { struct perf_counter *parent = counter->parent; if (WARN_ON_ONCE(!parent)) @@ -4717,7 +4716,7 @@ again: free_counter(counter); } - if (!list_empty(&ctx->counter_list)) + if (!list_empty(&ctx->group_list)) goto again; mutex_unlock(&ctx->mutex); @@ -4847,7 +4846,7 @@ static void __perf_counter_exit_cpu(void *info) struct perf_counter_context *ctx = &cpuctx->ctx; struct perf_counter *counter, *tmp; - list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) + list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) __perf_counter_remove_from_context(counter); } static void perf_counter_exit_cpu(int cpu) -- cgit v1.2.3-70-g09d2 From cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 21 Sep 2009 12:02:48 +0200 Subject: perf: Do the big rename: Performance Counters -> Performance Events Bye-bye Performance Counters, welcome Performance Events! In the past few months the perfcounters subsystem has grown out of its initial role of counting hardware events, and has become (and is becoming) a much broader generic event enumeration, reporting, logging, monitoring, analysis facility. Naming its core object 'perf_counter' and naming the subsystem 'perfcounters' has become more and more of a misnomer. With pending code like hw-breakpoints support the 'counter' name is less and less appropriate. All in one, we've decided to rename the subsystem to 'performance events' and to propagate this rename through all fields, variables and API names. (in an ABI compatible fashion) The word 'event' is also a bit shorter than 'counter' - which makes it slightly more convenient to write/handle as well. Thanks goes to Stephane Eranian who first observed this misnomer and suggested a rename. User-space tooling and ABI compatibility is not affected - this patch should be function-invariant. (Also, defconfigs were not touched to keep the size down.) This patch has been generated via the following script: FILES=$(find * -type f | grep -vE 'oprofile|[^K]config') sed -i \ -e 's/PERF_EVENT_/PERF_RECORD_/g' \ -e 's/PERF_COUNTER/PERF_EVENT/g' \ -e 's/perf_counter/perf_event/g' \ -e 's/nb_counters/nb_events/g' \ -e 's/swcounter/swevent/g' \ -e 's/tpcounter_event/tp_event/g' \ $FILES for N in $(find . -name perf_counter.[ch]); do M=$(echo $N | sed 's/perf_counter/perf_event/g') mv $N $M done FILES=$(find . -name perf_event.*) sed -i \ -e 's/COUNTER_MASK/REG_MASK/g' \ -e 's/COUNTER/EVENT/g' \ -e 's/\<event\>/event_id/g' \ -e 's/counter/event/g' \ -e 's/Counter/Event/g' \ $FILES ... to keep it as correct as possible. This script can also be used by anyone who has pending perfcounters patches - it converts a Linux kernel tree over to the new naming. We tried to time this change to the point in time where the amount of pending patches is the smallest: the end of the merge window. Namespace clashes were fixed up in a preparatory patch - and some stylistic fallout will be fixed up in a subsequent patch.
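The "function-invariant / ABI compatible" statement above means existing user space keeps working and only the spellings change. As a rough illustration (a minimal sketch, not part of this patch: it assumes a post-rename kernel with the updated <linux/perf_event.h> header installed and uses the renamed __NR_perf_event_open and PERF_EVENT_IOC_* names), counting instructions from user space looks like this:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* there is no glibc wrapper, so invoke the renamed syscall directly */
	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
		     -1 /* no group */, 0 /* flags */);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... the workload being measured would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}

Only the identifiers differ from the perf_counter spellings; the syscall slot and ioctl values themselves are untouched by the rename, which is why previously built binaries keep running.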
( NOTE: 'counters' are still the proper terminology when we deal with hardware registers - and these sed scripts are a bit over-eager in renaming them. I've undone some of that, but in case there's something left where 'counter' would be better than 'event' we can undo that on an individual basis instead of touching an otherwise nicely automated patch. ) Suggested-by: Stephane Eranian Acked-by: Peter Zijlstra Acked-by: Paul Mackerras Reviewed-by: Arjan van de Ven Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Benjamin Herrenschmidt Cc: David Howells Cc: Kyle McMartin Cc: Martin Schwidefsky Cc: "David S. Miller" Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: LKML-Reference: Signed-off-by: Ingo Molnar --- arch/arm/include/asm/unistd.h | 2 +- arch/arm/kernel/calls.S | 2 +- arch/blackfin/include/asm/unistd.h | 2 +- arch/blackfin/mach-common/entry.S | 2 +- arch/frv/Kconfig | 2 +- arch/frv/include/asm/perf_counter.h | 17 - arch/frv/include/asm/perf_event.h | 17 + arch/frv/include/asm/unistd.h | 2 +- arch/frv/kernel/entry.S | 2 +- arch/frv/lib/Makefile | 2 +- arch/frv/lib/perf_counter.c | 19 - arch/frv/lib/perf_event.c | 19 + arch/m68k/include/asm/unistd.h | 2 +- arch/m68k/kernel/entry.S | 2 +- arch/m68knommu/kernel/syscalltable.S | 2 +- arch/microblaze/include/asm/unistd.h | 2 +- arch/microblaze/kernel/syscall_table.S | 2 +- arch/mips/include/asm/unistd.h | 6 +- arch/mips/kernel/scall32-o32.S | 2 +- arch/mips/kernel/scall64-64.S | 2 +- arch/mips/kernel/scall64-n32.S | 2 +- arch/mips/kernel/scall64-o32.S | 2 +- arch/mn10300/include/asm/unistd.h | 2 +- arch/mn10300/kernel/entry.S | 2 +- arch/parisc/Kconfig | 2 +- arch/parisc/include/asm/perf_counter.h | 7 - arch/parisc/include/asm/perf_event.h | 7 + arch/parisc/include/asm/unistd.h | 4 +- arch/parisc/kernel/syscall_table.S | 2 +- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/hw_irq.h | 22 +- arch/powerpc/include/asm/paca.h | 2 +- arch/powerpc/include/asm/perf_counter.h | 110 - arch/powerpc/include/asm/perf_event.h | 110 + arch/powerpc/include/asm/systbl.h | 2 +- arch/powerpc/include/asm/unistd.h | 2 +- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/asm-offsets.c | 2 +- arch/powerpc/kernel/entry_64.S | 8 +- arch/powerpc/kernel/irq.c | 8 +- arch/powerpc/kernel/mpc7450-pmu.c | 2 +- arch/powerpc/kernel/perf_callchain.c | 2 +- arch/powerpc/kernel/perf_counter.c | 1315 -------- arch/powerpc/kernel/perf_event.c | 1315 ++++++++ arch/powerpc/kernel/power4-pmu.c | 2 +- arch/powerpc/kernel/power5+-pmu.c | 2 +- arch/powerpc/kernel/power5-pmu.c | 2 +- arch/powerpc/kernel/power6-pmu.c | 2 +- arch/powerpc/kernel/power7-pmu.c | 2 +- arch/powerpc/kernel/ppc970-pmu.c | 2 +- arch/powerpc/kernel/time.c | 30 +- arch/powerpc/mm/fault.c | 8 +- arch/powerpc/platforms/Kconfig.cputype | 4 +- arch/s390/Kconfig | 2 +- arch/s390/include/asm/perf_counter.h | 10 - arch/s390/include/asm/perf_event.h | 10 + arch/s390/include/asm/unistd.h | 2 +- arch/s390/kernel/compat_wrapper.S | 8 +- arch/s390/kernel/syscalls.S | 2 +- arch/s390/mm/fault.c | 8 +- arch/sh/Kconfig | 2 +- arch/sh/include/asm/perf_counter.h | 9 - arch/sh/include/asm/perf_event.h | 9 + arch/sh/include/asm/unistd_32.h | 2 +- arch/sh/include/asm/unistd_64.h | 2 +- arch/sh/kernel/syscalls_32.S | 2 +- arch/sh/kernel/syscalls_64.S | 2 +- arch/sh/mm/fault_32.c | 8 +- arch/sh/mm/tlbflush_64.c | 8 +- arch/sparc/Kconfig | 4 +- arch/sparc/include/asm/perf_counter.h | 14 - arch/sparc/include/asm/perf_event.h | 14 + arch/sparc/include/asm/unistd.h | 2 +- 
arch/sparc/kernel/Makefile | 2 +- arch/sparc/kernel/nmi.c | 4 +- arch/sparc/kernel/pcr.c | 10 +- arch/sparc/kernel/perf_counter.c | 556 ---- arch/sparc/kernel/perf_event.c | 556 ++++ arch/sparc/kernel/systbls_32.S | 2 +- arch/sparc/kernel/systbls_64.S | 4 +- arch/x86/Kconfig | 2 +- arch/x86/ia32/ia32entry.S | 2 +- arch/x86/include/asm/entry_arch.h | 2 +- arch/x86/include/asm/perf_counter.h | 108 - arch/x86/include/asm/perf_event.h | 108 + arch/x86/include/asm/unistd_32.h | 2 +- arch/x86/include/asm/unistd_64.h | 4 +- arch/x86/kernel/apic/apic.c | 6 +- arch/x86/kernel/cpu/Makefile | 2 +- arch/x86/kernel/cpu/common.c | 4 +- arch/x86/kernel/cpu/perf_counter.c | 2298 -------------- arch/x86/kernel/cpu/perf_event.c | 2298 ++++++++++++++ arch/x86/kernel/cpu/perfctr-watchdog.c | 2 +- arch/x86/kernel/entry_64.S | 2 +- arch/x86/kernel/irqinit.c | 2 +- arch/x86/kernel/syscall_table_32.S | 2 +- arch/x86/mm/fault.c | 8 +- arch/x86/oprofile/op_model_ppro.c | 4 +- arch/x86/oprofile/op_x86_model.h | 2 +- drivers/char/sysrq.c | 4 +- fs/exec.c | 6 +- include/asm-generic/unistd.h | 4 +- include/linux/init_task.h | 14 +- include/linux/perf_counter.h | 858 ------ include/linux/perf_event.h | 858 ++++++ include/linux/prctl.h | 4 +- include/linux/sched.h | 12 +- include/linux/syscalls.h | 6 +- include/trace/ftrace.h | 10 +- init/Kconfig | 8 +- kernel/Makefile | 2 +- kernel/exit.c | 8 +- kernel/fork.c | 8 +- kernel/perf_counter.c | 5000 ------------------------------- kernel/perf_event.c | 5000 +++++++++++++++++++++++++++++++ kernel/sched.c | 14 +- kernel/sys.c | 10 +- kernel/sys_ni.c | 2 +- kernel/sysctl.c | 22 +- kernel/timer.c | 4 +- kernel/trace/trace_syscalls.c | 6 +- mm/mmap.c | 6 +- mm/mprotect.c | 4 +- tools/perf/Makefile | 2 +- tools/perf/builtin-annotate.c | 28 +- tools/perf/builtin-record.c | 22 +- tools/perf/builtin-report.c | 48 +- tools/perf/builtin-sched.c | 20 +- tools/perf/builtin-stat.c | 10 +- tools/perf/builtin-timechart.c | 14 +- tools/perf/builtin-top.c | 12 +- tools/perf/builtin-trace.c | 22 +- tools/perf/design.txt | 58 +- tools/perf/perf.h | 12 +- tools/perf/util/event.h | 4 +- tools/perf/util/header.c | 6 +- tools/perf/util/header.h | 8 +- tools/perf/util/parse-events.c | 32 +- tools/perf/util/parse-events.h | 2 +- tools/perf/util/trace-event-info.c | 8 +- tools/perf/util/trace-event.h | 2 +- 141 files changed, 10694 insertions(+), 10694 deletions(-) delete mode 100644 arch/frv/include/asm/perf_counter.h create mode 100644 arch/frv/include/asm/perf_event.h delete mode 100644 arch/frv/lib/perf_counter.c create mode 100644 arch/frv/lib/perf_event.c delete mode 100644 arch/parisc/include/asm/perf_counter.h create mode 100644 arch/parisc/include/asm/perf_event.h delete mode 100644 arch/powerpc/include/asm/perf_counter.h create mode 100644 arch/powerpc/include/asm/perf_event.h delete mode 100644 arch/powerpc/kernel/perf_counter.c create mode 100644 arch/powerpc/kernel/perf_event.c delete mode 100644 arch/s390/include/asm/perf_counter.h create mode 100644 arch/s390/include/asm/perf_event.h delete mode 100644 arch/sh/include/asm/perf_counter.h create mode 100644 arch/sh/include/asm/perf_event.h delete mode 100644 arch/sparc/include/asm/perf_counter.h create mode 100644 arch/sparc/include/asm/perf_event.h delete mode 100644 arch/sparc/kernel/perf_counter.c create mode 100644 arch/sparc/kernel/perf_event.c delete mode 100644 arch/x86/include/asm/perf_counter.h create mode 100644 arch/x86/include/asm/perf_event.h delete mode 100644 arch/x86/kernel/cpu/perf_counter.c create mode 100644 
arch/x86/kernel/cpu/perf_event.c delete mode 100644 include/linux/perf_counter.h create mode 100644 include/linux/perf_event.h delete mode 100644 kernel/perf_counter.c create mode 100644 kernel/perf_event.c (limited to 'include') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 9122c9ee18f..89f7eade20a 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -390,7 +390,7 @@ #define __NR_preadv (__NR_SYSCALL_BASE+361) #define __NR_pwritev (__NR_SYSCALL_BASE+362) #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) -#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364) +#define __NR_perf_event_open (__NR_SYSCALL_BASE+364) /* * The following SWIs are ARM private. diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index ecfa98954d1..fafce1b5c69 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -373,7 +373,7 @@ CALL(sys_preadv) CALL(sys_pwritev) CALL(sys_rt_tgsigqueueinfo) - CALL(sys_perf_counter_open) + CALL(sys_perf_event_open) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index c8e7ee4768c..02b1529dad5 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h @@ -381,7 +381,7 @@ #define __NR_preadv 366 #define __NR_pwritev 367 #define __NR_rt_tgsigqueueinfo 368 -#define __NR_perf_counter_open 369 +#define __NR_perf_event_open 369 #define __NR_syscall 370 #define NR_syscalls __NR_syscall diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 01af24cde36..1e7cac23e25 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -1620,7 +1620,7 @@ ENTRY(_sys_call_table) .long _sys_preadv .long _sys_pwritev .long _sys_rt_tgsigqueueinfo - .long _sys_perf_counter_open + .long _sys_perf_event_open .rept NR_syscalls-(.-_sys_call_table)/4 .long _sys_ni_syscall diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index b86e19c9b5b..4b5830bcbe2 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -7,7 +7,7 @@ config FRV default y select HAVE_IDE select HAVE_ARCH_TRACEHOOK - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config ZONE_DMA bool diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_counter.h deleted file mode 100644 index ccf726e61b2..00000000000 --- a/arch/frv/include/asm/perf_counter.h +++ /dev/null @@ -1,17 +0,0 @@ -/* FRV performance counter support - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_PERF_COUNTER_H -#define _ASM_PERF_COUNTER_H - -#define PERF_COUNTER_INDEX_OFFSET 0 - -#endif /* _ASM_PERF_COUNTER_H */ diff --git a/arch/frv/include/asm/perf_event.h b/arch/frv/include/asm/perf_event.h new file mode 100644 index 00000000000..a69e0155d14 --- /dev/null +++ b/arch/frv/include/asm/perf_event.h @@ -0,0 +1,17 @@ +/* FRV performance event support + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _ASM_PERF_EVENT_H +#define _ASM_PERF_EVENT_H + +#define PERF_EVENT_INDEX_OFFSET 0 + +#endif /* _ASM_PERF_EVENT_H */ diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h index 4a8fb427ce0..be6ef0f5cd4 100644 --- a/arch/frv/include/asm/unistd.h +++ b/arch/frv/include/asm/unistd.h @@ -342,7 +342,7 @@ #define __NR_preadv 333 #define __NR_pwritev 334 #define __NR_rt_tgsigqueueinfo 335 -#define __NR_perf_counter_open 336 +#define __NR_perf_event_open 336 #ifdef __KERNEL__ diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index fde1e446b44..189397ec012 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -1525,6 +1525,6 @@ sys_call_table: .long sys_preadv .long sys_pwritev .long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_counter_open + .long sys_perf_event_open syscall_table_size = (. - sys_call_table) diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile index 0a377210c89..f4709756d0d 100644 --- a/arch/frv/lib/Makefile +++ b/arch/frv/lib/Makefile @@ -5,4 +5,4 @@ lib-y := \ __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ - outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o + outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_counter.c deleted file mode 100644 index 2000feecd57..00000000000 --- a/arch/frv/lib/perf_counter.c +++ /dev/null @@ -1,19 +0,0 @@ -/* Performance counter handling - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#include - -/* - * mark the performance counter as pending - */ -void set_perf_counter_pending(void) -{ -} diff --git a/arch/frv/lib/perf_event.c b/arch/frv/lib/perf_event.c new file mode 100644 index 00000000000..9ac5acfd2e9 --- /dev/null +++ b/arch/frv/lib/perf_event.c @@ -0,0 +1,19 @@ +/* Performance event handling + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#include + +/* + * mark the performance event as pending + */ +void set_perf_event_pending(void) +{ +} diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 946d8691f2b..48b87f5ced5 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -335,7 +335,7 @@ #define __NR_preadv 329 #define __NR_pwritev 330 #define __NR_rt_tgsigqueueinfo 331 -#define __NR_perf_counter_open 332 +#define __NR_perf_event_open 332 #ifdef __KERNEL__ diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 922f52e7ed1..c5b33634c98 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -756,5 +756,5 @@ sys_call_table: .long sys_preadv .long sys_pwritev /* 330 */ .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index 0ae123e0898..23535cc415a 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S @@ -350,7 +350,7 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev /* 330 */ .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open .rept NR_syscalls-(.-sys_call_table)/4 .long sys_ni_syscall diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 0b852327c0e..cb05a07e55e 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -381,7 +381,7 @@ #define __NR_preadv 363 /* new */ #define __NR_pwritev 364 /* new */ #define __NR_rt_tgsigqueueinfo 365 /* new */ -#define __NR_perf_counter_open 366 /* new */ +#define __NR_perf_event_open 366 /* new */ #define __NR_syscalls 367 diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 457216097df..ecec1915513 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -370,4 +370,4 @@ ENTRY(sys_call_table) .long sys_ni_syscall .long sys_ni_syscall .long sys_rt_tgsigqueueinfo /* 365 */ - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index e753a777949..8c9dfa9e901 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -353,7 +353,7 @@ #define __NR_preadv (__NR_Linux + 330) #define __NR_pwritev (__NR_Linux + 331) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) -#define __NR_perf_counter_open (__NR_Linux + 333) +#define __NR_perf_event_open (__NR_Linux + 333) #define __NR_accept4 (__NR_Linux + 334) /* @@ -664,7 +664,7 @@ #define __NR_preadv (__NR_Linux + 289) #define __NR_pwritev (__NR_Linux + 290) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 291) -#define __NR_perf_counter_open (__NR_Linux + 292) +#define __NR_perf_event_open (__NR_Linux + 292) #define __NR_accept4 (__NR_Linux + 293) /* @@ -979,7 +979,7 @@ #define __NR_preadv (__NR_Linux + 293) #define __NR_pwritev (__NR_Linux + 294) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 295) -#define __NR_perf_counter_open (__NR_Linux + 296) +#define __NR_perf_event_open (__NR_Linux + 296) #define __NR_accept4 (__NR_Linux + 297) /* diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 7c2de4f091c..fd2a9bb620d 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -581,7 +581,7 @@ einval: li v0, -ENOSYS sys sys_preadv 6 /* 4330 */ sys sys_pwritev 6 sys sys_rt_tgsigqueueinfo 4 - sys sys_perf_counter_open 5 + sys 
sys_perf_event_open 5 sys sys_accept4 4 .endm diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index b97b993846d..18bf7f32c5e 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -418,6 +418,6 @@ sys_call_table: PTR sys_preadv PTR sys_pwritev /* 5390 */ PTR sys_rt_tgsigqueueinfo - PTR sys_perf_counter_open + PTR sys_perf_event_open PTR sys_accept4 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 1a6ae124635..6ebc0797669 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -416,6 +416,6 @@ EXPORT(sysn32_call_table) PTR sys_preadv PTR sys_pwritev PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ - PTR sys_perf_counter_open + PTR sys_perf_event_open PTR sys_accept4 .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index cd31087a651..9bbf9775e0b 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -536,6 +536,6 @@ sys_call_table: PTR compat_sys_preadv /* 4330 */ PTR compat_sys_pwritev PTR compat_sys_rt_tgsigqueueinfo - PTR sys_perf_counter_open + PTR sys_perf_event_open PTR sys_accept4 .size sys_call_table,.-sys_call_table diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h index fad68616af3..2a983931c11 100644 --- a/arch/mn10300/include/asm/unistd.h +++ b/arch/mn10300/include/asm/unistd.h @@ -347,7 +347,7 @@ #define __NR_preadv 334 #define __NR_pwritev 335 #define __NR_rt_tgsigqueueinfo 336 -#define __NR_perf_counter_open 337 +#define __NR_perf_event_open 337 #ifdef __KERNEL__ diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index e0d2563af4f..a94e7ea3faa 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S @@ -723,7 +723,7 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev /* 335 */ .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open nr_syscalls=(.-sys_call_table)/4 diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 06f8d5b5b0f..f388dc68f60 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -16,7 +16,7 @@ config PARISC select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE select BUG - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/perf_counter.h b/arch/parisc/include/asm/perf_counter.h deleted file mode 100644 index dc9e829f701..00000000000 --- a/arch/parisc/include/asm/perf_counter.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_PARISC_PERF_COUNTER_H -#define __ASM_PARISC_PERF_COUNTER_H - -/* parisc only supports software counters through this interface. */ -static inline void set_perf_counter_pending(void) { } - -#endif /* __ASM_PARISC_PERF_COUNTER_H */ diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h new file mode 100644 index 00000000000..cc146427d8f --- /dev/null +++ b/arch/parisc/include/asm/perf_event.h @@ -0,0 +1,7 @@ +#ifndef __ASM_PARISC_PERF_EVENT_H +#define __ASM_PARISC_PERF_EVENT_H + +/* parisc only supports software events through this interface. 
*/ +static inline void set_perf_event_pending(void) { } + +#endif /* __ASM_PARISC_PERF_EVENT_H */ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index f3d3b8b012c..cda158318c6 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -810,9 +810,9 @@ #define __NR_preadv (__NR_Linux + 315) #define __NR_pwritev (__NR_Linux + 316) #define __NR_rt_tgsigqueueinfo (__NR_Linux + 317) -#define __NR_perf_counter_open (__NR_Linux + 318) +#define __NR_perf_event_open (__NR_Linux + 318) -#define __NR_Linux_syscalls (__NR_perf_counter_open + 1) +#define __NR_Linux_syscalls (__NR_perf_event_open + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index cf145eb026b..843f423dec6 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -416,7 +416,7 @@ ENTRY_COMP(preadv) /* 315 */ ENTRY_COMP(pwritev) ENTRY_COMP(rt_tgsigqueueinfo) - ENTRY_SAME(perf_counter_open) + ENTRY_SAME(perf_event_open) /* Nothing yet */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8250902265c..4fd479059d6 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -129,7 +129,7 @@ config PPC select HAVE_OPROFILE select HAVE_SYSCALL_WRAPPERS if PPC64 select GENERIC_ATOMIC64 if PPC32 - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config EARLY_PRINTK bool diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index e73d554538d..abbc2aaaced 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags) */ struct irq_chip; -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PPC64 -static inline unsigned long test_perf_counter_pending(void) +static inline unsigned long test_perf_event_pending(void) { unsigned long x; asm volatile("lbz %0,%1(13)" : "=r" (x) - : "i" (offsetof(struct paca_struct, perf_counter_pending))); + : "i" (offsetof(struct paca_struct, perf_event_pending))); return x; } -static inline void set_perf_counter_pending(void) +static inline void set_perf_event_pending(void) { asm volatile("stb %0,%1(13)" : : "r" (1), - "i" (offsetof(struct paca_struct, perf_counter_pending))); + "i" (offsetof(struct paca_struct, perf_event_pending))); } -static inline void clear_perf_counter_pending(void) +static inline void clear_perf_event_pending(void) { asm volatile("stb %0,%1(13)" : : "r" (0), - "i" (offsetof(struct paca_struct, perf_counter_pending))); + "i" (offsetof(struct paca_struct, perf_event_pending))); } #endif /* CONFIG_PPC64 */ -#else /* CONFIG_PERF_COUNTERS */ +#else /* CONFIG_PERF_EVENTS */ -static inline unsigned long test_perf_counter_pending(void) +static inline unsigned long test_perf_event_pending(void) { return 0; } -static inline void clear_perf_counter_pending(void) {} -#endif /* CONFIG_PERF_COUNTERS */ +static inline void clear_perf_event_pending(void) {} +#endif /* CONFIG_PERF_EVENTS */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b634456ea89..154f405b642 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -122,7 +122,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ - u8 perf_counter_pending; /* PM interrupt 
while soft-disabled */ + u8 perf_event_pending; /* PM interrupt while soft-disabled */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h deleted file mode 100644 index 0ea0639fcf7..00000000000 --- a/arch/powerpc/include/asm/perf_counter.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Performance counter support - PowerPC-specific definitions. - * - * Copyright 2008-2009 Paul Mackerras, IBM Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include - -#include - -#define MAX_HWCOUNTERS 8 -#define MAX_EVENT_ALTERNATIVES 8 -#define MAX_LIMITED_HWCOUNTERS 2 - -/* - * This struct provides the constants and functions needed to - * describe the PMU on a particular POWER-family CPU. - */ -struct power_pmu { - const char *name; - int n_counter; - int max_alternatives; - unsigned long add_fields; - unsigned long test_adder; - int (*compute_mmcr)(u64 events[], int n_ev, - unsigned int hwc[], unsigned long mmcr[]); - int (*get_constraint)(u64 event, unsigned long *mskp, - unsigned long *valp); - int (*get_alternatives)(u64 event, unsigned int flags, - u64 alt[]); - void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); - int (*limited_pmc_event)(u64 event); - u32 flags; - int n_generic; - int *generic_events; - int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX]; -}; - -/* - * Values for power_pmu.flags - */ -#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ -#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ - -/* - * Values for flags to get_alternatives() - */ -#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ -#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ -#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ - -extern int register_power_pmu(struct power_pmu *); - -struct pt_regs; -extern unsigned long perf_misc_flags(struct pt_regs *regs); -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); - -#define PERF_COUNTER_INDEX_OFFSET 1 - -/* - * Only override the default definitions in include/linux/perf_counter.h - * if we have hardware PMU support. - */ -#ifdef CONFIG_PPC_PERF_CTRS -#define perf_misc_flags(regs) perf_misc_flags(regs) -#endif - -/* - * The power_pmu.get_constraint function returns a 32/64-bit value and - * a 32/64-bit mask that express the constraints between this event and - * other events. - * - * The value and mask are divided up into (non-overlapping) bitfields - * of three different types: - * - * Select field: this expresses the constraint that some set of bits - * in MMCR* needs to be set to a specific value for this event. For a - * select field, the mask contains 1s in every bit of the field, and - * the value contains a unique value for each possible setting of the - * MMCR* bits. The constraint checking code will ensure that two events - * that set the same field in their masks have the same value in their - * value dwords. - * - * Add field: this expresses the constraint that there can be at most - * N events in a particular class. A field of k bits can be used for - * N <= 2^(k-1) - 1. 
The mask has the most significant bit of the field - * set (and the other bits 0), and the value has only the least significant - * bit of the field set. In addition, the 'add_fields' and 'test_adder' - * in the struct power_pmu for this processor come into play. The - * add_fields value contains 1 in the LSB of the field, and the - * test_adder contains 2^(k-1) - 1 - N in the field. - * - * NAND field: this expresses the constraint that you may not have events - * in all of a set of classes. (For example, on PPC970, you can't select - * events from the FPU, ISU and IDU simultaneously, although any two are - * possible.) For N classes, the field is N+1 bits wide, and each class - * is assigned one bit from the least-significant N bits. The mask has - * only the most-significant bit set, and the value has only the bit - * for the event's class set. The test_adder has the least significant - * bit set in the field. - * - * If an event is not subject to the constraint expressed by a particular - * field, then it will have 0 in both the mask and value for that field. - */ diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h new file mode 100644 index 00000000000..2499aaadaeb --- /dev/null +++ b/arch/powerpc/include/asm/perf_event.h @@ -0,0 +1,110 @@ +/* + * Performance event support - PowerPC-specific definitions. + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include + +#include + +#define MAX_HWEVENTS 8 +#define MAX_EVENT_ALTERNATIVES 8 +#define MAX_LIMITED_HWEVENTS 2 + +/* + * This struct provides the constants and functions needed to + * describe the PMU on a particular POWER-family CPU. + */ +struct power_pmu { + const char *name; + int n_event; + int max_alternatives; + unsigned long add_fields; + unsigned long test_adder; + int (*compute_mmcr)(u64 events[], int n_ev, + unsigned int hwc[], unsigned long mmcr[]); + int (*get_constraint)(u64 event_id, unsigned long *mskp, + unsigned long *valp); + int (*get_alternatives)(u64 event_id, unsigned int flags, + u64 alt[]); + void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); + int (*limited_pmc_event)(u64 event_id); + u32 flags; + int n_generic; + int *generic_events; + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; +}; + +/* + * Values for power_pmu.flags + */ +#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ +#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ + +/* + * Values for flags to get_alternatives() + */ +#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ +#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ +#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ + +extern int register_power_pmu(struct power_pmu *); + +struct pt_regs; +extern unsigned long perf_misc_flags(struct pt_regs *regs); +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); + +#define PERF_EVENT_INDEX_OFFSET 1 + +/* + * Only override the default definitions in include/linux/perf_event.h + * if we have hardware PMU support. 
+ */ +#ifdef CONFIG_PPC_PERF_CTRS +#define perf_misc_flags(regs) perf_misc_flags(regs) +#endif + +/* + * The power_pmu.get_constraint function returns a 32/64-bit value and + * a 32/64-bit mask that express the constraints between this event_id and + * other events. + * + * The value and mask are divided up into (non-overlapping) bitfields + * of three different types: + * + * Select field: this expresses the constraint that some set of bits + * in MMCR* needs to be set to a specific value for this event_id. For a + * select field, the mask contains 1s in every bit of the field, and + * the value contains a unique value for each possible setting of the + * MMCR* bits. The constraint checking code will ensure that two events + * that set the same field in their masks have the same value in their + * value dwords. + * + * Add field: this expresses the constraint that there can be at most + * N events in a particular class. A field of k bits can be used for + * N <= 2^(k-1) - 1. The mask has the most significant bit of the field + * set (and the other bits 0), and the value has only the least significant + * bit of the field set. In addition, the 'add_fields' and 'test_adder' + * in the struct power_pmu for this processor come into play. The + * add_fields value contains 1 in the LSB of the field, and the + * test_adder contains 2^(k-1) - 1 - N in the field. + * + * NAND field: this expresses the constraint that you may not have events + * in all of a set of classes. (For example, on PPC970, you can't select + * events from the FPU, ISU and IDU simultaneously, although any two are + * possible.) For N classes, the field is N+1 bits wide, and each class + * is assigned one bit from the least-significant N bits. The mask has + * only the most-significant bit set, and the value has only the bit + * for the event_id's class set. The test_adder has the least significant + * bit set in the field. + * + * If an event_id is not subject to the constraint expressed by a particular + * field, then it will have 0 in both the mask and value for that field. 
+ */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index ed24bd92fe4..c7d671a7d9a 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1) SYSCALL_SPU(dup3) SYSCALL_SPU(pipe2) SYSCALL(inotify_init1) -SYSCALL_SPU(perf_counter_open) +SYSCALL_SPU(perf_event_open) COMPAT_SYS_SPU(preadv) COMPAT_SYS_SPU(pwritev) COMPAT_SYS(rt_tgsigqueueinfo) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index cef080bfc60..f6ca7617676 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -341,7 +341,7 @@ #define __NR_dup3 316 #define __NR_pipe2 317 #define __NR_inotify_init1 318 -#define __NR_perf_counter_open 319 +#define __NR_perf_event_open 319 #define __NR_preadv 320 #define __NR_pwritev 321 #define __NR_rt_tgsigqueueinfo 322 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 569f79ccd31..b23664a0b86 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o -obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o +obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o perf_callchain.o obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ power5+-pmu.o power6-pmu.o power7-pmu.o obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index f0df285f0f8..0812b0f414b 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -133,7 +133,7 @@ int main(void) DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); - DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending)); + DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); #ifdef CONFIG_PPC_MM_SLICES DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 66bcda34a6b..900e0eea009 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) 2: TRACE_AND_RESTORE_IRQ(r5); -#ifdef CONFIG_PERF_COUNTERS - /* check paca->perf_counter_pending if we're enabling ints */ +#ifdef CONFIG_PERF_EVENTS + /* check paca->perf_event_pending if we're enabling ints */ lbz r3,PACAPERFPEND(r13) and. 
r3,r3,r5 beq 27f - bl .perf_counter_do_pending + bl .perf_event_do_pending 27: -#endif /* CONFIG_PERF_COUNTERS */ +#endif /* CONFIG_PERF_EVENTS */ /* extract EE bit and use it to restore paca->hard_enabled */ ld r3,_MSR(r1) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f7f376ea7b1..e5d12117798 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include #include #include @@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en) } #endif /* CONFIG_PPC_STD_MMU_64 */ - if (test_perf_counter_pending()) { - clear_perf_counter_pending(); - perf_counter_do_pending(); + if (test_perf_event_pending()) { + clear_perf_event_pending(); + perf_event_do_pending(); } /* diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index cc466d039af..09d72028f31 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index f74b62c6751..0a03cf70d24 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c @@ -10,7 +10,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c deleted file mode 100644 index 5ccf9bca96c..00000000000 --- a/arch/powerpc/kernel/perf_counter.c +++ /dev/null @@ -1,1315 +0,0 @@ -/* - * Performance counter support - powerpc architecture code - * - * Copyright 2008-2009 Paul Mackerras, IBM Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct cpu_hw_counters { - int n_counters; - int n_percpu; - int disabled; - int n_added; - int n_limited; - u8 pmcs_enabled; - struct perf_counter *counter[MAX_HWCOUNTERS]; - u64 events[MAX_HWCOUNTERS]; - unsigned int flags[MAX_HWCOUNTERS]; - unsigned long mmcr[3]; - struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS]; - u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; - u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; - unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; - unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; -}; -DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); - -struct power_pmu *ppmu; - -/* - * Normally, to ignore kernel events we set the FCS (freeze counters - * in supervisor mode) bit in MMCR0, but if the kernel runs with the - * hypervisor bit set in the MSR, or if we are running on a processor - * where the hypervisor bit is forced to 1 (as on Apple G5 processors), - * then we need to use the FCHV bit to ignore kernel events. - */ -static unsigned int freeze_counters_kernel = MMCR0_FCS; - -/* - * 32-bit doesn't have MMCRA but does have an MMCR2, - * and a few other names are different. 
- */ -#ifdef CONFIG_PPC32 - -#define MMCR0_FCHV 0 -#define MMCR0_PMCjCE MMCR0_PMCnCE - -#define SPRN_MMCRA SPRN_MMCR2 -#define MMCRA_SAMPLE_ENABLE 0 - -static inline unsigned long perf_ip_adjust(struct pt_regs *regs) -{ - return 0; -} -static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { } -static inline u32 perf_get_misc_flags(struct pt_regs *regs) -{ - return 0; -} -static inline void perf_read_regs(struct pt_regs *regs) { } -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return 0; -} - -#endif /* CONFIG_PPC32 */ - -/* - * Things that are specific to 64-bit implementations. - */ -#ifdef CONFIG_PPC64 - -static inline unsigned long perf_ip_adjust(struct pt_regs *regs) -{ - unsigned long mmcra = regs->dsisr; - - if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { - unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; - if (slot > 1) - return 4 * (slot - 1); - } - return 0; -} - -/* - * The user wants a data address recorded. - * If we're not doing instruction sampling, give them the SDAR - * (sampled data address). If we are doing instruction sampling, then - * only give them the SDAR if it corresponds to the instruction - * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC - * bit in MMCRA. - */ -static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) -{ - unsigned long mmcra = regs->dsisr; - unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? - POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; - - if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) - *addrp = mfspr(SPRN_SDAR); -} - -static inline u32 perf_get_misc_flags(struct pt_regs *regs) -{ - unsigned long mmcra = regs->dsisr; - - if (TRAP(regs) != 0xf00) - return 0; /* not a PMU interrupt */ - - if (ppmu->flags & PPMU_ALT_SIPR) { - if (mmcra & POWER6_MMCRA_SIHV) - return PERF_EVENT_MISC_HYPERVISOR; - return (mmcra & POWER6_MMCRA_SIPR) ? - PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL; - } - if (mmcra & MMCRA_SIHV) - return PERF_EVENT_MISC_HYPERVISOR; - return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER : - PERF_EVENT_MISC_KERNEL; -} - -/* - * Overload regs->dsisr to store MMCRA so we only need to read it once - * on each interrupt. - */ -static inline void perf_read_regs(struct pt_regs *regs) -{ - regs->dsisr = mfspr(SPRN_MMCRA); -} - -/* - * If interrupts were soft-disabled when a PMU interrupt occurs, treat - * it as an NMI. - */ -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return !regs->softe; -} - -#endif /* CONFIG_PPC64 */ - -static void perf_counter_interrupt(struct pt_regs *regs); - -void perf_counter_print_debug(void) -{ -} - -/* - * Read one performance monitor counter (PMC). - */ -static unsigned long read_pmc(int idx) -{ - unsigned long val; - - switch (idx) { - case 1: - val = mfspr(SPRN_PMC1); - break; - case 2: - val = mfspr(SPRN_PMC2); - break; - case 3: - val = mfspr(SPRN_PMC3); - break; - case 4: - val = mfspr(SPRN_PMC4); - break; - case 5: - val = mfspr(SPRN_PMC5); - break; - case 6: - val = mfspr(SPRN_PMC6); - break; -#ifdef CONFIG_PPC64 - case 7: - val = mfspr(SPRN_PMC7); - break; - case 8: - val = mfspr(SPRN_PMC8); - break; -#endif /* CONFIG_PPC64 */ - default: - printk(KERN_ERR "oops trying to read PMC%d\n", idx); - val = 0; - } - return val; -} - -/* - * Write one PMC. 
- */ -static void write_pmc(int idx, unsigned long val) -{ - switch (idx) { - case 1: - mtspr(SPRN_PMC1, val); - break; - case 2: - mtspr(SPRN_PMC2, val); - break; - case 3: - mtspr(SPRN_PMC3, val); - break; - case 4: - mtspr(SPRN_PMC4, val); - break; - case 5: - mtspr(SPRN_PMC5, val); - break; - case 6: - mtspr(SPRN_PMC6, val); - break; -#ifdef CONFIG_PPC64 - case 7: - mtspr(SPRN_PMC7, val); - break; - case 8: - mtspr(SPRN_PMC8, val); - break; -#endif /* CONFIG_PPC64 */ - default: - printk(KERN_ERR "oops trying to write PMC%d\n", idx); - } -} - -/* - * Check if a set of events can all go on the PMU at once. - * If they can't, this will look at alternative codes for the events - * and see if any combination of alternative codes is feasible. - * The feasible set is returned in event[]. - */ -static int power_check_constraints(struct cpu_hw_counters *cpuhw, - u64 event[], unsigned int cflags[], - int n_ev) -{ - unsigned long mask, value, nv; - unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS]; - int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS]; - int i, j; - unsigned long addf = ppmu->add_fields; - unsigned long tadd = ppmu->test_adder; - - if (n_ev > ppmu->n_counter) - return -1; - - /* First see if the events will go on as-is */ - for (i = 0; i < n_ev; ++i) { - if ((cflags[i] & PPMU_LIMITED_PMC_REQD) - && !ppmu->limited_pmc_event(event[i])) { - ppmu->get_alternatives(event[i], cflags[i], - cpuhw->alternatives[i]); - event[i] = cpuhw->alternatives[i][0]; - } - if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0], - &cpuhw->avalues[i][0])) - return -1; - } - value = mask = 0; - for (i = 0; i < n_ev; ++i) { - nv = (value | cpuhw->avalues[i][0]) + - (value & cpuhw->avalues[i][0] & addf); - if ((((nv + tadd) ^ value) & mask) != 0 || - (((nv + tadd) ^ cpuhw->avalues[i][0]) & - cpuhw->amasks[i][0]) != 0) - break; - value = nv; - mask |= cpuhw->amasks[i][0]; - } - if (i == n_ev) - return 0; /* all OK */ - - /* doesn't work, gather alternatives... */ - if (!ppmu->get_alternatives) - return -1; - for (i = 0; i < n_ev; ++i) { - choice[i] = 0; - n_alt[i] = ppmu->get_alternatives(event[i], cflags[i], - cpuhw->alternatives[i]); - for (j = 1; j < n_alt[i]; ++j) - ppmu->get_constraint(cpuhw->alternatives[i][j], - &cpuhw->amasks[i][j], - &cpuhw->avalues[i][j]); - } - - /* enumerate all possibilities and see if any will work */ - i = 0; - j = -1; - value = mask = nv = 0; - while (i < n_ev) { - if (j >= 0) { - /* we're backtracking, restore context */ - value = svalues[i]; - mask = smasks[i]; - j = choice[i]; - } - /* - * See if any alternative k for event i, - * where k > j, will satisfy the constraints. - */ - while (++j < n_alt[i]) { - nv = (value | cpuhw->avalues[i][j]) + - (value & cpuhw->avalues[i][j] & addf); - if ((((nv + tadd) ^ value) & mask) == 0 && - (((nv + tadd) ^ cpuhw->avalues[i][j]) - & cpuhw->amasks[i][j]) == 0) - break; - } - if (j >= n_alt[i]) { - /* - * No feasible alternative, backtrack - * to event i-1 and continue enumerating its - * alternatives from where we got up to. - */ - if (--i < 0) - return -1; - } else { - /* - * Found a feasible alternative for event i, - * remember where we got up to with this event, - * go on to the next event, and start with - * the first alternative for it. 
- */ - choice[i] = j; - svalues[i] = value; - smasks[i] = mask; - value = nv; - mask |= cpuhw->amasks[i][j]; - ++i; - j = -1; - } - } - - /* OK, we have a feasible combination, tell the caller the solution */ - for (i = 0; i < n_ev; ++i) - event[i] = cpuhw->alternatives[i][choice[i]]; - return 0; -} - -/* - * Check if newly-added counters have consistent settings for - * exclude_{user,kernel,hv} with each other and any previously - * added counters. - */ -static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[], - int n_prev, int n_new) -{ - int eu = 0, ek = 0, eh = 0; - int i, n, first; - struct perf_counter *counter; - - n = n_prev + n_new; - if (n <= 1) - return 0; - - first = 1; - for (i = 0; i < n; ++i) { - if (cflags[i] & PPMU_LIMITED_PMC_OK) { - cflags[i] &= ~PPMU_LIMITED_PMC_REQD; - continue; - } - counter = ctrs[i]; - if (first) { - eu = counter->attr.exclude_user; - ek = counter->attr.exclude_kernel; - eh = counter->attr.exclude_hv; - first = 0; - } else if (counter->attr.exclude_user != eu || - counter->attr.exclude_kernel != ek || - counter->attr.exclude_hv != eh) { - return -EAGAIN; - } - } - - if (eu || ek || eh) - for (i = 0; i < n; ++i) - if (cflags[i] & PPMU_LIMITED_PMC_OK) - cflags[i] |= PPMU_LIMITED_PMC_REQD; - - return 0; -} - -static void power_pmu_read(struct perf_counter *counter) -{ - s64 val, delta, prev; - - if (!counter->hw.idx) - return; - /* - * Performance monitor interrupts come even when interrupts - * are soft-disabled, as long as interrupts are hard-enabled. - * Therefore we treat them like NMIs. - */ - do { - prev = atomic64_read(&counter->hw.prev_count); - barrier(); - val = read_pmc(counter->hw.idx); - } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev); - - /* The counters are only 32 bits wide */ - delta = (val - prev) & 0xfffffffful; - atomic64_add(delta, &counter->count); - atomic64_sub(delta, &counter->hw.period_left); -} - -/* - * On some machines, PMC5 and PMC6 can't be written, don't respect - * the freeze conditions, and don't generate interrupts. This tells - * us if `counter' is using such a PMC. - */ -static int is_limited_pmc(int pmcnum) -{ - return (ppmu->flags & PPMU_LIMITED_PMC5_6) - && (pmcnum == 5 || pmcnum == 6); -} - -static void freeze_limited_counters(struct cpu_hw_counters *cpuhw, - unsigned long pmc5, unsigned long pmc6) -{ - struct perf_counter *counter; - u64 val, prev, delta; - int i; - - for (i = 0; i < cpuhw->n_limited; ++i) { - counter = cpuhw->limited_counter[i]; - if (!counter->hw.idx) - continue; - val = (counter->hw.idx == 5) ? pmc5 : pmc6; - prev = atomic64_read(&counter->hw.prev_count); - counter->hw.idx = 0; - delta = (val - prev) & 0xfffffffful; - atomic64_add(delta, &counter->count); - } -} - -static void thaw_limited_counters(struct cpu_hw_counters *cpuhw, - unsigned long pmc5, unsigned long pmc6) -{ - struct perf_counter *counter; - u64 val; - int i; - - for (i = 0; i < cpuhw->n_limited; ++i) { - counter = cpuhw->limited_counter[i]; - counter->hw.idx = cpuhw->limited_hwidx[i]; - val = (counter->hw.idx == 5) ? pmc5 : pmc6; - atomic64_set(&counter->hw.prev_count, val); - perf_counter_update_userpage(counter); - } -} - -/* - * Since limited counters don't respect the freeze conditions, we - * have to read them immediately after freezing or unfreezing the - * other counters. 
We try to keep the values from the limited - * counters as consistent as possible by keeping the delay (in - * cycles and instructions) between freezing/unfreezing and reading - * the limited counters as small and consistent as possible. - * Therefore, if any limited counters are in use, we read them - * both, and always in the same order, to minimize variability, - * and do it inside the same asm that writes MMCR0. - */ -static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) -{ - unsigned long pmc5, pmc6; - - if (!cpuhw->n_limited) { - mtspr(SPRN_MMCR0, mmcr0); - return; - } - - /* - * Write MMCR0, then read PMC5 and PMC6 immediately. - * To ensure we don't get a performance monitor interrupt - * between writing MMCR0 and freezing/thawing the limited - * counters, we first write MMCR0 with the counter overflow - * interrupt enable bits turned off. - */ - asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" - : "=&r" (pmc5), "=&r" (pmc6) - : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), - "i" (SPRN_MMCR0), - "i" (SPRN_PMC5), "i" (SPRN_PMC6)); - - if (mmcr0 & MMCR0_FC) - freeze_limited_counters(cpuhw, pmc5, pmc6); - else - thaw_limited_counters(cpuhw, pmc5, pmc6); - - /* - * Write the full MMCR0 including the counter overflow interrupt - * enable bits, if necessary. - */ - if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) - mtspr(SPRN_MMCR0, mmcr0); -} - -/* - * Disable all counters to prevent PMU interrupts and to allow - * counters to be added or removed. - */ -void hw_perf_disable(void) -{ - struct cpu_hw_counters *cpuhw; - unsigned long flags; - - if (!ppmu) - return; - local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_counters); - - if (!cpuhw->disabled) { - cpuhw->disabled = 1; - cpuhw->n_added = 0; - - /* - * Check if we ever enabled the PMU on this cpu. - */ - if (!cpuhw->pmcs_enabled) { - ppc_enable_pmcs(); - cpuhw->pmcs_enabled = 1; - } - - /* - * Disable instruction sampling if it was enabled - */ - if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { - mtspr(SPRN_MMCRA, - cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); - mb(); - } - - /* - * Set the 'freeze counters' bit. - * The barrier is to make sure the mtspr has been - * executed and the PMU has frozen the counters - * before we return. - */ - write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); - mb(); - } - local_irq_restore(flags); -} - -/* - * Re-enable all counters if disable == 0. - * If we were previously disabled and counters were added, then - * put the new config on the PMU. - */ -void hw_perf_enable(void) -{ - struct perf_counter *counter; - struct cpu_hw_counters *cpuhw; - unsigned long flags; - long i; - unsigned long val; - s64 left; - unsigned int hwc_index[MAX_HWCOUNTERS]; - int n_lim; - int idx; - - if (!ppmu) - return; - local_irq_save(flags); - cpuhw = &__get_cpu_var(cpu_hw_counters); - if (!cpuhw->disabled) { - local_irq_restore(flags); - return; - } - cpuhw->disabled = 0; - - /* - * If we didn't change anything, or only removed counters, - * no need to recalculate MMCR* settings and reset the PMCs. - * Just reenable the PMU with the current MMCR* settings - * (possibly updated for removal of counters). 
- */ - if (!cpuhw->n_added) { - mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); - mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); - if (cpuhw->n_counters == 0) - ppc_set_pmu_inuse(0); - goto out_enable; - } - - /* - * Compute MMCR* values for the new set of counters - */ - if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index, - cpuhw->mmcr)) { - /* shouldn't ever get here */ - printk(KERN_ERR "oops compute_mmcr failed\n"); - goto out; - } - - /* - * Add in MMCR0 freeze bits corresponding to the - * attr.exclude_* bits for the first counter. - * We have already checked that all counters have the - * same values for these bits as the first counter. - */ - counter = cpuhw->counter[0]; - if (counter->attr.exclude_user) - cpuhw->mmcr[0] |= MMCR0_FCP; - if (counter->attr.exclude_kernel) - cpuhw->mmcr[0] |= freeze_counters_kernel; - if (counter->attr.exclude_hv) - cpuhw->mmcr[0] |= MMCR0_FCHV; - - /* - * Write the new configuration to MMCR* with the freeze - * bit set and set the hardware counters to their initial values. - * Then unfreeze the counters. - */ - ppc_set_pmu_inuse(1); - mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); - mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); - mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) - | MMCR0_FC); - - /* - * Read off any pre-existing counters that need to move - * to another PMC. - */ - for (i = 0; i < cpuhw->n_counters; ++i) { - counter = cpuhw->counter[i]; - if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { - power_pmu_read(counter); - write_pmc(counter->hw.idx, 0); - counter->hw.idx = 0; - } - } - - /* - * Initialize the PMCs for all the new and moved counters. - */ - cpuhw->n_limited = n_lim = 0; - for (i = 0; i < cpuhw->n_counters; ++i) { - counter = cpuhw->counter[i]; - if (counter->hw.idx) - continue; - idx = hwc_index[i] + 1; - if (is_limited_pmc(idx)) { - cpuhw->limited_counter[n_lim] = counter; - cpuhw->limited_hwidx[n_lim] = idx; - ++n_lim; - continue; - } - val = 0; - if (counter->hw.sample_period) { - left = atomic64_read(&counter->hw.period_left); - if (left < 0x80000000L) - val = 0x80000000L - left; - } - atomic64_set(&counter->hw.prev_count, val); - counter->hw.idx = idx; - write_pmc(idx, val); - perf_counter_update_userpage(counter); - } - cpuhw->n_limited = n_lim; - cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; - - out_enable: - mb(); - write_mmcr0(cpuhw, cpuhw->mmcr[0]); - - /* - * Enable instruction sampling if necessary - */ - if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { - mb(); - mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); - } - - out: - local_irq_restore(flags); -} - -static int collect_events(struct perf_counter *group, int max_count, - struct perf_counter *ctrs[], u64 *events, - unsigned int *flags) -{ - int n = 0; - struct perf_counter *counter; - - if (!is_software_counter(group)) { - if (n >= max_count) - return -1; - ctrs[n] = group; - flags[n] = group->hw.counter_base; - events[n++] = group->hw.config; - } - list_for_each_entry(counter, &group->sibling_list, list_entry) { - if (!is_software_counter(counter) && - counter->state != PERF_COUNTER_STATE_OFF) { - if (n >= max_count) - return -1; - ctrs[n] = counter; - flags[n] = counter->hw.counter_base; - events[n++] = counter->hw.config; - } - } - return n; -} - -static void counter_sched_in(struct perf_counter *counter, int cpu) -{ - counter->state = PERF_COUNTER_STATE_ACTIVE; - counter->oncpu = cpu; - counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped; - if (is_software_counter(counter)) - counter->pmu->enable(counter); -} - 
-/* - * Called to enable a whole group of counters. - * Returns 1 if the group was enabled, or -EAGAIN if it could not be. - * Assumes the caller has disabled interrupts and has - * frozen the PMU with hw_perf_save_disable. - */ -int hw_perf_group_sched_in(struct perf_counter *group_leader, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx, int cpu) -{ - struct cpu_hw_counters *cpuhw; - long i, n, n0; - struct perf_counter *sub; - - if (!ppmu) - return 0; - cpuhw = &__get_cpu_var(cpu_hw_counters); - n0 = cpuhw->n_counters; - n = collect_events(group_leader, ppmu->n_counter - n0, - &cpuhw->counter[n0], &cpuhw->events[n0], - &cpuhw->flags[n0]); - if (n < 0) - return -EAGAIN; - if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n)) - return -EAGAIN; - i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); - if (i < 0) - return -EAGAIN; - cpuhw->n_counters = n0 + n; - cpuhw->n_added += n; - - /* - * OK, this group can go on; update counter states etc., - * and enable any software counters - */ - for (i = n0; i < n0 + n; ++i) - cpuhw->counter[i]->hw.config = cpuhw->events[i]; - cpuctx->active_oncpu += n; - n = 1; - counter_sched_in(group_leader, cpu); - list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { - if (sub->state != PERF_COUNTER_STATE_OFF) { - counter_sched_in(sub, cpu); - ++n; - } - } - ctx->nr_active += n; - - return 1; -} - -/* - * Add a counter to the PMU. - * If all counters are not already frozen, then we disable and - * re-enable the PMU in order to get hw_perf_enable to do the - * actual work of reconfiguring the PMU. - */ -static int power_pmu_enable(struct perf_counter *counter) -{ - struct cpu_hw_counters *cpuhw; - unsigned long flags; - int n0; - int ret = -EAGAIN; - - local_irq_save(flags); - perf_disable(); - - /* - * Add the counter to the list (if there is room) - * and check whether the total set is still feasible. - */ - cpuhw = &__get_cpu_var(cpu_hw_counters); - n0 = cpuhw->n_counters; - if (n0 >= ppmu->n_counter) - goto out; - cpuhw->counter[n0] = counter; - cpuhw->events[n0] = counter->hw.config; - cpuhw->flags[n0] = counter->hw.counter_base; - if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1)) - goto out; - if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) - goto out; - - counter->hw.config = cpuhw->events[n0]; - ++cpuhw->n_counters; - ++cpuhw->n_added; - - ret = 0; - out: - perf_enable(); - local_irq_restore(flags); - return ret; -} - -/* - * Remove a counter from the PMU. 
- */ -static void power_pmu_disable(struct perf_counter *counter) -{ - struct cpu_hw_counters *cpuhw; - long i; - unsigned long flags; - - local_irq_save(flags); - perf_disable(); - - power_pmu_read(counter); - - cpuhw = &__get_cpu_var(cpu_hw_counters); - for (i = 0; i < cpuhw->n_counters; ++i) { - if (counter == cpuhw->counter[i]) { - while (++i < cpuhw->n_counters) - cpuhw->counter[i-1] = cpuhw->counter[i]; - --cpuhw->n_counters; - ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); - if (counter->hw.idx) { - write_pmc(counter->hw.idx, 0); - counter->hw.idx = 0; - } - perf_counter_update_userpage(counter); - break; - } - } - for (i = 0; i < cpuhw->n_limited; ++i) - if (counter == cpuhw->limited_counter[i]) - break; - if (i < cpuhw->n_limited) { - while (++i < cpuhw->n_limited) { - cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; - cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; - } - --cpuhw->n_limited; - } - if (cpuhw->n_counters == 0) { - /* disable exceptions if no counters are running */ - cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); - } - - perf_enable(); - local_irq_restore(flags); -} - -/* - * Re-enable interrupts on a counter after they were throttled - * because they were coming too fast. - */ -static void power_pmu_unthrottle(struct perf_counter *counter) -{ - s64 val, left; - unsigned long flags; - - if (!counter->hw.idx || !counter->hw.sample_period) - return; - local_irq_save(flags); - perf_disable(); - power_pmu_read(counter); - left = counter->hw.sample_period; - counter->hw.last_period = left; - val = 0; - if (left < 0x80000000L) - val = 0x80000000L - left; - write_pmc(counter->hw.idx, val); - atomic64_set(&counter->hw.prev_count, val); - atomic64_set(&counter->hw.period_left, left); - perf_counter_update_userpage(counter); - perf_enable(); - local_irq_restore(flags); -} - -struct pmu power_pmu = { - .enable = power_pmu_enable, - .disable = power_pmu_disable, - .read = power_pmu_read, - .unthrottle = power_pmu_unthrottle, -}; - -/* - * Return 1 if we might be able to put counter on a limited PMC, - * or 0 if not. - * A counter can only go on a limited PMC if it counts something - * that a limited PMC can count, doesn't require interrupts, and - * doesn't exclude any processor mode. - */ -static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, - unsigned int flags) -{ - int n; - u64 alt[MAX_EVENT_ALTERNATIVES]; - - if (counter->attr.exclude_user - || counter->attr.exclude_kernel - || counter->attr.exclude_hv - || counter->attr.sample_period) - return 0; - - if (ppmu->limited_pmc_event(ev)) - return 1; - - /* - * The requested event isn't on a limited PMC already; - * see if any alternative code goes on a limited PMC. - */ - if (!ppmu->get_alternatives) - return 0; - - flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; - n = ppmu->get_alternatives(ev, flags, alt); - - return n > 0; -} - -/* - * Find an alternative event that goes on a normal PMC, if possible, - * and return the event code, or 0 if there is no such alternative. - * (Note: event code 0 is "don't count" on all machines.) 
- */ -static u64 normal_pmc_alternative(u64 ev, unsigned long flags) -{ - u64 alt[MAX_EVENT_ALTERNATIVES]; - int n; - - flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); - n = ppmu->get_alternatives(ev, flags, alt); - if (!n) - return 0; - return alt[0]; -} - -/* Number of perf_counters counting hardware events */ -static atomic_t num_counters; -/* Used to avoid races in calling reserve/release_pmc_hardware */ -static DEFINE_MUTEX(pmc_reserve_mutex); - -/* - * Release the PMU if this is the last perf_counter. - */ -static void hw_perf_counter_destroy(struct perf_counter *counter) -{ - if (!atomic_add_unless(&num_counters, -1, 1)) { - mutex_lock(&pmc_reserve_mutex); - if (atomic_dec_return(&num_counters) == 0) - release_pmc_hardware(); - mutex_unlock(&pmc_reserve_mutex); - } -} - -/* - * Translate a generic cache event config to a raw event code. - */ -static int hw_perf_cache_event(u64 config, u64 *eventp) -{ - unsigned long type, op, result; - int ev; - - if (!ppmu->cache_events) - return -EINVAL; - - /* unpack config */ - type = config & 0xff; - op = (config >> 8) & 0xff; - result = (config >> 16) & 0xff; - - if (type >= PERF_COUNT_HW_CACHE_MAX || - op >= PERF_COUNT_HW_CACHE_OP_MAX || - result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return -EINVAL; - - ev = (*ppmu->cache_events)[type][op][result]; - if (ev == 0) - return -EOPNOTSUPP; - if (ev == -1) - return -EINVAL; - *eventp = ev; - return 0; -} - -const struct pmu *hw_perf_counter_init(struct perf_counter *counter) -{ - u64 ev; - unsigned long flags; - struct perf_counter *ctrs[MAX_HWCOUNTERS]; - u64 events[MAX_HWCOUNTERS]; - unsigned int cflags[MAX_HWCOUNTERS]; - int n; - int err; - struct cpu_hw_counters *cpuhw; - - if (!ppmu) - return ERR_PTR(-ENXIO); - switch (counter->attr.type) { - case PERF_TYPE_HARDWARE: - ev = counter->attr.config; - if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) - return ERR_PTR(-EOPNOTSUPP); - ev = ppmu->generic_events[ev]; - break; - case PERF_TYPE_HW_CACHE: - err = hw_perf_cache_event(counter->attr.config, &ev); - if (err) - return ERR_PTR(err); - break; - case PERF_TYPE_RAW: - ev = counter->attr.config; - break; - default: - return ERR_PTR(-EINVAL); - } - counter->hw.config_base = ev; - counter->hw.idx = 0; - - /* - * If we are not running on a hypervisor, force the - * exclude_hv bit to 0 so that we don't care what - * the user set it to. - */ - if (!firmware_has_feature(FW_FEATURE_LPAR)) - counter->attr.exclude_hv = 0; - - /* - * If this is a per-task counter, then we can use - * PM_RUN_* events interchangeably with their non RUN_* - * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. - * XXX we should check if the task is an idle task. - */ - flags = 0; - if (counter->ctx->task) - flags |= PPMU_ONLY_COUNT_RUN; - - /* - * If this machine has limited counters, check whether this - * event could go on a limited counter. - */ - if (ppmu->flags & PPMU_LIMITED_PMC5_6) { - if (can_go_on_limited_pmc(counter, ev, flags)) { - flags |= PPMU_LIMITED_PMC_OK; - } else if (ppmu->limited_pmc_event(ev)) { - /* - * The requested event is on a limited PMC, - * but we can't use a limited PMC; see if any - * alternative goes on a normal PMC. - */ - ev = normal_pmc_alternative(ev, flags); - if (!ev) - return ERR_PTR(-EINVAL); - } - } - - /* - * If this is in a group, check if it can go on with all the - * other hardware counters in the group. We assume the counter - * hasn't been linked into its leader's sibling list at this point. 
- */ - n = 0; - if (counter->group_leader != counter) { - n = collect_events(counter->group_leader, ppmu->n_counter - 1, - ctrs, events, cflags); - if (n < 0) - return ERR_PTR(-EINVAL); - } - events[n] = ev; - ctrs[n] = counter; - cflags[n] = flags; - if (check_excludes(ctrs, cflags, n, 1)) - return ERR_PTR(-EINVAL); - - cpuhw = &get_cpu_var(cpu_hw_counters); - err = power_check_constraints(cpuhw, events, cflags, n + 1); - put_cpu_var(cpu_hw_counters); - if (err) - return ERR_PTR(-EINVAL); - - counter->hw.config = events[n]; - counter->hw.counter_base = cflags[n]; - counter->hw.last_period = counter->hw.sample_period; - atomic64_set(&counter->hw.period_left, counter->hw.last_period); - - /* - * See if we need to reserve the PMU. - * If no counters are currently in use, then we have to take a - * mutex to ensure that we don't race with another task doing - * reserve_pmc_hardware or release_pmc_hardware. - */ - err = 0; - if (!atomic_inc_not_zero(&num_counters)) { - mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&num_counters) == 0 && - reserve_pmc_hardware(perf_counter_interrupt)) - err = -EBUSY; - else - atomic_inc(&num_counters); - mutex_unlock(&pmc_reserve_mutex); - } - counter->destroy = hw_perf_counter_destroy; - - if (err) - return ERR_PTR(err); - return &power_pmu; -} - -/* - * A counter has overflowed; update its count and record - * things if requested. Note that interrupts are hard-disabled - * here so there is no possibility of being interrupted. - */ -static void record_and_restart(struct perf_counter *counter, unsigned long val, - struct pt_regs *regs, int nmi) -{ - u64 period = counter->hw.sample_period; - s64 prev, delta, left; - int record = 0; - - /* we don't have to worry about interrupts here */ - prev = atomic64_read(&counter->hw.prev_count); - delta = (val - prev) & 0xfffffffful; - atomic64_add(delta, &counter->count); - - /* - * See if the total period for this counter has expired, - * and update for the next period. - */ - val = 0; - left = atomic64_read(&counter->hw.period_left) - delta; - if (period) { - if (left <= 0) { - left += period; - if (left <= 0) - left = period; - record = 1; - } - if (left < 0x80000000LL) - val = 0x80000000LL - left; - } - - /* - * Finally record data if requested. - */ - if (record) { - struct perf_sample_data data = { - .addr = 0, - .period = counter->hw.last_period, - }; - - if (counter->attr.sample_type & PERF_SAMPLE_ADDR) - perf_get_data_addr(regs, &data.addr); - - if (perf_counter_overflow(counter, nmi, &data, regs)) { - /* - * Interrupts are coming too fast - throttle them - * by setting the counter to 0, so it will be - * at least 2^30 cycles until the next interrupt - * (assuming each counter counts at most 2 counts - * per cycle). - */ - val = 0; - left = ~0ULL >> 1; - } - } - - write_pmc(counter->hw.idx, val); - atomic64_set(&counter->hw.prev_count, val); - atomic64_set(&counter->hw.period_left, left); - perf_counter_update_userpage(counter); -} - -/* - * Called from generic code to get the misc flags (i.e. processor mode) - * for an event. - */ -unsigned long perf_misc_flags(struct pt_regs *regs) -{ - u32 flags = perf_get_misc_flags(regs); - - if (flags) - return flags; - return user_mode(regs) ? PERF_EVENT_MISC_USER : - PERF_EVENT_MISC_KERNEL; -} - -/* - * Called from generic code to get the instruction pointer - * for an event. 
- */ -unsigned long perf_instruction_pointer(struct pt_regs *regs) -{ - unsigned long ip; - - if (TRAP(regs) != 0xf00) - return regs->nip; /* not a PMU interrupt */ - - ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); - return ip; -} - -/* - * Performance monitor interrupt stuff - */ -static void perf_counter_interrupt(struct pt_regs *regs) -{ - int i; - struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); - struct perf_counter *counter; - unsigned long val; - int found = 0; - int nmi; - - if (cpuhw->n_limited) - freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), - mfspr(SPRN_PMC6)); - - perf_read_regs(regs); - - nmi = perf_intr_is_nmi(regs); - if (nmi) - nmi_enter(); - else - irq_enter(); - - for (i = 0; i < cpuhw->n_counters; ++i) { - counter = cpuhw->counter[i]; - if (!counter->hw.idx || is_limited_pmc(counter->hw.idx)) - continue; - val = read_pmc(counter->hw.idx); - if ((int)val < 0) { - /* counter has overflowed */ - found = 1; - record_and_restart(counter, val, regs, nmi); - } - } - - /* - * In case we didn't find and reset the counter that caused - * the interrupt, scan all counters and reset any that are - * negative, to avoid getting continual interrupts. - * Any that we processed in the previous loop will not be negative. - */ - if (!found) { - for (i = 0; i < ppmu->n_counter; ++i) { - if (is_limited_pmc(i + 1)) - continue; - val = read_pmc(i + 1); - if ((int)val < 0) - write_pmc(i + 1, 0); - } - } - - /* - * Reset MMCR0 to its normal value. This will set PMXE and - * clear FC (freeze counters) and PMAO (perf mon alert occurred) - * and thus allow interrupts to occur again. - * XXX might want to use MSR.PM to keep the counters frozen until - * we get back out of this interrupt. - */ - write_mmcr0(cpuhw, cpuhw->mmcr[0]); - - if (nmi) - nmi_exit(); - else - irq_exit(); -} - -void hw_perf_counter_setup(int cpu) -{ - struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); - - if (!ppmu) - return; - memset(cpuhw, 0, sizeof(*cpuhw)); - cpuhw->mmcr[0] = MMCR0_FC; -} - -int register_power_pmu(struct power_pmu *pmu) -{ - if (ppmu) - return -EBUSY; /* something's already registered */ - - ppmu = pmu; - pr_info("%s performance monitor hardware support registered\n", - pmu->name); - -#ifdef MSR_HV - /* - * Use FCHV to ignore kernel events if MSR.HV is set. - */ - if (mfmsr() & MSR_HV) - freeze_counters_kernel = MMCR0_FCHV; -#endif /* CONFIG_PPC64 */ - - return 0; -} diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c new file mode 100644 index 00000000000..c98321fcb45 --- /dev/null +++ b/arch/powerpc/kernel/perf_event.c @@ -0,0 +1,1315 @@ +/* + * Performance event support - powerpc architecture code + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct cpu_hw_events { + int n_events; + int n_percpu; + int disabled; + int n_added; + int n_limited; + u8 pmcs_enabled; + struct perf_event *event[MAX_HWEVENTS]; + u64 events[MAX_HWEVENTS]; + unsigned int flags[MAX_HWEVENTS]; + unsigned long mmcr[3]; + struct perf_event *limited_event[MAX_LIMITED_HWEVENTS]; + u8 limited_hwidx[MAX_LIMITED_HWEVENTS]; + u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; + unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; + unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +struct power_pmu *ppmu; + +/* + * Normally, to ignore kernel events we set the FCS (freeze events + * in supervisor mode) bit in MMCR0, but if the kernel runs with the + * hypervisor bit set in the MSR, or if we are running on a processor + * where the hypervisor bit is forced to 1 (as on Apple G5 processors), + * then we need to use the FCHV bit to ignore kernel events. + */ +static unsigned int freeze_events_kernel = MMCR0_FCS; + +/* + * 32-bit doesn't have MMCRA but does have an MMCR2, + * and a few other names are different. + */ +#ifdef CONFIG_PPC32 + +#define MMCR0_FCHV 0 +#define MMCR0_PMCjCE MMCR0_PMCnCE + +#define SPRN_MMCRA SPRN_MMCR2 +#define MMCRA_SAMPLE_ENABLE 0 + +static inline unsigned long perf_ip_adjust(struct pt_regs *regs) +{ + return 0; +} +static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { } +static inline u32 perf_get_misc_flags(struct pt_regs *regs) +{ + return 0; +} +static inline void perf_read_regs(struct pt_regs *regs) { } +static inline int perf_intr_is_nmi(struct pt_regs *regs) +{ + return 0; +} + +#endif /* CONFIG_PPC32 */ + +/* + * Things that are specific to 64-bit implementations. + */ +#ifdef CONFIG_PPC64 + +static inline unsigned long perf_ip_adjust(struct pt_regs *regs) +{ + unsigned long mmcra = regs->dsisr; + + if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { + unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; + if (slot > 1) + return 4 * (slot - 1); + } + return 0; +} + +/* + * The user wants a data address recorded. + * If we're not doing instruction sampling, give them the SDAR + * (sampled data address). If we are doing instruction sampling, then + * only give them the SDAR if it corresponds to the instruction + * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC + * bit in MMCRA. + */ +static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) +{ + unsigned long mmcra = regs->dsisr; + unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? + POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; + + if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) + *addrp = mfspr(SPRN_SDAR); +} + +static inline u32 perf_get_misc_flags(struct pt_regs *regs) +{ + unsigned long mmcra = regs->dsisr; + + if (TRAP(regs) != 0xf00) + return 0; /* not a PMU interrupt */ + + if (ppmu->flags & PPMU_ALT_SIPR) { + if (mmcra & POWER6_MMCRA_SIHV) + return PERF_RECORD_MISC_HYPERVISOR; + return (mmcra & POWER6_MMCRA_SIPR) ? + PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL; + } + if (mmcra & MMCRA_SIHV) + return PERF_RECORD_MISC_HYPERVISOR; + return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER : + PERF_RECORD_MISC_KERNEL; +} + +/* + * Overload regs->dsisr to store MMCRA so we only need to read it once + * on each interrupt. 
+ */ +static inline void perf_read_regs(struct pt_regs *regs) +{ + regs->dsisr = mfspr(SPRN_MMCRA); +} + +/* + * If interrupts were soft-disabled when a PMU interrupt occurs, treat + * it as an NMI. + */ +static inline int perf_intr_is_nmi(struct pt_regs *regs) +{ + return !regs->softe; +} + +#endif /* CONFIG_PPC64 */ + +static void perf_event_interrupt(struct pt_regs *regs); + +void perf_event_print_debug(void) +{ +} + +/* + * Read one performance monitor event (PMC). + */ +static unsigned long read_pmc(int idx) +{ + unsigned long val; + + switch (idx) { + case 1: + val = mfspr(SPRN_PMC1); + break; + case 2: + val = mfspr(SPRN_PMC2); + break; + case 3: + val = mfspr(SPRN_PMC3); + break; + case 4: + val = mfspr(SPRN_PMC4); + break; + case 5: + val = mfspr(SPRN_PMC5); + break; + case 6: + val = mfspr(SPRN_PMC6); + break; +#ifdef CONFIG_PPC64 + case 7: + val = mfspr(SPRN_PMC7); + break; + case 8: + val = mfspr(SPRN_PMC8); + break; +#endif /* CONFIG_PPC64 */ + default: + printk(KERN_ERR "oops trying to read PMC%d\n", idx); + val = 0; + } + return val; +} + +/* + * Write one PMC. + */ +static void write_pmc(int idx, unsigned long val) +{ + switch (idx) { + case 1: + mtspr(SPRN_PMC1, val); + break; + case 2: + mtspr(SPRN_PMC2, val); + break; + case 3: + mtspr(SPRN_PMC3, val); + break; + case 4: + mtspr(SPRN_PMC4, val); + break; + case 5: + mtspr(SPRN_PMC5, val); + break; + case 6: + mtspr(SPRN_PMC6, val); + break; +#ifdef CONFIG_PPC64 + case 7: + mtspr(SPRN_PMC7, val); + break; + case 8: + mtspr(SPRN_PMC8, val); + break; +#endif /* CONFIG_PPC64 */ + default: + printk(KERN_ERR "oops trying to write PMC%d\n", idx); + } +} + +/* + * Check if a set of events can all go on the PMU at once. + * If they can't, this will look at alternative codes for the events + * and see if any combination of alternative codes is feasible. + * The feasible set is returned in event_id[]. + */ +static int power_check_constraints(struct cpu_hw_events *cpuhw, + u64 event_id[], unsigned int cflags[], + int n_ev) +{ + unsigned long mask, value, nv; + unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; + int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; + int i, j; + unsigned long addf = ppmu->add_fields; + unsigned long tadd = ppmu->test_adder; + + if (n_ev > ppmu->n_event) + return -1; + + /* First see if the events will go on as-is */ + for (i = 0; i < n_ev; ++i) { + if ((cflags[i] & PPMU_LIMITED_PMC_REQD) + && !ppmu->limited_pmc_event(event_id[i])) { + ppmu->get_alternatives(event_id[i], cflags[i], + cpuhw->alternatives[i]); + event_id[i] = cpuhw->alternatives[i][0]; + } + if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], + &cpuhw->avalues[i][0])) + return -1; + } + value = mask = 0; + for (i = 0; i < n_ev; ++i) { + nv = (value | cpuhw->avalues[i][0]) + + (value & cpuhw->avalues[i][0] & addf); + if ((((nv + tadd) ^ value) & mask) != 0 || + (((nv + tadd) ^ cpuhw->avalues[i][0]) & + cpuhw->amasks[i][0]) != 0) + break; + value = nv; + mask |= cpuhw->amasks[i][0]; + } + if (i == n_ev) + return 0; /* all OK */ + + /* doesn't work, gather alternatives... 
*/ + if (!ppmu->get_alternatives) + return -1; + for (i = 0; i < n_ev; ++i) { + choice[i] = 0; + n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], + cpuhw->alternatives[i]); + for (j = 1; j < n_alt[i]; ++j) + ppmu->get_constraint(cpuhw->alternatives[i][j], + &cpuhw->amasks[i][j], + &cpuhw->avalues[i][j]); + } + + /* enumerate all possibilities and see if any will work */ + i = 0; + j = -1; + value = mask = nv = 0; + while (i < n_ev) { + if (j >= 0) { + /* we're backtracking, restore context */ + value = svalues[i]; + mask = smasks[i]; + j = choice[i]; + } + /* + * See if any alternative k for event_id i, + * where k > j, will satisfy the constraints. + */ + while (++j < n_alt[i]) { + nv = (value | cpuhw->avalues[i][j]) + + (value & cpuhw->avalues[i][j] & addf); + if ((((nv + tadd) ^ value) & mask) == 0 && + (((nv + tadd) ^ cpuhw->avalues[i][j]) + & cpuhw->amasks[i][j]) == 0) + break; + } + if (j >= n_alt[i]) { + /* + * No feasible alternative, backtrack + * to event_id i-1 and continue enumerating its + * alternatives from where we got up to. + */ + if (--i < 0) + return -1; + } else { + /* + * Found a feasible alternative for event_id i, + * remember where we got up to with this event_id, + * go on to the next event_id, and start with + * the first alternative for it. + */ + choice[i] = j; + svalues[i] = value; + smasks[i] = mask; + value = nv; + mask |= cpuhw->amasks[i][j]; + ++i; + j = -1; + } + } + + /* OK, we have a feasible combination, tell the caller the solution */ + for (i = 0; i < n_ev; ++i) + event_id[i] = cpuhw->alternatives[i][choice[i]]; + return 0; +} + +/* + * Check if newly-added events have consistent settings for + * exclude_{user,kernel,hv} with each other and any previously + * added events. + */ +static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], + int n_prev, int n_new) +{ + int eu = 0, ek = 0, eh = 0; + int i, n, first; + struct perf_event *event; + + n = n_prev + n_new; + if (n <= 1) + return 0; + + first = 1; + for (i = 0; i < n; ++i) { + if (cflags[i] & PPMU_LIMITED_PMC_OK) { + cflags[i] &= ~PPMU_LIMITED_PMC_REQD; + continue; + } + event = ctrs[i]; + if (first) { + eu = event->attr.exclude_user; + ek = event->attr.exclude_kernel; + eh = event->attr.exclude_hv; + first = 0; + } else if (event->attr.exclude_user != eu || + event->attr.exclude_kernel != ek || + event->attr.exclude_hv != eh) { + return -EAGAIN; + } + } + + if (eu || ek || eh) + for (i = 0; i < n; ++i) + if (cflags[i] & PPMU_LIMITED_PMC_OK) + cflags[i] |= PPMU_LIMITED_PMC_REQD; + + return 0; +} + +static void power_pmu_read(struct perf_event *event) +{ + s64 val, delta, prev; + + if (!event->hw.idx) + return; + /* + * Performance monitor interrupts come even when interrupts + * are soft-disabled, as long as interrupts are hard-enabled. + * Therefore we treat them like NMIs. + */ + do { + prev = atomic64_read(&event->hw.prev_count); + barrier(); + val = read_pmc(event->hw.idx); + } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); + + /* The events are only 32 bits wide */ + delta = (val - prev) & 0xfffffffful; + atomic64_add(delta, &event->count); + atomic64_sub(delta, &event->hw.period_left); +} + +/* + * On some machines, PMC5 and PMC6 can't be written, don't respect + * the freeze conditions, and don't generate interrupts. This tells + * us if `event' is using such a PMC. 
+ */ +static int is_limited_pmc(int pmcnum) +{ + return (ppmu->flags & PPMU_LIMITED_PMC5_6) + && (pmcnum == 5 || pmcnum == 6); +} + +static void freeze_limited_events(struct cpu_hw_events *cpuhw, + unsigned long pmc5, unsigned long pmc6) +{ + struct perf_event *event; + u64 val, prev, delta; + int i; + + for (i = 0; i < cpuhw->n_limited; ++i) { + event = cpuhw->limited_event[i]; + if (!event->hw.idx) + continue; + val = (event->hw.idx == 5) ? pmc5 : pmc6; + prev = atomic64_read(&event->hw.prev_count); + event->hw.idx = 0; + delta = (val - prev) & 0xfffffffful; + atomic64_add(delta, &event->count); + } +} + +static void thaw_limited_events(struct cpu_hw_events *cpuhw, + unsigned long pmc5, unsigned long pmc6) +{ + struct perf_event *event; + u64 val; + int i; + + for (i = 0; i < cpuhw->n_limited; ++i) { + event = cpuhw->limited_event[i]; + event->hw.idx = cpuhw->limited_hwidx[i]; + val = (event->hw.idx == 5) ? pmc5 : pmc6; + atomic64_set(&event->hw.prev_count, val); + perf_event_update_userpage(event); + } +} + +/* + * Since limited events don't respect the freeze conditions, we + * have to read them immediately after freezing or unfreezing the + * other events. We try to keep the values from the limited + * events as consistent as possible by keeping the delay (in + * cycles and instructions) between freezing/unfreezing and reading + * the limited events as small and consistent as possible. + * Therefore, if any limited events are in use, we read them + * both, and always in the same order, to minimize variability, + * and do it inside the same asm that writes MMCR0. + */ +static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) +{ + unsigned long pmc5, pmc6; + + if (!cpuhw->n_limited) { + mtspr(SPRN_MMCR0, mmcr0); + return; + } + + /* + * Write MMCR0, then read PMC5 and PMC6 immediately. + * To ensure we don't get a performance monitor interrupt + * between writing MMCR0 and freezing/thawing the limited + * events, we first write MMCR0 with the event overflow + * interrupt enable bits turned off. + */ + asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" + : "=&r" (pmc5), "=&r" (pmc6) + : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), + "i" (SPRN_MMCR0), + "i" (SPRN_PMC5), "i" (SPRN_PMC6)); + + if (mmcr0 & MMCR0_FC) + freeze_limited_events(cpuhw, pmc5, pmc6); + else + thaw_limited_events(cpuhw, pmc5, pmc6); + + /* + * Write the full MMCR0 including the event overflow interrupt + * enable bits, if necessary. + */ + if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) + mtspr(SPRN_MMCR0, mmcr0); +} + +/* + * Disable all events to prevent PMU interrupts and to allow + * events to be added or removed. + */ +void hw_perf_disable(void) +{ + struct cpu_hw_events *cpuhw; + unsigned long flags; + + if (!ppmu) + return; + local_irq_save(flags); + cpuhw = &__get_cpu_var(cpu_hw_events); + + if (!cpuhw->disabled) { + cpuhw->disabled = 1; + cpuhw->n_added = 0; + + /* + * Check if we ever enabled the PMU on this cpu. + */ + if (!cpuhw->pmcs_enabled) { + ppc_enable_pmcs(); + cpuhw->pmcs_enabled = 1; + } + + /* + * Disable instruction sampling if it was enabled + */ + if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { + mtspr(SPRN_MMCRA, + cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); + mb(); + } + + /* + * Set the 'freeze events' bit. + * The barrier is to make sure the mtspr has been + * executed and the PMU has frozen the events + * before we return. + */ + write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); + mb(); + } + local_irq_restore(flags); +} + +/* + * Re-enable all events if disable == 0. 
+ * If we were previously disabled and events were added, then + * put the new config on the PMU. + */ +void hw_perf_enable(void) +{ + struct perf_event *event; + struct cpu_hw_events *cpuhw; + unsigned long flags; + long i; + unsigned long val; + s64 left; + unsigned int hwc_index[MAX_HWEVENTS]; + int n_lim; + int idx; + + if (!ppmu) + return; + local_irq_save(flags); + cpuhw = &__get_cpu_var(cpu_hw_events); + if (!cpuhw->disabled) { + local_irq_restore(flags); + return; + } + cpuhw->disabled = 0; + + /* + * If we didn't change anything, or only removed events, + * no need to recalculate MMCR* settings and reset the PMCs. + * Just reenable the PMU with the current MMCR* settings + * (possibly updated for removal of events). + */ + if (!cpuhw->n_added) { + mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); + mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); + if (cpuhw->n_events == 0) + ppc_set_pmu_inuse(0); + goto out_enable; + } + + /* + * Compute MMCR* values for the new set of events + */ + if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, + cpuhw->mmcr)) { + /* shouldn't ever get here */ + printk(KERN_ERR "oops compute_mmcr failed\n"); + goto out; + } + + /* + * Add in MMCR0 freeze bits corresponding to the + * attr.exclude_* bits for the first event. + * We have already checked that all events have the + * same values for these bits as the first event. + */ + event = cpuhw->event[0]; + if (event->attr.exclude_user) + cpuhw->mmcr[0] |= MMCR0_FCP; + if (event->attr.exclude_kernel) + cpuhw->mmcr[0] |= freeze_events_kernel; + if (event->attr.exclude_hv) + cpuhw->mmcr[0] |= MMCR0_FCHV; + + /* + * Write the new configuration to MMCR* with the freeze + * bit set and set the hardware events to their initial values. + * Then unfreeze the events. + */ + ppc_set_pmu_inuse(1); + mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); + mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); + mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) + | MMCR0_FC); + + /* + * Read off any pre-existing events that need to move + * to another PMC. + */ + for (i = 0; i < cpuhw->n_events; ++i) { + event = cpuhw->event[i]; + if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { + power_pmu_read(event); + write_pmc(event->hw.idx, 0); + event->hw.idx = 0; + } + } + + /* + * Initialize the PMCs for all the new and moved events. 
+ */ + cpuhw->n_limited = n_lim = 0; + for (i = 0; i < cpuhw->n_events; ++i) { + event = cpuhw->event[i]; + if (event->hw.idx) + continue; + idx = hwc_index[i] + 1; + if (is_limited_pmc(idx)) { + cpuhw->limited_event[n_lim] = event; + cpuhw->limited_hwidx[n_lim] = idx; + ++n_lim; + continue; + } + val = 0; + if (event->hw.sample_period) { + left = atomic64_read(&event->hw.period_left); + if (left < 0x80000000L) + val = 0x80000000L - left; + } + atomic64_set(&event->hw.prev_count, val); + event->hw.idx = idx; + write_pmc(idx, val); + perf_event_update_userpage(event); + } + cpuhw->n_limited = n_lim; + cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; + + out_enable: + mb(); + write_mmcr0(cpuhw, cpuhw->mmcr[0]); + + /* + * Enable instruction sampling if necessary + */ + if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { + mb(); + mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); + } + + out: + local_irq_restore(flags); +} + +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *ctrs[], u64 *events, + unsigned int *flags) +{ + int n = 0; + struct perf_event *event; + + if (!is_software_event(group)) { + if (n >= max_count) + return -1; + ctrs[n] = group; + flags[n] = group->hw.event_base; + events[n++] = group->hw.config; + } + list_for_each_entry(event, &group->sibling_list, list_entry) { + if (!is_software_event(event) && + event->state != PERF_EVENT_STATE_OFF) { + if (n >= max_count) + return -1; + ctrs[n] = event; + flags[n] = event->hw.event_base; + events[n++] = event->hw.config; + } + } + return n; +} + +static void event_sched_in(struct perf_event *event, int cpu) +{ + event->state = PERF_EVENT_STATE_ACTIVE; + event->oncpu = cpu; + event->tstamp_running += event->ctx->time - event->tstamp_stopped; + if (is_software_event(event)) + event->pmu->enable(event); +} + +/* + * Called to enable a whole group of events. + * Returns 1 if the group was enabled, or -EAGAIN if it could not be. + * Assumes the caller has disabled interrupts and has + * frozen the PMU with hw_perf_save_disable. + */ +int hw_perf_group_sched_in(struct perf_event *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx, int cpu) +{ + struct cpu_hw_events *cpuhw; + long i, n, n0; + struct perf_event *sub; + + if (!ppmu) + return 0; + cpuhw = &__get_cpu_var(cpu_hw_events); + n0 = cpuhw->n_events; + n = collect_events(group_leader, ppmu->n_event - n0, + &cpuhw->event[n0], &cpuhw->events[n0], + &cpuhw->flags[n0]); + if (n < 0) + return -EAGAIN; + if (check_excludes(cpuhw->event, cpuhw->flags, n0, n)) + return -EAGAIN; + i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); + if (i < 0) + return -EAGAIN; + cpuhw->n_events = n0 + n; + cpuhw->n_added += n; + + /* + * OK, this group can go on; update event states etc., + * and enable any software events + */ + for (i = n0; i < n0 + n; ++i) + cpuhw->event[i]->hw.config = cpuhw->events[i]; + cpuctx->active_oncpu += n; + n = 1; + event_sched_in(group_leader, cpu); + list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { + if (sub->state != PERF_EVENT_STATE_OFF) { + event_sched_in(sub, cpu); + ++n; + } + } + ctx->nr_active += n; + + return 1; +} + +/* + * Add a event to the PMU. + * If all events are not already frozen, then we disable and + * re-enable the PMU in order to get hw_perf_enable to do the + * actual work of reconfiguring the PMU. 
+ */ +static int power_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuhw; + unsigned long flags; + int n0; + int ret = -EAGAIN; + + local_irq_save(flags); + perf_disable(); + + /* + * Add the event to the list (if there is room) + * and check whether the total set is still feasible. + */ + cpuhw = &__get_cpu_var(cpu_hw_events); + n0 = cpuhw->n_events; + if (n0 >= ppmu->n_event) + goto out; + cpuhw->event[n0] = event; + cpuhw->events[n0] = event->hw.config; + cpuhw->flags[n0] = event->hw.event_base; + if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) + goto out; + if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) + goto out; + + event->hw.config = cpuhw->events[n0]; + ++cpuhw->n_events; + ++cpuhw->n_added; + + ret = 0; + out: + perf_enable(); + local_irq_restore(flags); + return ret; +} + +/* + * Remove a event from the PMU. + */ +static void power_pmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuhw; + long i; + unsigned long flags; + + local_irq_save(flags); + perf_disable(); + + power_pmu_read(event); + + cpuhw = &__get_cpu_var(cpu_hw_events); + for (i = 0; i < cpuhw->n_events; ++i) { + if (event == cpuhw->event[i]) { + while (++i < cpuhw->n_events) + cpuhw->event[i-1] = cpuhw->event[i]; + --cpuhw->n_events; + ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); + if (event->hw.idx) { + write_pmc(event->hw.idx, 0); + event->hw.idx = 0; + } + perf_event_update_userpage(event); + break; + } + } + for (i = 0; i < cpuhw->n_limited; ++i) + if (event == cpuhw->limited_event[i]) + break; + if (i < cpuhw->n_limited) { + while (++i < cpuhw->n_limited) { + cpuhw->limited_event[i-1] = cpuhw->limited_event[i]; + cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; + } + --cpuhw->n_limited; + } + if (cpuhw->n_events == 0) { + /* disable exceptions if no events are running */ + cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); + } + + perf_enable(); + local_irq_restore(flags); +} + +/* + * Re-enable interrupts on a event after they were throttled + * because they were coming too fast. + */ +static void power_pmu_unthrottle(struct perf_event *event) +{ + s64 val, left; + unsigned long flags; + + if (!event->hw.idx || !event->hw.sample_period) + return; + local_irq_save(flags); + perf_disable(); + power_pmu_read(event); + left = event->hw.sample_period; + event->hw.last_period = left; + val = 0; + if (left < 0x80000000L) + val = 0x80000000L - left; + write_pmc(event->hw.idx, val); + atomic64_set(&event->hw.prev_count, val); + atomic64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); + perf_enable(); + local_irq_restore(flags); +} + +struct pmu power_pmu = { + .enable = power_pmu_enable, + .disable = power_pmu_disable, + .read = power_pmu_read, + .unthrottle = power_pmu_unthrottle, +}; + +/* + * Return 1 if we might be able to put event on a limited PMC, + * or 0 if not. + * A event can only go on a limited PMC if it counts something + * that a limited PMC can count, doesn't require interrupts, and + * doesn't exclude any processor mode. + */ +static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, + unsigned int flags) +{ + int n; + u64 alt[MAX_EVENT_ALTERNATIVES]; + + if (event->attr.exclude_user + || event->attr.exclude_kernel + || event->attr.exclude_hv + || event->attr.sample_period) + return 0; + + if (ppmu->limited_pmc_event(ev)) + return 1; + + /* + * The requested event_id isn't on a limited PMC already; + * see if any alternative code goes on a limited PMC. 
+ */ + if (!ppmu->get_alternatives) + return 0; + + flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; + n = ppmu->get_alternatives(ev, flags, alt); + + return n > 0; +} + +/* + * Find an alternative event_id that goes on a normal PMC, if possible, + * and return the event_id code, or 0 if there is no such alternative. + * (Note: event_id code 0 is "don't count" on all machines.) + */ +static u64 normal_pmc_alternative(u64 ev, unsigned long flags) +{ + u64 alt[MAX_EVENT_ALTERNATIVES]; + int n; + + flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); + n = ppmu->get_alternatives(ev, flags, alt); + if (!n) + return 0; + return alt[0]; +} + +/* Number of perf_events counting hardware events */ +static atomic_t num_events; +/* Used to avoid races in calling reserve/release_pmc_hardware */ +static DEFINE_MUTEX(pmc_reserve_mutex); + +/* + * Release the PMU if this is the last perf_event. + */ +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (!atomic_add_unless(&num_events, -1, 1)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_dec_return(&num_events) == 0) + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + +/* + * Translate a generic cache event_id config to a raw event_id code. + */ +static int hw_perf_cache_event(u64 config, u64 *eventp) +{ + unsigned long type, op, result; + int ev; + + if (!ppmu->cache_events) + return -EINVAL; + + /* unpack config */ + type = config & 0xff; + op = (config >> 8) & 0xff; + result = (config >> 16) & 0xff; + + if (type >= PERF_COUNT_HW_CACHE_MAX || + op >= PERF_COUNT_HW_CACHE_OP_MAX || + result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ev = (*ppmu->cache_events)[type][op][result]; + if (ev == 0) + return -EOPNOTSUPP; + if (ev == -1) + return -EINVAL; + *eventp = ev; + return 0; +} + +const struct pmu *hw_perf_event_init(struct perf_event *event) +{ + u64 ev; + unsigned long flags; + struct perf_event *ctrs[MAX_HWEVENTS]; + u64 events[MAX_HWEVENTS]; + unsigned int cflags[MAX_HWEVENTS]; + int n; + int err; + struct cpu_hw_events *cpuhw; + + if (!ppmu) + return ERR_PTR(-ENXIO); + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + ev = event->attr.config; + if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) + return ERR_PTR(-EOPNOTSUPP); + ev = ppmu->generic_events[ev]; + break; + case PERF_TYPE_HW_CACHE: + err = hw_perf_cache_event(event->attr.config, &ev); + if (err) + return ERR_PTR(err); + break; + case PERF_TYPE_RAW: + ev = event->attr.config; + break; + default: + return ERR_PTR(-EINVAL); + } + event->hw.config_base = ev; + event->hw.idx = 0; + + /* + * If we are not running on a hypervisor, force the + * exclude_hv bit to 0 so that we don't care what + * the user set it to. + */ + if (!firmware_has_feature(FW_FEATURE_LPAR)) + event->attr.exclude_hv = 0; + + /* + * If this is a per-task event, then we can use + * PM_RUN_* events interchangeably with their non RUN_* + * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. + * XXX we should check if the task is an idle task. + */ + flags = 0; + if (event->ctx->task) + flags |= PPMU_ONLY_COUNT_RUN; + + /* + * If this machine has limited events, check whether this + * event_id could go on a limited event. + */ + if (ppmu->flags & PPMU_LIMITED_PMC5_6) { + if (can_go_on_limited_pmc(event, ev, flags)) { + flags |= PPMU_LIMITED_PMC_OK; + } else if (ppmu->limited_pmc_event(ev)) { + /* + * The requested event_id is on a limited PMC, + * but we can't use a limited PMC; see if any + * alternative goes on a normal PMC. 
+ */ + ev = normal_pmc_alternative(ev, flags); + if (!ev) + return ERR_PTR(-EINVAL); + } + } + + /* + * If this is in a group, check if it can go on with all the + * other hardware events in the group. We assume the event + * hasn't been linked into its leader's sibling list at this point. + */ + n = 0; + if (event->group_leader != event) { + n = collect_events(event->group_leader, ppmu->n_event - 1, + ctrs, events, cflags); + if (n < 0) + return ERR_PTR(-EINVAL); + } + events[n] = ev; + ctrs[n] = event; + cflags[n] = flags; + if (check_excludes(ctrs, cflags, n, 1)) + return ERR_PTR(-EINVAL); + + cpuhw = &get_cpu_var(cpu_hw_events); + err = power_check_constraints(cpuhw, events, cflags, n + 1); + put_cpu_var(cpu_hw_events); + if (err) + return ERR_PTR(-EINVAL); + + event->hw.config = events[n]; + event->hw.event_base = cflags[n]; + event->hw.last_period = event->hw.sample_period; + atomic64_set(&event->hw.period_left, event->hw.last_period); + + /* + * See if we need to reserve the PMU. + * If no events are currently in use, then we have to take a + * mutex to ensure that we don't race with another task doing + * reserve_pmc_hardware or release_pmc_hardware. + */ + err = 0; + if (!atomic_inc_not_zero(&num_events)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&num_events) == 0 && + reserve_pmc_hardware(perf_event_interrupt)) + err = -EBUSY; + else + atomic_inc(&num_events); + mutex_unlock(&pmc_reserve_mutex); + } + event->destroy = hw_perf_event_destroy; + + if (err) + return ERR_PTR(err); + return &power_pmu; +} + +/* + * A event has overflowed; update its count and record + * things if requested. Note that interrupts are hard-disabled + * here so there is no possibility of being interrupted. + */ +static void record_and_restart(struct perf_event *event, unsigned long val, + struct pt_regs *regs, int nmi) +{ + u64 period = event->hw.sample_period; + s64 prev, delta, left; + int record = 0; + + /* we don't have to worry about interrupts here */ + prev = atomic64_read(&event->hw.prev_count); + delta = (val - prev) & 0xfffffffful; + atomic64_add(delta, &event->count); + + /* + * See if the total period for this event has expired, + * and update for the next period. + */ + val = 0; + left = atomic64_read(&event->hw.period_left) - delta; + if (period) { + if (left <= 0) { + left += period; + if (left <= 0) + left = period; + record = 1; + } + if (left < 0x80000000LL) + val = 0x80000000LL - left; + } + + /* + * Finally record data if requested. + */ + if (record) { + struct perf_sample_data data = { + .addr = 0, + .period = event->hw.last_period, + }; + + if (event->attr.sample_type & PERF_SAMPLE_ADDR) + perf_get_data_addr(regs, &data.addr); + + if (perf_event_overflow(event, nmi, &data, regs)) { + /* + * Interrupts are coming too fast - throttle them + * by setting the event to 0, so it will be + * at least 2^30 cycles until the next interrupt + * (assuming each event counts at most 2 counts + * per cycle). + */ + val = 0; + left = ~0ULL >> 1; + } + } + + write_pmc(event->hw.idx, val); + atomic64_set(&event->hw.prev_count, val); + atomic64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); +} + +/* + * Called from generic code to get the misc flags (i.e. processor mode) + * for an event_id. + */ +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + u32 flags = perf_get_misc_flags(regs); + + if (flags) + return flags; + return user_mode(regs) ? 
PERF_RECORD_MISC_USER : + PERF_RECORD_MISC_KERNEL; +} + +/* + * Called from generic code to get the instruction pointer + * for an event_id. + */ +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + unsigned long ip; + + if (TRAP(regs) != 0xf00) + return regs->nip; /* not a PMU interrupt */ + + ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); + return ip; +} + +/* + * Performance monitor interrupt stuff + */ +static void perf_event_interrupt(struct pt_regs *regs) +{ + int i; + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + struct perf_event *event; + unsigned long val; + int found = 0; + int nmi; + + if (cpuhw->n_limited) + freeze_limited_events(cpuhw, mfspr(SPRN_PMC5), + mfspr(SPRN_PMC6)); + + perf_read_regs(regs); + + nmi = perf_intr_is_nmi(regs); + if (nmi) + nmi_enter(); + else + irq_enter(); + + for (i = 0; i < cpuhw->n_events; ++i) { + event = cpuhw->event[i]; + if (!event->hw.idx || is_limited_pmc(event->hw.idx)) + continue; + val = read_pmc(event->hw.idx); + if ((int)val < 0) { + /* event has overflowed */ + found = 1; + record_and_restart(event, val, regs, nmi); + } + } + + /* + * In case we didn't find and reset the event that caused + * the interrupt, scan all events and reset any that are + * negative, to avoid getting continual interrupts. + * Any that we processed in the previous loop will not be negative. + */ + if (!found) { + for (i = 0; i < ppmu->n_event; ++i) { + if (is_limited_pmc(i + 1)) + continue; + val = read_pmc(i + 1); + if ((int)val < 0) + write_pmc(i + 1, 0); + } + } + + /* + * Reset MMCR0 to its normal value. This will set PMXE and + * clear FC (freeze events) and PMAO (perf mon alert occurred) + * and thus allow interrupts to occur again. + * XXX might want to use MSR.PM to keep the events frozen until + * we get back out of this interrupt. + */ + write_mmcr0(cpuhw, cpuhw->mmcr[0]); + + if (nmi) + nmi_exit(); + else + irq_exit(); +} + +void hw_perf_event_setup(int cpu) +{ + struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); + + if (!ppmu) + return; + memset(cpuhw, 0, sizeof(*cpuhw)); + cpuhw->mmcr[0] = MMCR0_FC; +} + +int register_power_pmu(struct power_pmu *pmu) +{ + if (ppmu) + return -EBUSY; /* something's already registered */ + + ppmu = pmu; + pr_info("%s performance monitor hardware support registered\n", + pmu->name); + +#ifdef MSR_HV + /* + * Use FCHV to ignore kernel events if MSR.HV is set. + */ + if (mfmsr() & MSR_HV) + freeze_events_kernel = MMCR0_FCHV; +#endif /* CONFIG_PPC64 */ + + return 0; +} diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 3c90a3d9173..2a361cdda63 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 31918af3e35..0f4c1c73a6a 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 867f6f66396..c351b3a57fb 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. 
*/ #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index fa21890531d..ca399ba5034 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 018d094d92f..28a4daacdc0 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 75dccb71a04..479574413a9 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 465e498bcb3..df45a7449a6 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include #include #include @@ -527,25 +527,25 @@ void __init iSeries_time_init_early(void) } #endif /* CONFIG_PPC_ISERIES */ -#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32) -DEFINE_PER_CPU(u8, perf_counter_pending); +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) +DEFINE_PER_CPU(u8, perf_event_pending); -void set_perf_counter_pending(void) +void set_perf_event_pending(void) { - get_cpu_var(perf_counter_pending) = 1; + get_cpu_var(perf_event_pending) = 1; set_dec(1); - put_cpu_var(perf_counter_pending); + put_cpu_var(perf_event_pending); } -#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending) -#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0 +#define test_perf_event_pending() __get_cpu_var(perf_event_pending) +#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 -#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ +#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ -#define test_perf_counter_pending() 0 -#define clear_perf_counter_pending() +#define test_perf_event_pending() 0 +#define clear_perf_event_pending() -#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ +#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ /* * For iSeries shared processors, we have to let the hypervisor @@ -573,9 +573,9 @@ void timer_interrupt(struct pt_regs * regs) set_dec(DECREMENTER_MAX); #ifdef CONFIG_PPC32 - if (test_perf_counter_pending()) { - clear_perf_counter_pending(); - perf_counter_do_pending(); + if (test_perf_event_pending()) { + clear_perf_event_pending(); + perf_event_do_pending(); } if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 830bef0a113..e7dae82c128 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include @@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, die("Weird page fault", regs, SIGSEGV); } - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* When running in the kernel we expect faults to occur only to * addresses in user space. 
All other faults represent errors in the @@ -312,7 +312,7 @@ good_area: } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { @@ -323,7 +323,7 @@ good_area: #endif } else { current->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 9efc8bda01b..e382cae678b 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -280,9 +280,9 @@ config PPC_HAVE_PMU_SUPPORT config PPC_PERF_CTRS def_bool y - depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT + depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT help - This enables the powerpc-specific perf_counter back-end. + This enables the powerpc-specific perf_event back-end. config SMP depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1c866efd217..43c0acad716 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -94,7 +94,7 @@ config S390 select HAVE_KVM if 64BIT select HAVE_ARCH_TRACEHOOK select INIT_ALL_POSSIBLE - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config SCHED_OMIT_FRAME_POINTER bool diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h deleted file mode 100644 index 7015188c2cc..00000000000 --- a/arch/s390/include/asm/perf_counter.h +++ /dev/null @@ -1,10 +0,0 @@ -/* - * Performance counter support - s390 specific definitions. - * - * Copyright 2009 Martin Schwidefsky, IBM Corporation. - */ - -static inline void set_perf_counter_pending(void) {} -static inline void clear_perf_counter_pending(void) {} - -#define PERF_COUNTER_INDEX_OFFSET 0 diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h new file mode 100644 index 00000000000..3840cbe7763 --- /dev/null +++ b/arch/s390/include/asm/perf_event.h @@ -0,0 +1,10 @@ +/* + * Performance event support - s390 specific definitions. + * + * Copyright 2009 Martin Schwidefsky, IBM Corporation. 
+ */ + +static inline void set_perf_event_pending(void) {} +static inline void clear_perf_event_pending(void) {} + +#define PERF_EVENT_INDEX_OFFSET 0 diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index c80602d7c88..cb5232df151 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -268,7 +268,7 @@ #define __NR_preadv 328 #define __NR_pwritev 329 #define __NR_rt_tgsigqueueinfo 330 -#define __NR_perf_counter_open 331 +#define __NR_perf_event_open 331 #define NR_syscalls 332 /* diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 88a83366819..624790042d4 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1832,11 +1832,11 @@ compat_sys_rt_tgsigqueueinfo_wrapper: llgtr %r5,%r5 # struct compat_siginfo * jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call - .globl sys_perf_counter_open_wrapper -sys_perf_counter_open_wrapper: - llgtr %r2,%r2 # const struct perf_counter_attr * + .globl sys_perf_event_open_wrapper +sys_perf_event_open_wrapper: + llgtr %r2,%r2 # const struct perf_event_attr * lgfr %r3,%r3 # pid_t lgfr %r4,%r4 # int lgfr %r5,%r5 # int llgfr %r6,%r6 # unsigned long - jg sys_perf_counter_open # branch to system call + jg sys_perf_event_open # branch to system call diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index ad1acd20038..0b5083681e7 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -339,4 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ -SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper) +SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1abbadd497e..6d507462967 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -10,7 +10,7 @@ * Copyright (C) 1995 Linus Torvalds */ -#include +#include #include #include #include @@ -306,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) * interrupts again and then search the VMAs */ local_irq_enable(); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); down_read(&mm->mmap_sem); si_code = SEGV_MAPERR; @@ -366,11 +366,11 @@ good_area: } if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 4df3570fe51..b940424f8cc 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -16,7 +16,7 @@ config SUPERH select HAVE_IOREMAP_PROT if MMU select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h deleted file mode 100644 index d8e6bb9c0cc..00000000000 --- a/arch/sh/include/asm/perf_counter.h +++ /dev/null @@ -1,9 +0,0 @@ 
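/*
 * Illustrative aside (not part of the patch): after this rename the
 * user-visible entry point is sys_perf_event_open() and the attribute
 * structure is struct perf_event_attr.  A minimal userspace sketch of
 * counting the PERF_COUNT_SW_PAGE_FAULTS software event used by the fault
 * handlers above -- assuming a post-rename <linux/perf_event.h> and a raw
 * syscall(2) wrapper, since glibc ships no wrapper for this call -- might
 * look like this:
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;

	/* pid == 0, cpu == -1: measure the calling task on any CPU */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... workload under test would run here ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("page faults: %llu\n", count);
	close(fd);
	return 0;
}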
-#ifndef __ASM_SH_PERF_COUNTER_H -#define __ASM_SH_PERF_COUNTER_H - -/* SH only supports software counters through this interface. */ -static inline void set_perf_counter_pending(void) {} - -#define PERF_COUNTER_INDEX_OFFSET 0 - -#endif /* __ASM_SH_PERF_COUNTER_H */ diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h new file mode 100644 index 00000000000..11a302297ab --- /dev/null +++ b/arch/sh/include/asm/perf_event.h @@ -0,0 +1,9 @@ +#ifndef __ASM_SH_PERF_EVENT_H +#define __ASM_SH_PERF_EVENT_H + +/* SH only supports software events through this interface. */ +static inline void set_perf_event_pending(void) {} + +#define PERF_EVENT_INDEX_OFFSET 0 + +#endif /* __ASM_SH_PERF_EVENT_H */ diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h index 925dd40d9d5..f3fd1b9eb6b 100644 --- a/arch/sh/include/asm/unistd_32.h +++ b/arch/sh/include/asm/unistd_32.h @@ -344,7 +344,7 @@ #define __NR_preadv 333 #define __NR_pwritev 334 #define __NR_rt_tgsigqueueinfo 335 -#define __NR_perf_counter_open 336 +#define __NR_perf_event_open 336 #define NR_syscalls 337 diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h index 2b84bc916bc..343ce8f073e 100644 --- a/arch/sh/include/asm/unistd_64.h +++ b/arch/sh/include/asm/unistd_64.h @@ -384,7 +384,7 @@ #define __NR_preadv 361 #define __NR_pwritev 362 #define __NR_rt_tgsigqueueinfo 363 -#define __NR_perf_counter_open 364 +#define __NR_perf_event_open 364 #ifdef __KERNEL__ diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 16ba225ede8..19fd11dd987 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -352,4 +352,4 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev .long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index af6fb7410c2..5bfde6c7749 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S @@ -390,4 +390,4 @@ sys_call_table: .long sys_preadv .long sys_pwritev .long sys_rt_tgsigqueueinfo - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 781b413ff82..47530104e0a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -157,7 +157,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, if ((regs->sr & SR_IMASK) != SR_IMASK) local_irq_enable(); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -208,11 +208,11 @@ survive: } if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 2dcc48528f7..de0b0e88182 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, /* Not an IO address, so reenable interrupts */ 
local_irq_enable(); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt or have no user @@ -201,11 +201,11 @@ survive: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 86b82348b97..97fca4695e0 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -25,7 +25,7 @@ config SPARC select ARCH_WANT_OPTIONAL_GPIOLIB select RTC_CLASS select RTC_DRV_M48T59 - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG @@ -47,7 +47,7 @@ config SPARC64 select RTC_DRV_BQ4802 select RTC_DRV_SUN4V select RTC_DRV_STARFIRE - select HAVE_PERF_COUNTERS + select HAVE_PERF_EVENTS config ARCH_DEFCONFIG string diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h deleted file mode 100644 index 5d7a8ca0e49..00000000000 --- a/arch/sparc/include/asm/perf_counter.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __ASM_SPARC_PERF_COUNTER_H -#define __ASM_SPARC_PERF_COUNTER_H - -extern void set_perf_counter_pending(void); - -#define PERF_COUNTER_INDEX_OFFSET 0 - -#ifdef CONFIG_PERF_COUNTERS -extern void init_hw_perf_counters(void); -#else -static inline void init_hw_perf_counters(void) { } -#endif - -#endif diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h new file mode 100644 index 00000000000..7e2669894ce --- /dev/null +++ b/arch/sparc/include/asm/perf_event.h @@ -0,0 +1,14 @@ +#ifndef __ASM_SPARC_PERF_EVENT_H +#define __ASM_SPARC_PERF_EVENT_H + +extern void set_perf_event_pending(void); + +#define PERF_EVENT_INDEX_OFFSET 0 + +#ifdef CONFIG_PERF_EVENTS +extern void init_hw_perf_events(void); +#else +static inline void init_hw_perf_events(void) { } +#endif + +#endif diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index 706df669f3b..42f2316c3ea 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -395,7 +395,7 @@ #define __NR_preadv 324 #define __NR_pwritev 325 #define __NR_rt_tgsigqueueinfo 326 -#define __NR_perf_counter_open 327 +#define __NR_perf_event_open 327 #define NR_SYSCALLS 328 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 247cc620cee..3a048fad7ee 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -104,5 +104,5 @@ obj-$(CONFIG_AUDIT) += audit.o audit--$(CONFIG_AUDIT) := compat_audit.o obj-$(CONFIG_COMPAT) += $(audit--y) -pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o +pc--$(CONFIG_PERF_EVENTS) := perf_event.o obj-$(CONFIG_SPARC64) += $(pc--y) diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 378eb53e077..b129611590a 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include @@ -265,7 +265,7 @@ int __init nmi_init(void) } } if (!err) - init_hw_perf_counters(); + init_hw_perf_events(); return err; } diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 68ff0010707..2d94e7a03af 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include @@ 
-15,7 +15,7 @@ /* This code is shared between various users of the performance * counters. Users will be oprofile, pseudo-NMI watchdog, and the - * perf_counter support layer. + * perf_event support layer. */ #define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE) @@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs) old_regs = set_irq_regs(regs); irq_enter(); -#ifdef CONFIG_PERF_COUNTERS - perf_counter_do_pending(); +#ifdef CONFIG_PERF_EVENTS + perf_event_do_pending(); #endif irq_exit(); set_irq_regs(old_regs); } -void set_perf_counter_pending(void) +void set_perf_event_pending(void) { set_softint(1 << PIL_DEFERRED_PCR_WORK); } diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_counter.c deleted file mode 100644 index b1265ce8a05..00000000000 --- a/arch/sparc/kernel/perf_counter.c +++ /dev/null @@ -1,556 +0,0 @@ -/* Performance counter support for sparc64. - * - * Copyright (C) 2009 David S. Miller - * - * This code is based almost entirely upon the x86 perf counter - * code, which is: - * - * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2009 Jaswinder Singh Rajput - * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* Sparc64 chips have two performance counters, 32-bits each, with - * overflow interrupts generated on transition from 0xffffffff to 0. - * The counters are accessed in one go using a 64-bit register. - * - * Both counters are controlled using a single control register. The - * only way to stop all sampling is to clear all of the context (user, - * supervisor, hypervisor) sampling enable bits. But these bits apply - * to both counters, thus the two counters can't be enabled/disabled - * individually. - * - * The control register has two event fields, one for each of the two - * counters. It's thus nearly impossible to have one counter going - * while keeping the other one stopped. Therefore it is possible to - * get overflow interrupts for counters not currently "in use" and - * that condition must be checked in the overflow interrupt handler. - * - * So we use a hack, in that we program inactive counters with the - * "sw_count0" and "sw_count1" events. These count how many times - * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an - * unusual way to encode a NOP and therefore will not trigger in - * normal code. 
- */ - -#define MAX_HWCOUNTERS 2 -#define MAX_PERIOD ((1UL << 32) - 1) - -#define PIC_UPPER_INDEX 0 -#define PIC_LOWER_INDEX 1 - -struct cpu_hw_counters { - struct perf_counter *counters[MAX_HWCOUNTERS]; - unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; - unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; - int enabled; -}; -DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; - -struct perf_event_map { - u16 encoding; - u8 pic_mask; -#define PIC_NONE 0x00 -#define PIC_UPPER 0x01 -#define PIC_LOWER 0x02 -}; - -struct sparc_pmu { - const struct perf_event_map *(*event_map)(int); - int max_events; - int upper_shift; - int lower_shift; - int event_mask; - int hv_bit; - int irq_bit; - int upper_nop; - int lower_nop; -}; - -static const struct perf_event_map ultra3i_perfmon_event_map[] = { - [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, - [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, - [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, - [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, -}; - -static const struct perf_event_map *ultra3i_event_map(int event) -{ - return &ultra3i_perfmon_event_map[event]; -} - -static const struct sparc_pmu ultra3i_pmu = { - .event_map = ultra3i_event_map, - .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), - .upper_shift = 11, - .lower_shift = 4, - .event_mask = 0x3f, - .upper_nop = 0x1c, - .lower_nop = 0x14, -}; - -static const struct perf_event_map niagara2_perfmon_event_map[] = { - [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, - [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, - [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, - [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, - [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, -}; - -static const struct perf_event_map *niagara2_event_map(int event) -{ - return &niagara2_perfmon_event_map[event]; -} - -static const struct sparc_pmu niagara2_pmu = { - .event_map = niagara2_event_map, - .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), - .upper_shift = 19, - .lower_shift = 6, - .event_mask = 0xfff, - .hv_bit = 0x8, - .irq_bit = 0x03, - .upper_nop = 0x220, - .lower_nop = 0x220, -}; - -static const struct sparc_pmu *sparc_pmu __read_mostly; - -static u64 event_encoding(u64 event, int idx) -{ - if (idx == PIC_UPPER_INDEX) - event <<= sparc_pmu->upper_shift; - else - event <<= sparc_pmu->lower_shift; - return event; -} - -static u64 mask_for_index(int idx) -{ - return event_encoding(sparc_pmu->event_mask, idx); -} - -static u64 nop_for_index(int idx) -{ - return event_encoding(idx == PIC_UPPER_INDEX ? 
- sparc_pmu->upper_nop : - sparc_pmu->lower_nop, idx); -} - -static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, - int idx) -{ - u64 val, mask = mask_for_index(idx); - - val = pcr_ops->read(); - pcr_ops->write((val & ~mask) | hwc->config); -} - -static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, - int idx) -{ - u64 mask = mask_for_index(idx); - u64 nop = nop_for_index(idx); - u64 val = pcr_ops->read(); - - pcr_ops->write((val & ~mask) | nop); -} - -void hw_perf_enable(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - u64 val; - int i; - - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - - val = pcr_ops->read(); - - for (i = 0; i < MAX_HWCOUNTERS; i++) { - struct perf_counter *cp = cpuc->counters[i]; - struct hw_perf_counter *hwc; - - if (!cp) - continue; - hwc = &cp->hw; - val |= hwc->config_base; - } - - pcr_ops->write(val); -} - -void hw_perf_disable(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - u64 val; - - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - - val = pcr_ops->read(); - val &= ~(PCR_UTRACE | PCR_STRACE | - sparc_pmu->hv_bit | sparc_pmu->irq_bit); - pcr_ops->write(val); -} - -static u32 read_pmc(int idx) -{ - u64 val; - - read_pic(val); - if (idx == PIC_UPPER_INDEX) - val >>= 32; - - return val & 0xffffffff; -} - -static void write_pmc(int idx, u64 val) -{ - u64 shift, mask, pic; - - shift = 0; - if (idx == PIC_UPPER_INDEX) - shift = 32; - - mask = ((u64) 0xffffffff) << shift; - val <<= shift; - - read_pic(pic); - pic &= ~mask; - pic |= val; - write_pic(pic); -} - -static int sparc_perf_counter_set_period(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) -{ - s64 left = atomic64_read(&hwc->period_left); - s64 period = hwc->sample_period; - int ret = 0; - - if (unlikely(left <= -period)) { - left = period; - atomic64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - if (unlikely(left <= 0)) { - left += period; - atomic64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - if (left > MAX_PERIOD) - left = MAX_PERIOD; - - atomic64_set(&hwc->prev_count, (u64)-left); - - write_pmc(idx, (u64)(-left) & 0xffffffff); - - perf_counter_update_userpage(counter); - - return ret; -} - -static int sparc_pmu_enable(struct perf_counter *counter) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; - int idx = hwc->idx; - - if (test_and_set_bit(idx, cpuc->used_mask)) - return -EAGAIN; - - sparc_pmu_disable_counter(hwc, idx); - - cpuc->counters[idx] = counter; - set_bit(idx, cpuc->active_mask); - - sparc_perf_counter_set_period(counter, hwc, idx); - sparc_pmu_enable_counter(hwc, idx); - perf_counter_update_userpage(counter); - return 0; -} - -static u64 sparc_perf_counter_update(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) -{ - int shift = 64 - 32; - u64 prev_raw_count, new_raw_count; - s64 delta; - -again: - prev_raw_count = atomic64_read(&hwc->prev_count); - new_raw_count = read_pmc(idx); - - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; - - delta = (new_raw_count << shift) - (prev_raw_count << shift); - delta >>= shift; - - atomic64_add(delta, &counter->count); - atomic64_sub(delta, &hwc->period_left); - - return new_raw_count; -} - -static void sparc_pmu_disable(struct perf_counter *counter) -{ - struct cpu_hw_counters *cpuc = 
&__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; - int idx = hwc->idx; - - clear_bit(idx, cpuc->active_mask); - sparc_pmu_disable_counter(hwc, idx); - - barrier(); - - sparc_perf_counter_update(counter, hwc, idx); - cpuc->counters[idx] = NULL; - clear_bit(idx, cpuc->used_mask); - - perf_counter_update_userpage(counter); -} - -static void sparc_pmu_read(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - sparc_perf_counter_update(counter, hwc, hwc->idx); -} - -static void sparc_pmu_unthrottle(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - sparc_pmu_enable_counter(hwc, hwc->idx); -} - -static atomic_t active_counters = ATOMIC_INIT(0); -static DEFINE_MUTEX(pmc_grab_mutex); - -void perf_counter_grab_pmc(void) -{ - if (atomic_inc_not_zero(&active_counters)) - return; - - mutex_lock(&pmc_grab_mutex); - if (atomic_read(&active_counters) == 0) { - if (atomic_read(&nmi_active) > 0) { - on_each_cpu(stop_nmi_watchdog, NULL, 1); - BUG_ON(atomic_read(&nmi_active) != 0); - } - atomic_inc(&active_counters); - } - mutex_unlock(&pmc_grab_mutex); -} - -void perf_counter_release_pmc(void) -{ - if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) { - if (atomic_read(&nmi_active) == 0) - on_each_cpu(start_nmi_watchdog, NULL, 1); - mutex_unlock(&pmc_grab_mutex); - } -} - -static void hw_perf_counter_destroy(struct perf_counter *counter) -{ - perf_counter_release_pmc(); -} - -static int __hw_perf_counter_init(struct perf_counter *counter) -{ - struct perf_counter_attr *attr = &counter->attr; - struct hw_perf_counter *hwc = &counter->hw; - const struct perf_event_map *pmap; - u64 enc; - - if (atomic_read(&nmi_active) < 0) - return -ENODEV; - - if (attr->type != PERF_TYPE_HARDWARE) - return -EOPNOTSUPP; - - if (attr->config >= sparc_pmu->max_events) - return -EINVAL; - - perf_counter_grab_pmc(); - counter->destroy = hw_perf_counter_destroy; - - /* We save the enable bits in the config_base. So to - * turn off sampling just write 'config', and to enable - * things write 'config | config_base'. 
- */ - hwc->config_base = sparc_pmu->irq_bit; - if (!attr->exclude_user) - hwc->config_base |= PCR_UTRACE; - if (!attr->exclude_kernel) - hwc->config_base |= PCR_STRACE; - if (!attr->exclude_hv) - hwc->config_base |= sparc_pmu->hv_bit; - - if (!hwc->sample_period) { - hwc->sample_period = MAX_PERIOD; - hwc->last_period = hwc->sample_period; - atomic64_set(&hwc->period_left, hwc->sample_period); - } - - pmap = sparc_pmu->event_map(attr->config); - - enc = pmap->encoding; - if (pmap->pic_mask & PIC_UPPER) { - hwc->idx = PIC_UPPER_INDEX; - enc <<= sparc_pmu->upper_shift; - } else { - hwc->idx = PIC_LOWER_INDEX; - enc <<= sparc_pmu->lower_shift; - } - - hwc->config |= enc; - return 0; -} - -static const struct pmu pmu = { - .enable = sparc_pmu_enable, - .disable = sparc_pmu_disable, - .read = sparc_pmu_read, - .unthrottle = sparc_pmu_unthrottle, -}; - -const struct pmu *hw_perf_counter_init(struct perf_counter *counter) -{ - int err = __hw_perf_counter_init(counter); - - if (err) - return ERR_PTR(err); - return &pmu; -} - -void perf_counter_print_debug(void) -{ - unsigned long flags; - u64 pcr, pic; - int cpu; - - if (!sparc_pmu) - return; - - local_irq_save(flags); - - cpu = smp_processor_id(); - - pcr = pcr_ops->read(); - read_pic(pic); - - pr_info("\n"); - pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", - cpu, pcr, pic); - - local_irq_restore(flags); -} - -static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, - unsigned long cmd, void *__args) -{ - struct die_args *args = __args; - struct perf_sample_data data; - struct cpu_hw_counters *cpuc; - struct pt_regs *regs; - int idx; - - if (!atomic_read(&active_counters)) - return NOTIFY_DONE; - - switch (cmd) { - case DIE_NMI: - break; - - default: - return NOTIFY_DONE; - } - - regs = args->regs; - - data.addr = 0; - - cpuc = &__get_cpu_var(cpu_hw_counters); - for (idx = 0; idx < MAX_HWCOUNTERS; idx++) { - struct perf_counter *counter = cpuc->counters[idx]; - struct hw_perf_counter *hwc; - u64 val; - - if (!test_bit(idx, cpuc->active_mask)) - continue; - hwc = &counter->hw; - val = sparc_perf_counter_update(counter, hwc, idx); - if (val & (1ULL << 31)) - continue; - - data.period = counter->hw.last_period; - if (!sparc_perf_counter_set_period(counter, hwc, idx)) - continue; - - if (perf_counter_overflow(counter, 1, &data, regs)) - sparc_pmu_disable_counter(hwc, idx); - } - - return NOTIFY_STOP; -} - -static __read_mostly struct notifier_block perf_counter_nmi_notifier = { - .notifier_call = perf_counter_nmi_handler, -}; - -static bool __init supported_pmu(void) -{ - if (!strcmp(sparc_pmu_type, "ultra3i")) { - sparc_pmu = &ultra3i_pmu; - return true; - } - if (!strcmp(sparc_pmu_type, "niagara2")) { - sparc_pmu = &niagara2_pmu; - return true; - } - return false; -} - -void __init init_hw_perf_counters(void) -{ - pr_info("Performance counters: "); - - if (!supported_pmu()) { - pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); - return; - } - - pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); - - /* All sparc64 PMUs currently have 2 counters. But this simple - * driver only supports one active counter at a time. - */ - perf_max_counters = 1; - - register_die_notifier(&perf_counter_nmi_notifier); -} diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c new file mode 100644 index 00000000000..2d6a1b10c81 --- /dev/null +++ b/arch/sparc/kernel/perf_event.c @@ -0,0 +1,556 @@ +/* Performance event support for sparc64. + * + * Copyright (C) 2009 David S. 
Miller + * + * This code is based almost entirely upon the x86 perf event + * code, which is: + * + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* Sparc64 chips have two performance counters, 32-bits each, with + * overflow interrupts generated on transition from 0xffffffff to 0. + * The counters are accessed in one go using a 64-bit register. + * + * Both counters are controlled using a single control register. The + * only way to stop all sampling is to clear all of the context (user, + * supervisor, hypervisor) sampling enable bits. But these bits apply + * to both counters, thus the two counters can't be enabled/disabled + * individually. + * + * The control register has two event fields, one for each of the two + * counters. It's thus nearly impossible to have one counter going + * while keeping the other one stopped. Therefore it is possible to + * get overflow interrupts for counters not currently "in use" and + * that condition must be checked in the overflow interrupt handler. + * + * So we use a hack, in that we program inactive counters with the + * "sw_count0" and "sw_count1" events. These count how many times + * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an + * unusual way to encode a NOP and therefore will not trigger in + * normal code. + */ + +#define MAX_HWEVENTS 2 +#define MAX_PERIOD ((1UL << 32) - 1) + +#define PIC_UPPER_INDEX 0 +#define PIC_LOWER_INDEX 1 + +struct cpu_hw_events { + struct perf_event *events[MAX_HWEVENTS]; + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + int enabled; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; + +struct perf_event_map { + u16 encoding; + u8 pic_mask; +#define PIC_NONE 0x00 +#define PIC_UPPER 0x01 +#define PIC_LOWER 0x02 +}; + +struct sparc_pmu { + const struct perf_event_map *(*event_map)(int); + int max_events; + int upper_shift; + int lower_shift; + int event_mask; + int hv_bit; + int irq_bit; + int upper_nop; + int lower_nop; +}; + +static const struct perf_event_map ultra3i_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, +}; + +static const struct perf_event_map *ultra3i_event_map(int event_id) +{ + return &ultra3i_perfmon_event_map[event_id]; +} + +static const struct sparc_pmu ultra3i_pmu = { + .event_map = ultra3i_event_map, + .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), + .upper_shift = 11, + .lower_shift = 4, + .event_mask = 0x3f, + .upper_nop = 0x1c, + .lower_nop = 0x14, +}; + +static const struct perf_event_map niagara2_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, +}; + 
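/*
 * Illustrative aside (not part of the patch): a worked example of how the
 * tables above are meant to be consumed.  For PERF_COUNT_HW_CACHE_MISSES on
 * ultra3i the map yields encoding 0x0009 with pic_mask == PIC_UPPER, so the
 * event has to land on the upper counter and its select code is shifted by
 * upper_shift (11) into that counter's field of %pcr.  A hypothetical helper
 * doing just that lookup could look like this sketch (build_pcr_bits() is
 * not a function introduced by this patch; the real placement logic lives in
 * __hw_perf_event_init() further down):
 */
static u64 build_pcr_bits(const struct sparc_pmu *pmu, int event_id, int *idx)
{
	const struct perf_event_map *pmap = pmu->event_map(event_id);
	u64 enc = pmap->encoding;

	if (pmap->pic_mask & PIC_UPPER) {
		*idx = PIC_UPPER_INDEX;
		return enc << pmu->upper_shift;	/* e.g. 0x0009 << 11 on ultra3i */
	}
	*idx = PIC_LOWER_INDEX;
	return enc << pmu->lower_shift;		/* e.g. 0x0009 << 4 on ultra3i */
}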
+static const struct perf_event_map *niagara2_event_map(int event_id) +{ + return &niagara2_perfmon_event_map[event_id]; +} + +static const struct sparc_pmu niagara2_pmu = { + .event_map = niagara2_event_map, + .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), + .upper_shift = 19, + .lower_shift = 6, + .event_mask = 0xfff, + .hv_bit = 0x8, + .irq_bit = 0x03, + .upper_nop = 0x220, + .lower_nop = 0x220, +}; + +static const struct sparc_pmu *sparc_pmu __read_mostly; + +static u64 event_encoding(u64 event_id, int idx) +{ + if (idx == PIC_UPPER_INDEX) + event_id <<= sparc_pmu->upper_shift; + else + event_id <<= sparc_pmu->lower_shift; + return event_id; +} + +static u64 mask_for_index(int idx) +{ + return event_encoding(sparc_pmu->event_mask, idx); +} + +static u64 nop_for_index(int idx) +{ + return event_encoding(idx == PIC_UPPER_INDEX ? + sparc_pmu->upper_nop : + sparc_pmu->lower_nop, idx); +} + +static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, + int idx) +{ + u64 val, mask = mask_for_index(idx); + + val = pcr_ops->read(); + pcr_ops->write((val & ~mask) | hwc->config); +} + +static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, + int idx) +{ + u64 mask = mask_for_index(idx); + u64 nop = nop_for_index(idx); + u64 val = pcr_ops->read(); + + pcr_ops->write((val & ~mask) | nop); +} + +void hw_perf_enable(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 val; + int i; + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + val = pcr_ops->read(); + + for (i = 0; i < MAX_HWEVENTS; i++) { + struct perf_event *cp = cpuc->events[i]; + struct hw_perf_event *hwc; + + if (!cp) + continue; + hwc = &cp->hw; + val |= hwc->config_base; + } + + pcr_ops->write(val); +} + +void hw_perf_disable(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 val; + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + + val = pcr_ops->read(); + val &= ~(PCR_UTRACE | PCR_STRACE | + sparc_pmu->hv_bit | sparc_pmu->irq_bit); + pcr_ops->write(val); +} + +static u32 read_pmc(int idx) +{ + u64 val; + + read_pic(val); + if (idx == PIC_UPPER_INDEX) + val >>= 32; + + return val & 0xffffffff; +} + +static void write_pmc(int idx, u64 val) +{ + u64 shift, mask, pic; + + shift = 0; + if (idx == PIC_UPPER_INDEX) + shift = 32; + + mask = ((u64) 0xffffffff) << shift; + val <<= shift; + + read_pic(pic); + pic &= ~mask; + pic |= val; + write_pic(pic); +} + +static int sparc_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + s64 left = atomic64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + if (left > MAX_PERIOD) + left = MAX_PERIOD; + + atomic64_set(&hwc->prev_count, (u64)-left); + + write_pmc(idx, (u64)(-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +static int sparc_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (test_and_set_bit(idx, cpuc->used_mask)) + return -EAGAIN; + + sparc_pmu_disable_event(hwc, idx); + + cpuc->events[idx] = event; + set_bit(idx, cpuc->active_mask); + + sparc_perf_event_set_period(event, hwc, idx); + 
sparc_pmu_enable_event(hwc, idx); + perf_event_update_userpage(event); + return 0; +} + +static u64 sparc_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + int shift = 64 - 32; + u64 prev_raw_count, new_raw_count; + s64 delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = read_pmc(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + atomic64_add(delta, &event->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void sparc_pmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + clear_bit(idx, cpuc->active_mask); + sparc_pmu_disable_event(hwc, idx); + + barrier(); + + sparc_perf_event_update(event, hwc, idx); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void sparc_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + sparc_perf_event_update(event, hwc, hwc->idx); +} + +static void sparc_pmu_unthrottle(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + sparc_pmu_enable_event(hwc, hwc->idx); +} + +static atomic_t active_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmc_grab_mutex); + +void perf_event_grab_pmc(void) +{ + if (atomic_inc_not_zero(&active_events)) + return; + + mutex_lock(&pmc_grab_mutex); + if (atomic_read(&active_events) == 0) { + if (atomic_read(&nmi_active) > 0) { + on_each_cpu(stop_nmi_watchdog, NULL, 1); + BUG_ON(atomic_read(&nmi_active) != 0); + } + atomic_inc(&active_events); + } + mutex_unlock(&pmc_grab_mutex); +} + +void perf_event_release_pmc(void) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { + if (atomic_read(&nmi_active) == 0) + on_each_cpu(start_nmi_watchdog, NULL, 1); + mutex_unlock(&pmc_grab_mutex); + } +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + perf_event_release_pmc(); +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + const struct perf_event_map *pmap; + u64 enc; + + if (atomic_read(&nmi_active) < 0) + return -ENODEV; + + if (attr->type != PERF_TYPE_HARDWARE) + return -EOPNOTSUPP; + + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + + perf_event_grab_pmc(); + event->destroy = hw_perf_event_destroy; + + /* We save the enable bits in the config_base. So to + * turn off sampling just write 'config', and to enable + * things write 'config | config_base'. 
+ */ + hwc->config_base = sparc_pmu->irq_bit; + if (!attr->exclude_user) + hwc->config_base |= PCR_UTRACE; + if (!attr->exclude_kernel) + hwc->config_base |= PCR_STRACE; + if (!attr->exclude_hv) + hwc->config_base |= sparc_pmu->hv_bit; + + if (!hwc->sample_period) { + hwc->sample_period = MAX_PERIOD; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } + + pmap = sparc_pmu->event_map(attr->config); + + enc = pmap->encoding; + if (pmap->pic_mask & PIC_UPPER) { + hwc->idx = PIC_UPPER_INDEX; + enc <<= sparc_pmu->upper_shift; + } else { + hwc->idx = PIC_LOWER_INDEX; + enc <<= sparc_pmu->lower_shift; + } + + hwc->config |= enc; + return 0; +} + +static const struct pmu pmu = { + .enable = sparc_pmu_enable, + .disable = sparc_pmu_disable, + .read = sparc_pmu_read, + .unthrottle = sparc_pmu_unthrottle, +}; + +const struct pmu *hw_perf_event_init(struct perf_event *event) +{ + int err = __hw_perf_event_init(event); + + if (err) + return ERR_PTR(err); + return &pmu; +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + u64 pcr, pic; + int cpu; + + if (!sparc_pmu) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr = pcr_ops->read(); + read_pic(pic); + + pr_info("\n"); + pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", + cpu, pcr, pic); + + local_irq_restore(flags); +} + +static int __kprobes perf_event_nmi_handler(struct notifier_block *self, + unsigned long cmd, void *__args) +{ + struct die_args *args = __args; + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + if (!atomic_read(&active_events)) + return NOTIFY_DONE; + + switch (cmd) { + case DIE_NMI: + break; + + default: + return NOTIFY_DONE; + } + + regs = args->regs; + + data.addr = 0; + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx < MAX_HWEVENTS; idx++) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + u64 val; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + hwc = &event->hw; + val = sparc_perf_event_update(event, hwc, idx); + if (val & (1ULL << 31)) + continue; + + data.period = event->hw.last_period; + if (!sparc_perf_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 1, &data, regs)) + sparc_pmu_disable_event(hwc, idx); + } + + return NOTIFY_STOP; +} + +static __read_mostly struct notifier_block perf_event_nmi_notifier = { + .notifier_call = perf_event_nmi_handler, +}; + +static bool __init supported_pmu(void) +{ + if (!strcmp(sparc_pmu_type, "ultra3i")) { + sparc_pmu = &ultra3i_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "niagara2")) { + sparc_pmu = &niagara2_pmu; + return true; + } + return false; +} + +void __init init_hw_perf_events(void) +{ + pr_info("Performance events: "); + + if (!supported_pmu()) { + pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); + return; + } + + pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); + + /* All sparc64 PMUs currently have 2 events. But this simple + * driver only supports one active event at a time. 
+ */ + perf_max_events = 1; + + register_die_notifier(&perf_event_nmi_notifier); +} diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 04181577cb6..0f1658d3749 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -82,5 +82,5 @@ sys_call_table: /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv -/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open +/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 91b06b7f7ac..009825f6e73 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -83,7 +83,7 @@ sys_call_table32: /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv - .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open + .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open #endif /* CONFIG_COMPAT */ @@ -158,4 +158,4 @@ sys_call_table: /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv - .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open + .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 51c59015b28..e4ff5d1280c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -24,7 +24,7 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE - select HAVE_PERF_COUNTERS if (!M386 && !M486) + select HAVE_PERF_EVENTS if (!M386 && !M486) select HAVE_IOREMAP_PROT select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index ba331bfd111..74619c4f9fd 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -831,5 +831,5 @@ ia32_sys_call_table: .quad compat_sys_preadv .quad compat_sys_pwritev .quad compat_sys_rt_tgsigqueueinfo /* 335 */ - .quad sys_perf_counter_open + .quad sys_perf_event_open ia32_syscall_end: diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 5e3f2044f0d..f5693c81a1d 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) #endif diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h deleted file mode 100644 index e7b7c938ae2..00000000000 --- a/arch/x86/include/asm/perf_counter.h +++ /dev/null @@ -1,108 +0,0 @@ -#ifndef _ASM_X86_PERF_COUNTER_H -#define _ASM_X86_PERF_COUNTER_H - -/* - * Performance counter hw details: - */ - -#define X86_PMC_MAX_GENERIC 8 -#define 
X86_PMC_MAX_FIXED 3 - -#define X86_PMC_IDX_GENERIC 0 -#define X86_PMC_IDX_FIXED 32 -#define X86_PMC_IDX_MAX 64 - -#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 -#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 - -#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 -#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 - -#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) -#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) -#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) -#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) - -/* - * Includes eventsel and unit mask as well: - */ -#define ARCH_PERFMON_EVENT_MASK 0xffff - -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ - (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) - -#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 - -/* - * Intel "Architectural Performance Monitoring" CPUID - * detection/enumeration details: - */ -union cpuid10_eax { - struct { - unsigned int version_id:8; - unsigned int num_counters:8; - unsigned int bit_width:8; - unsigned int mask_length:8; - } split; - unsigned int full; -}; - -union cpuid10_edx { - struct { - unsigned int num_counters_fixed:4; - unsigned int reserved:28; - } split; - unsigned int full; -}; - - -/* - * Fixed-purpose performance counters: - */ - -/* - * All 3 fixed-mode PMCs are configured via this single MSR: - */ -#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d - -/* - * The counts are available in three separate MSRs: - */ - -/* Instr_Retired.Any: */ -#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 -#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) - -/* CPU_CLK_Unhalted.Core: */ -#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a -#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) - -/* CPU_CLK_Unhalted.Ref: */ -#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b -#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) - -/* - * We model BTS tracing as another fixed-mode PMC. - * - * We choose a value in the middle of the fixed counter range, since lower - * values are used by actual fixed counters and higher values are used - * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. 
- */ -#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) - - -#ifdef CONFIG_PERF_COUNTERS -extern void init_hw_perf_counters(void); -extern void perf_counters_lapic_init(void); - -#define PERF_COUNTER_INDEX_OFFSET 0 - -#else -static inline void init_hw_perf_counters(void) { } -static inline void perf_counters_lapic_init(void) { } -#endif - -#endif /* _ASM_X86_PERF_COUNTER_H */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h new file mode 100644 index 00000000000..ad7ce3fd506 --- /dev/null +++ b/arch/x86/include/asm/perf_event.h @@ -0,0 +1,108 @@ +#ifndef _ASM_X86_PERF_EVENT_H +#define _ASM_X86_PERF_EVENT_H + +/* + * Performance event hw details: + */ + +#define X86_PMC_MAX_GENERIC 8 +#define X86_PMC_MAX_FIXED 3 + +#define X86_PMC_IDX_GENERIC 0 +#define X86_PMC_IDX_FIXED 32 +#define X86_PMC_IDX_MAX 64 + +#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 +#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 + +#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 +#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 + +#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) +#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) +#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) + +/* + * Includes eventsel and unit mask as well: + */ +#define ARCH_PERFMON_EVENT_MASK 0xffff + +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ + (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) + +#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 + +/* + * Intel "Architectural Performance Monitoring" CPUID + * detection/enumeration details: + */ +union cpuid10_eax { + struct { + unsigned int version_id:8; + unsigned int num_events:8; + unsigned int bit_width:8; + unsigned int mask_length:8; + } split; + unsigned int full; +}; + +union cpuid10_edx { + struct { + unsigned int num_events_fixed:4; + unsigned int reserved:28; + } split; + unsigned int full; +}; + + +/* + * Fixed-purpose performance events: + */ + +/* + * All 3 fixed-mode PMCs are configured via this single MSR: + */ +#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d + +/* + * The counts are available in three separate MSRs: + */ + +/* Instr_Retired.Any: */ +#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 +#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) + +/* CPU_CLK_Unhalted.Core: */ +#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a +#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) + +/* CPU_CLK_Unhalted.Ref: */ +#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b +#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) + +/* + * We model BTS tracing as another fixed-mode PMC. + * + * We choose a value in the middle of the fixed event range, since lower + * values are used by actual fixed events and higher values are used + * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. 
+ */ +#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) + + +#ifdef CONFIG_PERF_EVENTS +extern void init_hw_perf_events(void); +extern void perf_events_lapic_init(void); + +#define PERF_EVENT_INDEX_OFFSET 0 + +#else +static inline void init_hw_perf_events(void) { } +static inline void perf_events_lapic_init(void) { } +#endif + +#endif /* _ASM_X86_PERF_EVENT_H */ diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 8deaada61bc..6fb3c209a7e 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h @@ -341,7 +341,7 @@ #define __NR_preadv 333 #define __NR_pwritev 334 #define __NR_rt_tgsigqueueinfo 335 -#define __NR_perf_counter_open 336 +#define __NR_perf_event_open 336 #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index b9f3c60de5f..8d3ad0adbc6 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h @@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv) __SYSCALL(__NR_pwritev, sys_pwritev) #define __NR_rt_tgsigqueueinfo 297 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) -#define __NR_perf_counter_open 298 -__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) +#define __NR_perf_event_open 298 +__SYSCALL(__NR_perf_event_open, sys_perf_event_open) #ifndef __NO_STUBS #define __ARCH_WANT_OLD_READDIR diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index a34601f5298..754174d09de 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -14,7 +14,7 @@ * Mikael Pettersson : PM converted to driver model. */ -#include +#include #include #include #include @@ -35,7 +35,7 @@ #include #include -#include +#include #include #include #include @@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void) apic_write(APIC_ESR, 0); } #endif - perf_counters_lapic_init(); + perf_events_lapic_init(); preempt_disable(); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 8dd30638fe4..68537e957a9 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 2fea97eccf7..cc25c2b4a56 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include @@ -869,7 +869,7 @@ void __init identify_boot_cpu(void) #else vgetcpu_set_mode(); #endif - init_hw_perf_counters(); + init_hw_perf_events(); } void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c deleted file mode 100644 index b1f115696c8..00000000000 --- a/arch/x86/kernel/cpu/perf_counter.c +++ /dev/null @@ -1,2298 +0,0 @@ -/* - * Performance counter x86 architecture code - * - * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2009 Jaswinder Singh Rajput - * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra - * Copyright (C) 2009 Intel Corporation, - * - * For licencing details see kernel-base/COPYING - */ - -#include -#include -#include -#include 
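/*
 * Illustrative aside (not part of the patch): the cpuid10_eax/cpuid10_edx
 * unions carried over (and renamed) into the new asm/perf_event.h describe
 * Intel's architectural-perfmon CPUID leaf 0xa.  A minimal sketch of how
 * that leaf would be decoded with them -- hypothetical helper, using the
 * kernel's cpuid() accessor -- could read:
 */
static void enumerate_arch_perfmon(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, unused;

	cpuid(0xa, &eax.full, &ebx, &unused, &edx.full);

	if (!eax.split.version_id)
		return;		/* no architectural perfmon support */

	pr_info("arch perfmon v%d: %d generic events, %d bits wide, %d fixed\n",
		eax.split.version_id,
		eax.split.num_events,		/* was num_counters before the rename */
		eax.split.bit_width,
		edx.split.num_events_fixed);	/* was num_counters_fixed */
}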
-#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -static u64 perf_counter_mask __read_mostly; - -/* The maximal number of PEBS counters: */ -#define MAX_PEBS_COUNTERS 4 - -/* The size of a BTS record in bytes: */ -#define BTS_RECORD_SIZE 24 - -/* The size of a per-cpu BTS buffer in bytes: */ -#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048) - -/* The BTS overflow threshold in bytes from the end of the buffer: */ -#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128) - - -/* - * Bits in the debugctlmsr controlling branch tracing. - */ -#define X86_DEBUGCTL_TR (1 << 6) -#define X86_DEBUGCTL_BTS (1 << 7) -#define X86_DEBUGCTL_BTINT (1 << 8) -#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) -#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) - -/* - * A debug store configuration. - * - * We only support architectures that use 64bit fields. - */ -struct debug_store { - u64 bts_buffer_base; - u64 bts_index; - u64 bts_absolute_maximum; - u64 bts_interrupt_threshold; - u64 pebs_buffer_base; - u64 pebs_index; - u64 pebs_absolute_maximum; - u64 pebs_interrupt_threshold; - u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; -}; - -struct cpu_hw_counters { - struct perf_counter *counters[X86_PMC_IDX_MAX]; - unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - unsigned long interrupts; - int enabled; - struct debug_store *ds; -}; - -/* - * struct x86_pmu - generic x86 pmu - */ -struct x86_pmu { - const char *name; - int version; - int (*handle_irq)(struct pt_regs *); - void (*disable_all)(void); - void (*enable_all)(void); - void (*enable)(struct hw_perf_counter *, int); - void (*disable)(struct hw_perf_counter *, int); - unsigned eventsel; - unsigned perfctr; - u64 (*event_map)(int); - u64 (*raw_event)(u64); - int max_events; - int num_counters; - int num_counters_fixed; - int counter_bits; - u64 counter_mask; - int apic; - u64 max_period; - u64 intel_ctrl; - void (*enable_bts)(u64 config); - void (*disable_bts)(void); -}; - -static struct x86_pmu x86_pmu __read_mostly; - -static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { - .enabled = 1, -}; - -/* - * Not sure about some of these - */ -static const u64 p6_perfmon_event_map[] = -{ - [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, - [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, - [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, -}; - -static u64 p6_pmu_event_map(int hw_event) -{ - return p6_perfmon_event_map[hw_event]; -} - -/* - * Counter setting that is specified not to count anything. - * We use this to effectively disable a counter. - * - * L2_RQSTS with 0 MESI unit mask. - */ -#define P6_NOP_COUNTER 0x0000002EULL - -static u64 p6_pmu_raw_event(u64 hw_event) -{ -#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL -#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL -#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL -#define P6_EVNTSEL_INV_MASK 0x00800000ULL -#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL - -#define P6_EVNTSEL_MASK \ - (P6_EVNTSEL_EVENT_MASK | \ - P6_EVNTSEL_UNIT_MASK | \ - P6_EVNTSEL_EDGE_MASK | \ - P6_EVNTSEL_INV_MASK | \ - P6_EVNTSEL_COUNTER_MASK) - - return hw_event & P6_EVNTSEL_MASK; -} - - -/* - * Intel PerfMon v3. Used on Core2 and later. 
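The ->raw_event() helpers above exist to sanitize a user-supplied raw config before it reaches an event-select MSR: only the event, unit-mask, edge, invert and counter-mask fields may pass through, while control bits such as the enable bit (bit 22) are stripped and re-applied by the driver itself. A minimal userspace sketch of that masking, reusing the P6 field masks defined above (the sample config value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define EVNTSEL_EVENT_MASK    0x000000FFULL
#define EVNTSEL_UNIT_MASK     0x0000FF00ULL
#define EVNTSEL_EDGE_MASK     0x00040000ULL
#define EVNTSEL_INV_MASK      0x00800000ULL
#define EVNTSEL_COUNTER_MASK  0xFF000000ULL

#define EVNTSEL_MASK (EVNTSEL_EVENT_MASK | EVNTSEL_UNIT_MASK | \
		      EVNTSEL_EDGE_MASK | EVNTSEL_INV_MASK | \
		      EVNTSEL_COUNTER_MASK)

int main(void)
{
	/* Hypothetical raw config: event 0x2e, unit mask 0x41, plus a stray
	 * enable bit (1 << 22) that must not leak through from user space. */
	uint64_t raw = 0x412eULL | (1ULL << 22);

	printf("raw config : %#llx\n", (unsigned long long)raw);
	printf("sanitized  : %#llx\n", (unsigned long long)(raw & EVNTSEL_MASK));
	return 0;
}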
- */ -static const u64 intel_perfmon_event_map[] = -{ - [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, - [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, - [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, -}; - -static u64 intel_pmu_event_map(int hw_event) -{ - return intel_perfmon_event_map[hw_event]; -} - -/* - * Generalized hw caching related hw_event table, filled - * in on a per model basis. A value of 0 means - * 'not supported', -1 means 'hw_event makes no sense on - * this CPU', any other value means the raw hw_event - * ID. - */ - -#define C(x) PERF_COUNT_HW_CACHE_##x - -static u64 __read_mostly hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX]; - -static const u64 nehalem_hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(L1D) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ - [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ - [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ - [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ - }, - }, - [ C(L1I ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ - [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(LL ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ - [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ - [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ - [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ - }, - }, - [ C(DTLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ - [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ - [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(ITLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ - [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, - [ C(BPU ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ - [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, -}; - -static const u64 core2_hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(L1D) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ - [ C(RESULT_MISS) 
] = 0x0140, /* L1D_CACHE_LD.I_STATE */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ - [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(L1I ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ - [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(LL ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ - [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ - [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(DTLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ - [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ - [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(ITLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ - [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, - [ C(BPU ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ - [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, -}; - -static const u64 atom_hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(L1D) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ - [ C(RESULT_MISS) ] = 0, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ - [ C(RESULT_MISS) ] = 0, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(L1I ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ - [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(LL ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ - [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ - [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(DTLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ - [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ - [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ 
C(RESULT_MISS) ] = 0, - }, - }, - [ C(ITLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ - [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, - [ C(BPU ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ - [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, -}; - -static u64 intel_pmu_raw_event(u64 hw_event) -{ -#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL -#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL -#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL -#define CORE_EVNTSEL_INV_MASK 0x00800000ULL -#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL - -#define CORE_EVNTSEL_MASK \ - (CORE_EVNTSEL_EVENT_MASK | \ - CORE_EVNTSEL_UNIT_MASK | \ - CORE_EVNTSEL_EDGE_MASK | \ - CORE_EVNTSEL_INV_MASK | \ - CORE_EVNTSEL_COUNTER_MASK) - - return hw_event & CORE_EVNTSEL_MASK; -} - -static const u64 amd_hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(L1D) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ - [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ - [ C(RESULT_MISS) ] = 0, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */ - [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */ - }, - }, - [ C(L1I ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */ - [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */ - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(LL ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */ - [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */ - [ C(RESULT_MISS) ] = 0, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(DTLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ - [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0, - [ C(RESULT_MISS) ] = 0, - }, - }, - [ C(ITLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */ - [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, - [ C(BPU ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. 
*/ - [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, -}; - -/* - * AMD Performance Monitor K7 and later. - */ -static const u64 amd_perfmon_event_map[] = -{ - [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, - [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, -}; - -static u64 amd_pmu_event_map(int hw_event) -{ - return amd_perfmon_event_map[hw_event]; -} - -static u64 amd_pmu_raw_event(u64 hw_event) -{ -#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL -#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL -#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL -#define K7_EVNTSEL_INV_MASK 0x000800000ULL -#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL - -#define K7_EVNTSEL_MASK \ - (K7_EVNTSEL_EVENT_MASK | \ - K7_EVNTSEL_UNIT_MASK | \ - K7_EVNTSEL_EDGE_MASK | \ - K7_EVNTSEL_INV_MASK | \ - K7_EVNTSEL_COUNTER_MASK) - - return hw_event & K7_EVNTSEL_MASK; -} - -/* - * Propagate counter elapsed time into the generic counter. - * Can only be executed on the CPU where the counter is active. - * Returns the delta events processed. - */ -static u64 -x86_perf_counter_update(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) -{ - int shift = 64 - x86_pmu.counter_bits; - u64 prev_raw_count, new_raw_count; - s64 delta; - - if (idx == X86_PMC_IDX_FIXED_BTS) - return 0; - - /* - * Careful: an NMI might modify the previous counter value. - * - * Our tactic to handle this is to first atomically read and - * exchange a new raw count - then add that new-prev delta - * count to the generic counter atomically: - */ -again: - prev_raw_count = atomic64_read(&hwc->prev_count); - rdmsrl(hwc->counter_base + idx, new_raw_count); - - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; - - /* - * Now we have the new raw value and have updated the prev - * timestamp already. We can now calculate the elapsed delta - * (counter-)time and add that to the generic counter. - * - * Careful, not all hw sign-extends above the physical width - * of the count. 
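The closing comment above alludes to counters narrower than 64 bits: the hardware value has to be widened before the subtraction so that a wrap of the physical counter does not produce a huge bogus delta. A small self-contained sketch of the same shift trick used by the delta computation that follows (ordinary userspace C, with a hypothetical 40-bit counter; like the kernel code, it relies on arithmetic right shift of signed values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int counter_bits = 40;                 /* hypothetical counter width */
	int shift = 64 - counter_bits;

	uint64_t prev_raw = 0xFFFFFFFFF0ULL;   /* 16 counts before the wrap  */
	uint64_t new_raw  = 0x0000000010ULL;   /* 16 counts after the wrap   */

	/* Shift both values up so bit (counter_bits - 1) becomes the sign
	 * bit, subtract, then shift back down: the wrap cancels out. */
	int64_t delta = (int64_t)(new_raw << shift) - (int64_t)(prev_raw << shift);
	delta >>= shift;

	printf("delta = %lld counts\n", (long long)delta);   /* prints 32 */
	return 0;
}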
- */ - delta = (new_raw_count << shift) - (prev_raw_count << shift); - delta >>= shift; - - atomic64_add(delta, &counter->count); - atomic64_sub(delta, &hwc->period_left); - - return new_raw_count; -} - -static atomic_t active_counters; -static DEFINE_MUTEX(pmc_reserve_mutex); - -static bool reserve_pmc_hardware(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - int i; - - if (nmi_watchdog == NMI_LOCAL_APIC) - disable_lapic_nmi_watchdog(); - - for (i = 0; i < x86_pmu.num_counters; i++) { - if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) - goto perfctr_fail; - } - - for (i = 0; i < x86_pmu.num_counters; i++) { - if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) - goto eventsel_fail; - } -#endif - - return true; - -#ifdef CONFIG_X86_LOCAL_APIC -eventsel_fail: - for (i--; i >= 0; i--) - release_evntsel_nmi(x86_pmu.eventsel + i); - - i = x86_pmu.num_counters; - -perfctr_fail: - for (i--; i >= 0; i--) - release_perfctr_nmi(x86_pmu.perfctr + i); - - if (nmi_watchdog == NMI_LOCAL_APIC) - enable_lapic_nmi_watchdog(); - - return false; -#endif -} - -static void release_pmc_hardware(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - int i; - - for (i = 0; i < x86_pmu.num_counters; i++) { - release_perfctr_nmi(x86_pmu.perfctr + i); - release_evntsel_nmi(x86_pmu.eventsel + i); - } - - if (nmi_watchdog == NMI_LOCAL_APIC) - enable_lapic_nmi_watchdog(); -#endif -} - -static inline bool bts_available(void) -{ - return x86_pmu.enable_bts != NULL; -} - -static inline void init_debug_store_on_cpu(int cpu) -{ - struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; - - if (!ds) - return; - - wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, - (u32)((u64)(unsigned long)ds), - (u32)((u64)(unsigned long)ds >> 32)); -} - -static inline void fini_debug_store_on_cpu(int cpu) -{ - if (!per_cpu(cpu_hw_counters, cpu).ds) - return; - - wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); -} - -static void release_bts_hardware(void) -{ - int cpu; - - if (!bts_available()) - return; - - get_online_cpus(); - - for_each_online_cpu(cpu) - fini_debug_store_on_cpu(cpu); - - for_each_possible_cpu(cpu) { - struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; - - if (!ds) - continue; - - per_cpu(cpu_hw_counters, cpu).ds = NULL; - - kfree((void *)(unsigned long)ds->bts_buffer_base); - kfree(ds); - } - - put_online_cpus(); -} - -static int reserve_bts_hardware(void) -{ - int cpu, err = 0; - - if (!bts_available()) - return 0; - - get_online_cpus(); - - for_each_possible_cpu(cpu) { - struct debug_store *ds; - void *buffer; - - err = -ENOMEM; - buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); - if (unlikely(!buffer)) - break; - - ds = kzalloc(sizeof(*ds), GFP_KERNEL); - if (unlikely(!ds)) { - kfree(buffer); - break; - } - - ds->bts_buffer_base = (u64)(unsigned long)buffer; - ds->bts_index = ds->bts_buffer_base; - ds->bts_absolute_maximum = - ds->bts_buffer_base + BTS_BUFFER_SIZE; - ds->bts_interrupt_threshold = - ds->bts_absolute_maximum - BTS_OVFL_TH; - - per_cpu(cpu_hw_counters, cpu).ds = ds; - err = 0; - } - - if (err) - release_bts_hardware(); - else { - for_each_online_cpu(cpu) - init_debug_store_on_cpu(cpu); - } - - put_online_cpus(); - - return err; -} - -static void hw_perf_counter_destroy(struct perf_counter *counter) -{ - if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { - release_pmc_hardware(); - release_bts_hardware(); - mutex_unlock(&pmc_reserve_mutex); - } -} - -static inline int x86_pmu_initialized(void) -{ - return x86_pmu.handle_irq != NULL; -} - -static inline int -set_ext_hw_attr(struct hw_perf_counter *hwc, struct 
perf_counter_attr *attr) -{ - unsigned int cache_type, cache_op, cache_result; - u64 config, val; - - config = attr->config; - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return -EINVAL; - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return -EINVAL; - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return -EINVAL; - - val = hw_cache_event_ids[cache_type][cache_op][cache_result]; - - if (val == 0) - return -ENOENT; - - if (val == -1) - return -EINVAL; - - hwc->config |= val; - - return 0; -} - -static void intel_pmu_enable_bts(u64 config) -{ - unsigned long debugctlmsr; - - debugctlmsr = get_debugctlmsr(); - - debugctlmsr |= X86_DEBUGCTL_TR; - debugctlmsr |= X86_DEBUGCTL_BTS; - debugctlmsr |= X86_DEBUGCTL_BTINT; - - if (!(config & ARCH_PERFMON_EVENTSEL_OS)) - debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; - - if (!(config & ARCH_PERFMON_EVENTSEL_USR)) - debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; - - update_debugctlmsr(debugctlmsr); -} - -static void intel_pmu_disable_bts(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - unsigned long debugctlmsr; - - if (!cpuc->ds) - return; - - debugctlmsr = get_debugctlmsr(); - - debugctlmsr &= - ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | - X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); - - update_debugctlmsr(debugctlmsr); -} - -/* - * Setup the hardware configuration for a given attr_type - */ -static int __hw_perf_counter_init(struct perf_counter *counter) -{ - struct perf_counter_attr *attr = &counter->attr; - struct hw_perf_counter *hwc = &counter->hw; - u64 config; - int err; - - if (!x86_pmu_initialized()) - return -ENODEV; - - err = 0; - if (!atomic_inc_not_zero(&active_counters)) { - mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&active_counters) == 0) { - if (!reserve_pmc_hardware()) - err = -EBUSY; - else - err = reserve_bts_hardware(); - } - if (!err) - atomic_inc(&active_counters); - mutex_unlock(&pmc_reserve_mutex); - } - if (err) - return err; - - counter->destroy = hw_perf_counter_destroy; - - /* - * Generate PMC IRQs: - * (keep 'enabled' bit clear for now) - */ - hwc->config = ARCH_PERFMON_EVENTSEL_INT; - - /* - * Count user and OS events unless requested not to. - */ - if (!attr->exclude_user) - hwc->config |= ARCH_PERFMON_EVENTSEL_USR; - if (!attr->exclude_kernel) - hwc->config |= ARCH_PERFMON_EVENTSEL_OS; - - if (!hwc->sample_period) { - hwc->sample_period = x86_pmu.max_period; - hwc->last_period = hwc->sample_period; - atomic64_set(&hwc->period_left, hwc->sample_period); - } else { - /* - * If we have a PMU initialized but no APIC - * interrupts, we cannot sample hardware - * counters (user-space has to fall back and - * sample via a hrtimer based software counter): - */ - if (!x86_pmu.apic) - return -EOPNOTSUPP; - } - - /* - * Raw hw_event type provide the config in the hw_event structure - */ - if (attr->type == PERF_TYPE_RAW) { - hwc->config |= x86_pmu.raw_event(attr->config); - return 0; - } - - if (attr->type == PERF_TYPE_HW_CACHE) - return set_ext_hw_attr(hwc, attr); - - if (attr->config >= x86_pmu.max_events) - return -EINVAL; - - /* - * The generic map: - */ - config = x86_pmu.event_map(attr->config); - - if (config == 0) - return -ENOENT; - - if (config == -1LL) - return -EINVAL; - - /* - * Branch tracing: - */ - if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && - (hwc->sample_period == 1)) { - /* BTS is not supported by this architecture. 
*/ - if (!bts_available()) - return -EOPNOTSUPP; - - /* BTS is currently only allowed for user-mode. */ - if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) - return -EOPNOTSUPP; - } - - hwc->config |= config; - - return 0; -} - -static void p6_pmu_disable_all(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - u64 val; - - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - barrier(); - - /* p6 only has one enable register */ - rdmsrl(MSR_P6_EVNTSEL0, val); - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_P6_EVNTSEL0, val); -} - -static void intel_pmu_disable_all(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - barrier(); - - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); - - if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) - intel_pmu_disable_bts(); -} - -static void amd_pmu_disable_all(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - int idx; - - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - /* - * ensure we write the disable before we start disabling the - * counters proper, so that amd_pmu_enable_counter() does the - * right thing. - */ - barrier(); - - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - u64 val; - - if (!test_bit(idx, cpuc->active_mask)) - continue; - rdmsrl(MSR_K7_EVNTSEL0 + idx, val); - if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) - continue; - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_K7_EVNTSEL0 + idx, val); - } -} - -void hw_perf_disable(void) -{ - if (!x86_pmu_initialized()) - return; - return x86_pmu.disable_all(); -} - -static void p6_pmu_enable_all(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - unsigned long val; - - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - - /* p6 only has one enable register */ - rdmsrl(MSR_P6_EVNTSEL0, val); - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_P6_EVNTSEL0, val); -} - -static void intel_pmu_enable_all(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); - - if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { - struct perf_counter *counter = - cpuc->counters[X86_PMC_IDX_FIXED_BTS]; - - if (WARN_ON_ONCE(!counter)) - return; - - intel_pmu_enable_bts(counter->hw.config); - } -} - -static void amd_pmu_enable_all(void) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - int idx; - - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - struct perf_counter *counter = cpuc->counters[idx]; - u64 val; - - if (!test_bit(idx, cpuc->active_mask)) - continue; - - val = counter->hw.config; - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_K7_EVNTSEL0 + idx, val); - } -} - -void hw_perf_enable(void) -{ - if (!x86_pmu_initialized()) - return; - x86_pmu.enable_all(); -} - -static inline u64 intel_pmu_get_status(void) -{ - u64 status; - - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); - - return status; -} - -static inline void intel_pmu_ack_status(u64 ack) -{ - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); -} - -static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) -{ - (void)checking_wrmsrl(hwc->config_base + idx, - hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); -} - -static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) -{ - 
(void)checking_wrmsrl(hwc->config_base + idx, hwc->config); -} - -static inline void -intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) -{ - int idx = __idx - X86_PMC_IDX_FIXED; - u64 ctrl_val, mask; - - mask = 0xfULL << (idx * 4); - - rdmsrl(hwc->config_base, ctrl_val); - ctrl_val &= ~mask; - (void)checking_wrmsrl(hwc->config_base, ctrl_val); -} - -static inline void -p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - u64 val = P6_NOP_COUNTER; - - if (cpuc->enabled) - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; - - (void)checking_wrmsrl(hwc->config_base + idx, val); -} - -static inline void -intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) -{ - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { - intel_pmu_disable_bts(); - return; - } - - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_disable_fixed(hwc, idx); - return; - } - - x86_pmu_disable_counter(hwc, idx); -} - -static inline void -amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) -{ - x86_pmu_disable_counter(hwc, idx); -} - -static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); - -/* - * Set the next IRQ period, based on the hwc->period_left value. - * To be called with the counter disabled in hw: - */ -static int -x86_perf_counter_set_period(struct perf_counter *counter, - struct hw_perf_counter *hwc, int idx) -{ - s64 left = atomic64_read(&hwc->period_left); - s64 period = hwc->sample_period; - int err, ret = 0; - - if (idx == X86_PMC_IDX_FIXED_BTS) - return 0; - - /* - * If we are way outside a reasoable range then just skip forward: - */ - if (unlikely(left <= -period)) { - left = period; - atomic64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - if (unlikely(left <= 0)) { - left += period; - atomic64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - /* - * Quirk: certain CPUs dont like it if just 1 hw_event is left: - */ - if (unlikely(left < 2)) - left = 2; - - if (left > x86_pmu.max_period) - left = x86_pmu.max_period; - - per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; - - /* - * The hw counter starts counting from this counter offset, - * mark it to be able to extra future deltas: - */ - atomic64_set(&hwc->prev_count, (u64)-left); - - err = checking_wrmsrl(hwc->counter_base + idx, - (u64)(-left) & x86_pmu.counter_mask); - - perf_counter_update_userpage(counter); - - return ret; -} - -static inline void -intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) -{ - int idx = __idx - X86_PMC_IDX_FIXED; - u64 ctrl_val, bits, mask; - int err; - - /* - * Enable IRQ generation (0x8), - * and enable ring-3 counting (0x2) and ring-0 counting (0x1) - * if requested: - */ - bits = 0x8ULL; - if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) - bits |= 0x2; - if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) - bits |= 0x1; - bits <<= (idx * 4); - mask = 0xfULL << (idx * 4); - - rdmsrl(hwc->config_base, ctrl_val); - ctrl_val &= ~mask; - ctrl_val |= bits; - err = checking_wrmsrl(hwc->config_base, ctrl_val); -} - -static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - u64 val; - - val = hwc->config; - if (cpuc->enabled) - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; - - (void)checking_wrmsrl(hwc->config_base + idx, val); -} - - -static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) -{ - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 
- if (!__get_cpu_var(cpu_hw_counters).enabled) - return; - - intel_pmu_enable_bts(hwc->config); - return; - } - - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_enable_fixed(hwc, idx); - return; - } - - x86_pmu_enable_counter(hwc, idx); -} - -static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - - if (cpuc->enabled) - x86_pmu_enable_counter(hwc, idx); -} - -static int -fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) -{ - unsigned int hw_event; - - hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK; - - if (unlikely((hw_event == - x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && - (hwc->sample_period == 1))) - return X86_PMC_IDX_FIXED_BTS; - - if (!x86_pmu.num_counters_fixed) - return -1; - - if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) - return X86_PMC_IDX_FIXED_INSTRUCTIONS; - if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) - return X86_PMC_IDX_FIXED_CPU_CYCLES; - if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) - return X86_PMC_IDX_FIXED_BUS_CYCLES; - - return -1; -} - -/* - * Find a PMC slot for the freshly enabled / scheduled in counter: - */ -static int x86_pmu_enable(struct perf_counter *counter) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; - int idx; - - idx = fixed_mode_idx(counter, hwc); - if (idx == X86_PMC_IDX_FIXED_BTS) { - /* BTS is already occupied. */ - if (test_and_set_bit(idx, cpuc->used_mask)) - return -EAGAIN; - - hwc->config_base = 0; - hwc->counter_base = 0; - hwc->idx = idx; - } else if (idx >= 0) { - /* - * Try to get the fixed counter, if that is already taken - * then try to get a generic counter: - */ - if (test_and_set_bit(idx, cpuc->used_mask)) - goto try_generic; - - hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; - /* - * We set it so that counter_base + idx in wrmsr/rdmsr maps to - * MSR_ARCH_PERFMON_FIXED_CTR0 ... 
CTR2: - */ - hwc->counter_base = - MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; - hwc->idx = idx; - } else { - idx = hwc->idx; - /* Try to get the previous generic counter again */ - if (test_and_set_bit(idx, cpuc->used_mask)) { -try_generic: - idx = find_first_zero_bit(cpuc->used_mask, - x86_pmu.num_counters); - if (idx == x86_pmu.num_counters) - return -EAGAIN; - - set_bit(idx, cpuc->used_mask); - hwc->idx = idx; - } - hwc->config_base = x86_pmu.eventsel; - hwc->counter_base = x86_pmu.perfctr; - } - - perf_counters_lapic_init(); - - x86_pmu.disable(hwc, idx); - - cpuc->counters[idx] = counter; - set_bit(idx, cpuc->active_mask); - - x86_perf_counter_set_period(counter, hwc, idx); - x86_pmu.enable(hwc, idx); - - perf_counter_update_userpage(counter); - - return 0; -} - -static void x86_pmu_unthrottle(struct perf_counter *counter) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; - - if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || - cpuc->counters[hwc->idx] != counter)) - return; - - x86_pmu.enable(hwc, hwc->idx); -} - -void perf_counter_print_debug(void) -{ - u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; - struct cpu_hw_counters *cpuc; - unsigned long flags; - int cpu, idx; - - if (!x86_pmu.num_counters) - return; - - local_irq_save(flags); - - cpu = smp_processor_id(); - cpuc = &per_cpu(cpu_hw_counters, cpu); - - if (x86_pmu.version >= 2) { - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); - rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); - rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); - - pr_info("\n"); - pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); - pr_info("CPU#%d: status: %016llx\n", cpu, status); - pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); - pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); - } - pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); - - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); - rdmsrl(x86_pmu.perfctr + idx, pmc_count); - - prev_left = per_cpu(pmc_prev_left[idx], cpu); - - pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", - cpu, idx, pmc_ctrl); - pr_info("CPU#%d: gen-PMC%d count: %016llx\n", - cpu, idx, pmc_count); - pr_info("CPU#%d: gen-PMC%d left: %016llx\n", - cpu, idx, prev_left); - } - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { - rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); - - pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", - cpu, idx, pmc_count); - } - local_irq_restore(flags); -} - -static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) -{ - struct debug_store *ds = cpuc->ds; - struct bts_record { - u64 from; - u64 to; - u64 flags; - }; - struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; - struct bts_record *at, *top; - struct perf_output_handle handle; - struct perf_event_header header; - struct perf_sample_data data; - struct pt_regs regs; - - if (!counter) - return; - - if (!ds) - return; - - at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; - top = (struct bts_record *)(unsigned long)ds->bts_index; - - if (top <= at) - return; - - ds->bts_index = ds->bts_buffer_base; - - - data.period = counter->hw.last_period; - data.addr = 0; - regs.ip = 0; - - /* - * Prepare a generic sample, i.e. fill in the invariant fields. - * We will overwrite the from and to address before we output - * the sample. 
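For reference, a BTS record as drained here is just three 64-bit words (branch from, branch to, flags), and the valid region of the per-cpu buffer is bounded by ds->bts_buffer_base and ds->bts_index. A toy userspace walk over such a buffer (fabricated addresses, mirroring the at/top loop that follows):

#include <stdio.h>
#include <stdint.h>

struct bts_record {
	uint64_t from;		/* branch source address      */
	uint64_t to;		/* branch destination address */
	uint64_t flags;
};

int main(void)
{
	/* Pretend the hardware logged two branches into a small buffer. */
	struct bts_record buf[4] = {
		{ 0x400100, 0x400200, 0 },
		{ 0x400210, 0x400300, 0 },
	};
	struct bts_record *at  = &buf[0];	/* ds->bts_buffer_base */
	struct bts_record *top = &buf[2];	/* ds->bts_index       */

	for (; at < top; at++)
		printf("branch %#llx -> %#llx\n",
		       (unsigned long long)at->from,
		       (unsigned long long)at->to);
	return 0;
}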
- */ - perf_prepare_sample(&header, &data, counter, ®s); - - if (perf_output_begin(&handle, counter, - header.size * (top - at), 1, 1)) - return; - - for (; at < top; at++) { - data.ip = at->from; - data.addr = at->to; - - perf_output_sample(&handle, &header, &data, counter); - } - - perf_output_end(&handle); - - /* There's new data available. */ - counter->hw.interrupts++; - counter->pending_kill = POLL_IN; -} - -static void x86_pmu_disable(struct perf_counter *counter) -{ - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); - struct hw_perf_counter *hwc = &counter->hw; - int idx = hwc->idx; - - /* - * Must be done before we disable, otherwise the nmi handler - * could reenable again: - */ - clear_bit(idx, cpuc->active_mask); - x86_pmu.disable(hwc, idx); - - /* - * Make sure the cleared pointer becomes visible before we - * (potentially) free the counter: - */ - barrier(); - - /* - * Drain the remaining delta count out of a counter - * that we are disabling: - */ - x86_perf_counter_update(counter, hwc, idx); - - /* Drain the remaining BTS records. */ - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) - intel_pmu_drain_bts_buffer(cpuc); - - cpuc->counters[idx] = NULL; - clear_bit(idx, cpuc->used_mask); - - perf_counter_update_userpage(counter); -} - -/* - * Save and restart an expired counter. Called by NMI contexts, - * so it has to be careful about preempting normal counter ops: - */ -static int intel_pmu_save_and_restart(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - int idx = hwc->idx; - int ret; - - x86_perf_counter_update(counter, hwc, idx); - ret = x86_perf_counter_set_period(counter, hwc, idx); - - if (counter->state == PERF_COUNTER_STATE_ACTIVE) - intel_pmu_enable_counter(hwc, idx); - - return ret; -} - -static void intel_pmu_reset(void) -{ - struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; - unsigned long flags; - int idx; - - if (!x86_pmu.num_counters) - return; - - local_irq_save(flags); - - printk("clearing PMU state on CPU#%d\n", smp_processor_id()); - - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); - checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); - } - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { - checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); - } - if (ds) - ds->bts_index = ds->bts_buffer_base; - - local_irq_restore(flags); -} - -static int p6_pmu_handle_irq(struct pt_regs *regs) -{ - struct perf_sample_data data; - struct cpu_hw_counters *cpuc; - struct perf_counter *counter; - struct hw_perf_counter *hwc; - int idx, handled = 0; - u64 val; - - data.addr = 0; - - cpuc = &__get_cpu_var(cpu_hw_counters); - - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - if (!test_bit(idx, cpuc->active_mask)) - continue; - - counter = cpuc->counters[idx]; - hwc = &counter->hw; - - val = x86_perf_counter_update(counter, hwc, idx); - if (val & (1ULL << (x86_pmu.counter_bits - 1))) - continue; - - /* - * counter overflow - */ - handled = 1; - data.period = counter->hw.last_period; - - if (!x86_perf_counter_set_period(counter, hwc, idx)) - continue; - - if (perf_counter_overflow(counter, 1, &data, regs)) - p6_pmu_disable_counter(hwc, idx); - } - - if (handled) - inc_irq_stat(apic_perf_irqs); - - return handled; -} - -/* - * This handler is triggered by the local APIC, so the APIC IRQ handling - * rules apply: - */ -static int intel_pmu_handle_irq(struct pt_regs *regs) -{ - struct perf_sample_data data; - struct cpu_hw_counters *cpuc; - int bit, loops; - u64 ack, 
status; - - data.addr = 0; - - cpuc = &__get_cpu_var(cpu_hw_counters); - - perf_disable(); - intel_pmu_drain_bts_buffer(cpuc); - status = intel_pmu_get_status(); - if (!status) { - perf_enable(); - return 0; - } - - loops = 0; -again: - if (++loops > 100) { - WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); - perf_counter_print_debug(); - intel_pmu_reset(); - perf_enable(); - return 1; - } - - inc_irq_stat(apic_perf_irqs); - ack = status; - for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { - struct perf_counter *counter = cpuc->counters[bit]; - - clear_bit(bit, (unsigned long *) &status); - if (!test_bit(bit, cpuc->active_mask)) - continue; - - if (!intel_pmu_save_and_restart(counter)) - continue; - - data.period = counter->hw.last_period; - - if (perf_counter_overflow(counter, 1, &data, regs)) - intel_pmu_disable_counter(&counter->hw, bit); - } - - intel_pmu_ack_status(ack); - - /* - * Repeat if there is more work to be done: - */ - status = intel_pmu_get_status(); - if (status) - goto again; - - perf_enable(); - - return 1; -} - -static int amd_pmu_handle_irq(struct pt_regs *regs) -{ - struct perf_sample_data data; - struct cpu_hw_counters *cpuc; - struct perf_counter *counter; - struct hw_perf_counter *hwc; - int idx, handled = 0; - u64 val; - - data.addr = 0; - - cpuc = &__get_cpu_var(cpu_hw_counters); - - for (idx = 0; idx < x86_pmu.num_counters; idx++) { - if (!test_bit(idx, cpuc->active_mask)) - continue; - - counter = cpuc->counters[idx]; - hwc = &counter->hw; - - val = x86_perf_counter_update(counter, hwc, idx); - if (val & (1ULL << (x86_pmu.counter_bits - 1))) - continue; - - /* - * counter overflow - */ - handled = 1; - data.period = counter->hw.last_period; - - if (!x86_perf_counter_set_period(counter, hwc, idx)) - continue; - - if (perf_counter_overflow(counter, 1, &data, regs)) - amd_pmu_disable_counter(hwc, idx); - } - - if (handled) - inc_irq_stat(apic_perf_irqs); - - return handled; -} - -void smp_perf_pending_interrupt(struct pt_regs *regs) -{ - irq_enter(); - ack_APIC_irq(); - inc_irq_stat(apic_pending_irqs); - perf_counter_do_pending(); - irq_exit(); -} - -void set_perf_counter_pending(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - apic->send_IPI_self(LOCAL_PENDING_VECTOR); -#endif -} - -void perf_counters_lapic_init(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - if (!x86_pmu.apic || !x86_pmu_initialized()) - return; - - /* - * Always use NMI for PMU - */ - apic_write(APIC_LVTPC, APIC_DM_NMI); -#endif -} - -static int __kprobes -perf_counter_nmi_handler(struct notifier_block *self, - unsigned long cmd, void *__args) -{ - struct die_args *args = __args; - struct pt_regs *regs; - - if (!atomic_read(&active_counters)) - return NOTIFY_DONE; - - switch (cmd) { - case DIE_NMI: - case DIE_NMI_IPI: - break; - - default: - return NOTIFY_DONE; - } - - regs = args->regs; - -#ifdef CONFIG_X86_LOCAL_APIC - apic_write(APIC_LVTPC, APIC_DM_NMI); -#endif - /* - * Can't rely on the handled return value to say it was our NMI, two - * counters could trigger 'simultaneously' raising two back-to-back NMIs. - * - * If the first NMI handles both, the latter will be empty and daze - * the CPU. 
- */ - x86_pmu.handle_irq(regs); - - return NOTIFY_STOP; -} - -static __read_mostly struct notifier_block perf_counter_nmi_notifier = { - .notifier_call = perf_counter_nmi_handler, - .next = NULL, - .priority = 1 -}; - -static struct x86_pmu p6_pmu = { - .name = "p6", - .handle_irq = p6_pmu_handle_irq, - .disable_all = p6_pmu_disable_all, - .enable_all = p6_pmu_enable_all, - .enable = p6_pmu_enable_counter, - .disable = p6_pmu_disable_counter, - .eventsel = MSR_P6_EVNTSEL0, - .perfctr = MSR_P6_PERFCTR0, - .event_map = p6_pmu_event_map, - .raw_event = p6_pmu_raw_event, - .max_events = ARRAY_SIZE(p6_perfmon_event_map), - .apic = 1, - .max_period = (1ULL << 31) - 1, - .version = 0, - .num_counters = 2, - /* - * Counters have 40 bits implemented. However they are designed such - * that bits [32-39] are sign extensions of bit 31. As such the - * effective width of a counter for P6-like PMU is 32 bits only. - * - * See IA-32 Intel Architecture Software developer manual Vol 3B - */ - .counter_bits = 32, - .counter_mask = (1ULL << 32) - 1, -}; - -static struct x86_pmu intel_pmu = { - .name = "Intel", - .handle_irq = intel_pmu_handle_irq, - .disable_all = intel_pmu_disable_all, - .enable_all = intel_pmu_enable_all, - .enable = intel_pmu_enable_counter, - .disable = intel_pmu_disable_counter, - .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, - .perfctr = MSR_ARCH_PERFMON_PERFCTR0, - .event_map = intel_pmu_event_map, - .raw_event = intel_pmu_raw_event, - .max_events = ARRAY_SIZE(intel_perfmon_event_map), - .apic = 1, - /* - * Intel PMCs cannot be accessed sanely above 32 bit width, - * so we install an artificial 1<<31 period regardless of - * the generic counter period: - */ - .max_period = (1ULL << 31) - 1, - .enable_bts = intel_pmu_enable_bts, - .disable_bts = intel_pmu_disable_bts, -}; - -static struct x86_pmu amd_pmu = { - .name = "AMD", - .handle_irq = amd_pmu_handle_irq, - .disable_all = amd_pmu_disable_all, - .enable_all = amd_pmu_enable_all, - .enable = amd_pmu_enable_counter, - .disable = amd_pmu_disable_counter, - .eventsel = MSR_K7_EVNTSEL0, - .perfctr = MSR_K7_PERFCTR0, - .event_map = amd_pmu_event_map, - .raw_event = amd_pmu_raw_event, - .max_events = ARRAY_SIZE(amd_perfmon_event_map), - .num_counters = 4, - .counter_bits = 48, - .counter_mask = (1ULL << 48) - 1, - .apic = 1, - /* use highest bit to detect overflow */ - .max_period = (1ULL << 47) - 1, -}; - -static int p6_pmu_init(void) -{ - switch (boot_cpu_data.x86_model) { - case 1: - case 3: /* Pentium Pro */ - case 5: - case 6: /* Pentium II */ - case 7: - case 8: - case 11: /* Pentium III */ - break; - case 9: - case 13: - /* Pentium M */ - break; - default: - pr_cont("unsupported p6 CPU model %d ", - boot_cpu_data.x86_model); - return -ENODEV; - } - - x86_pmu = p6_pmu; - - if (!cpu_has_apic) { - pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); - pr_info("no hardware sampling interrupt available.\n"); - x86_pmu.apic = 0; - } - - return 0; -} - -static int intel_pmu_init(void) -{ - union cpuid10_edx edx; - union cpuid10_eax eax; - unsigned int unused; - unsigned int ebx; - int version; - - if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { - /* check for P6 processor family */ - if (boot_cpu_data.x86 == 6) { - return p6_pmu_init(); - } else { - return -ENODEV; - } - } - - /* - * Check whether the Architectural PerfMon supports - * Branch Misses Retired hw_event or not. 
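The query below is CPUID leaf 0xA ("Architectural Performance Monitoring"), whose EAX/EDX words are decoded through the cpuid10_eax/cpuid10_edx bitfields declared in the header earlier in this patch. The same enumeration can be reproduced from userspace on an Intel machine with a short sketch like the following (using the compiler-provided <cpuid.h> helper; field names mirror the new header):

#include <stdio.h>
#include <cpuid.h>

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	union cpuid10_eax ax;
	union cpuid10_edx dx;

	if (!__get_cpuid(10, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0xA not available\n");
		return 1;
	}
	ax.full = eax;
	dx.full = edx;

	printf("perfmon version  : %u\n", ax.split.version_id);
	printf("generic counters : %u (%u bits wide)\n",
	       ax.split.num_events, ax.split.bit_width);
	printf("fixed counters   : %u\n", dx.split.num_events_fixed);
	return 0;
}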
- */ - cpuid(10, &eax.full, &ebx, &unused, &edx.full); - if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) - return -ENODEV; - - version = eax.split.version_id; - if (version < 2) - return -ENODEV; - - x86_pmu = intel_pmu; - x86_pmu.version = version; - x86_pmu.num_counters = eax.split.num_counters; - x86_pmu.counter_bits = eax.split.bit_width; - x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; - - /* - * Quirk: v2 perfmon does not report fixed-purpose counters, so - * assume at least 3 counters: - */ - x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); - - /* - * Install the hw-cache-events table: - */ - switch (boot_cpu_data.x86_model) { - case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ - case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ - case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ - case 29: /* six-core 45 nm xeon "Dunnington" */ - memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - - pr_cont("Core2 events, "); - break; - default: - case 26: - memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - - pr_cont("Nehalem/Corei7 events, "); - break; - case 28: - memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - - pr_cont("Atom events, "); - break; - } - return 0; -} - -static int amd_pmu_init(void) -{ - /* Performance-monitoring supported from K7 and later: */ - if (boot_cpu_data.x86 < 6) - return -ENODEV; - - x86_pmu = amd_pmu; - - /* Events are common for all AMDs */ - memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - - return 0; -} - -void __init init_hw_perf_counters(void) -{ - int err; - - pr_info("Performance Counters: "); - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - err = intel_pmu_init(); - break; - case X86_VENDOR_AMD: - err = amd_pmu_init(); - break; - default: - return; - } - if (err != 0) { - pr_cont("no PMU driver, software counters only.\n"); - return; - } - - pr_cont("%s PMU driver.\n", x86_pmu.name); - - if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { - WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", - x86_pmu.num_counters, X86_PMC_MAX_GENERIC); - x86_pmu.num_counters = X86_PMC_MAX_GENERIC; - } - perf_counter_mask = (1 << x86_pmu.num_counters) - 1; - perf_max_counters = x86_pmu.num_counters; - - if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { - WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", - x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); - x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; - } - - perf_counter_mask |= - ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; - x86_pmu.intel_ctrl = perf_counter_mask; - - perf_counters_lapic_init(); - register_die_notifier(&perf_counter_nmi_notifier); - - pr_info("... version: %d\n", x86_pmu.version); - pr_info("... bit width: %d\n", x86_pmu.counter_bits); - pr_info("... generic counters: %d\n", x86_pmu.num_counters); - pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); - pr_info("... max period: %016Lx\n", x86_pmu.max_period); - pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); - pr_info("... 
counter mask: %016Lx\n", perf_counter_mask); -} - -static inline void x86_pmu_read(struct perf_counter *counter) -{ - x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); -} - -static const struct pmu pmu = { - .enable = x86_pmu_enable, - .disable = x86_pmu_disable, - .read = x86_pmu_read, - .unthrottle = x86_pmu_unthrottle, -}; - -const struct pmu *hw_perf_counter_init(struct perf_counter *counter) -{ - int err; - - err = __hw_perf_counter_init(counter); - if (err) { - if (counter->destroy) - counter->destroy(counter); - return ERR_PTR(err); - } - - return &pmu; -} - -/* - * callchain support - */ - -static inline -void callchain_store(struct perf_callchain_entry *entry, u64 ip) -{ - if (entry->nr < PERF_MAX_STACK_DEPTH) - entry->ip[entry->nr++] = ip; -} - -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); -static DEFINE_PER_CPU(int, in_nmi_frame); - - -static void -backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - /* Ignore warnings */ -} - -static void backtrace_warning(void *data, char *msg) -{ - /* Ignore warnings */ -} - -static int backtrace_stack(void *data, char *name) -{ - per_cpu(in_nmi_frame, smp_processor_id()) = - x86_is_stack_id(NMI_STACK, name); - - return 0; -} - -static void backtrace_address(void *data, unsigned long addr, int reliable) -{ - struct perf_callchain_entry *entry = data; - - if (per_cpu(in_nmi_frame, smp_processor_id())) - return; - - if (reliable) - callchain_store(entry, addr); -} - -static const struct stacktrace_ops backtrace_ops = { - .warning = backtrace_warning, - .warning_symbol = backtrace_warning_symbol, - .stack = backtrace_stack, - .address = backtrace_address, -}; - -#include "../dumpstack.h" - -static void -perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) -{ - callchain_store(entry, PERF_CONTEXT_KERNEL); - callchain_store(entry, regs->ip); - - dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); -} - -/* - * best effort, GUP based copy_from_user() that assumes IRQ or NMI context - */ -static unsigned long -copy_from_user_nmi(void *to, const void __user *from, unsigned long n) -{ - unsigned long offset, addr = (unsigned long)from; - int type = in_nmi() ? 
KM_NMI : KM_IRQ0; - unsigned long size, len = 0; - struct page *page; - void *map; - int ret; - - do { - ret = __get_user_pages_fast(addr, 1, 0, &page); - if (!ret) - break; - - offset = addr & (PAGE_SIZE - 1); - size = min(PAGE_SIZE - offset, n - len); - - map = kmap_atomic(page, type); - memcpy(to, map+offset, size); - kunmap_atomic(map, type); - put_page(page); - - len += size; - to += size; - addr += size; - - } while (len < n); - - return len; -} - -static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) -{ - unsigned long bytes; - - bytes = copy_from_user_nmi(frame, fp, sizeof(*frame)); - - return bytes == sizeof(*frame); -} - -static void -perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) -{ - struct stack_frame frame; - const void __user *fp; - - if (!user_mode(regs)) - regs = task_pt_regs(current); - - fp = (void __user *)regs->bp; - - callchain_store(entry, PERF_CONTEXT_USER); - callchain_store(entry, regs->ip); - - while (entry->nr < PERF_MAX_STACK_DEPTH) { - frame.next_frame = NULL; - frame.return_address = 0; - - if (!copy_stack_frame(fp, &frame)) - break; - - if ((unsigned long)fp < regs->sp) - break; - - callchain_store(entry, frame.return_address); - fp = frame.next_frame; - } -} - -static void -perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) -{ - int is_user; - - if (!regs) - return; - - is_user = user_mode(regs); - - if (!current || current->pid == 0) - return; - - if (is_user && current->state != TASK_RUNNING) - return; - - if (!is_user) - perf_callchain_kernel(regs, entry); - - if (current->mm) - perf_callchain_user(regs, entry); -} - -struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) -{ - struct perf_callchain_entry *entry; - - if (in_nmi()) - entry = &__get_cpu_var(pmc_nmi_entry); - else - entry = &__get_cpu_var(pmc_irq_entry); - - entry->nr = 0; - - perf_do_callchain(regs, entry); - - return entry; -} - -void hw_perf_counter_setup_online(int cpu) -{ - init_debug_store_on_cpu(cpu); -} diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c new file mode 100644 index 00000000000..0d03629fb1a --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event.c @@ -0,0 +1,2298 @@ +/* + * Performance events x86 architecture code + * + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2009 Intel Corporation, + * + * For licencing details see kernel-base/COPYING + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static u64 perf_event_mask __read_mostly; + +/* The maximal number of PEBS events: */ +#define MAX_PEBS_EVENTS 4 + +/* The size of a BTS record in bytes: */ +#define BTS_RECORD_SIZE 24 + +/* The size of a per-cpu BTS buffer in bytes: */ +#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048) + +/* The BTS overflow threshold in bytes from the end of the buffer: */ +#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128) + + +/* + * Bits in the debugctlmsr controlling branch tracing. + */ +#define X86_DEBUGCTL_TR (1 << 6) +#define X86_DEBUGCTL_BTS (1 << 7) +#define X86_DEBUGCTL_BTINT (1 << 8) +#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) +#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) + +/* + * A debug store configuration. 
+ * + * We only support architectures that use 64bit fields. + */ +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[MAX_PEBS_EVENTS]; +}; + +struct cpu_hw_events { + struct perf_event *events[X86_PMC_IDX_MAX]; + unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long interrupts; + int enabled; + struct debug_store *ds; +}; + +/* + * struct x86_pmu - generic x86 pmu + */ +struct x86_pmu { + const char *name; + int version; + int (*handle_irq)(struct pt_regs *); + void (*disable_all)(void); + void (*enable_all)(void); + void (*enable)(struct hw_perf_event *, int); + void (*disable)(struct hw_perf_event *, int); + unsigned eventsel; + unsigned perfctr; + u64 (*event_map)(int); + u64 (*raw_event)(u64); + int max_events; + int num_events; + int num_events_fixed; + int event_bits; + u64 event_mask; + int apic; + u64 max_period; + u64 intel_ctrl; + void (*enable_bts)(u64 config); + void (*disable_bts)(void); +}; + +static struct x86_pmu x86_pmu __read_mostly; + +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { + .enabled = 1, +}; + +/* + * Not sure about some of these + */ +static const u64 p6_perfmon_event_map[] = +{ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, + [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, + [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, +}; + +static u64 p6_pmu_event_map(int hw_event) +{ + return p6_perfmon_event_map[hw_event]; +} + +/* + * Event setting that is specified not to count anything. + * We use this to effectively disable a counter. + * + * L2_RQSTS with 0 MESI unit mask. + */ +#define P6_NOP_EVENT 0x0000002EULL + +static u64 p6_pmu_raw_event(u64 hw_event) +{ +#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL +#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL +#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL +#define P6_EVNTSEL_INV_MASK 0x00800000ULL +#define P6_EVNTSEL_REG_MASK 0xFF000000ULL + +#define P6_EVNTSEL_MASK \ + (P6_EVNTSEL_EVENT_MASK | \ + P6_EVNTSEL_UNIT_MASK | \ + P6_EVNTSEL_EDGE_MASK | \ + P6_EVNTSEL_INV_MASK | \ + P6_EVNTSEL_REG_MASK) + + return hw_event & P6_EVNTSEL_MASK; +} + + +/* + * Intel PerfMon v3. Used on Core2 and later. + */ +static const u64 intel_perfmon_event_map[] = +{ + [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, + [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, + [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, +}; + +static u64 intel_pmu_event_map(int hw_event) +{ + return intel_perfmon_event_map[hw_event]; +} + +/* + * Generalized hw caching related hw_event table, filled + * in on a per model basis. A value of 0 means + * 'not supported', -1 means 'hw_event makes no sense on + * this CPU', any other value means the raw hw_event + * ID. 
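A PERF_TYPE_HW_CACHE config packs the three table indices into the low bytes of attr->config (type, op and result, one byte each), which is how set_ext_hw_attr(), shown earlier in this patch, takes it apart before indexing this table. A minimal userspace decode, assuming the perf ABI enum values L1D = 0, OP_READ = 0 and RESULT_MISS = 1:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* "L1D read misses": type 0, op 0, result 1, one byte per field. */
	uint64_t config = 0ULL | (0ULL << 8) | (1ULL << 16);

	unsigned int cache_type   = (config >>  0) & 0xff;
	unsigned int cache_op     = (config >>  8) & 0xff;
	unsigned int cache_result = (config >> 16) & 0xff;

	printf("type=%u op=%u result=%u\n", cache_type, cache_op, cache_result);

	/* The driver then reads hw_cache_event_ids[type][op][result]:
	 * 0 means "not supported", -1 means "makes no sense on this CPU",
	 * any other value is the raw event code to program. */
	return 0;
}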
+ */ + +#define C(x) PERF_COUNT_HW_CACHE_##x + +static u64 __read_mostly hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + +static const u64 nehalem_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ + [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ + [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ + [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ + [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ + [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ + [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ + [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ + [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ + [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +static const u64 core2_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ + [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ + [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ + [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 
0x4f29, /* L2_LD.MESI */ + [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ + [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ + [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ + [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +static const u64 atom_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ + [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ + [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ + [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ + [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ + [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ + [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, 
+ }, + }, +}; + +static u64 intel_pmu_raw_event(u64 hw_event) +{ +#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL +#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL +#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL +#define CORE_EVNTSEL_INV_MASK 0x00800000ULL +#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL + +#define CORE_EVNTSEL_MASK \ + (CORE_EVNTSEL_EVENT_MASK | \ + CORE_EVNTSEL_UNIT_MASK | \ + CORE_EVNTSEL_EDGE_MASK | \ + CORE_EVNTSEL_INV_MASK | \ + CORE_EVNTSEL_REG_MASK) + + return hw_event & CORE_EVNTSEL_MASK; +} + +static const u64 amd_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ + [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */ + [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */ + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */ + [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */ + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */ + [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */ + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ + [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */ + [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */ + [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +/* + * AMD Performance Monitor K7 and later.
+ */ +static const u64 amd_perfmon_event_map[] = +{ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, +}; + +static u64 amd_pmu_event_map(int hw_event) +{ + return amd_perfmon_event_map[hw_event]; +} + +static u64 amd_pmu_raw_event(u64 hw_event) +{ +#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL +#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL +#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL +#define K7_EVNTSEL_INV_MASK 0x000800000ULL +#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL + +#define K7_EVNTSEL_MASK \ + (K7_EVNTSEL_EVENT_MASK | \ + K7_EVNTSEL_UNIT_MASK | \ + K7_EVNTSEL_EDGE_MASK | \ + K7_EVNTSEL_INV_MASK | \ + K7_EVNTSEL_REG_MASK) + + return hw_event & K7_EVNTSEL_MASK; +} + +/* + * Propagate event elapsed time into the generic event. + * Can only be executed on the CPU where the event is active. + * Returns the delta events processed. + */ +static u64 +x86_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + int shift = 64 - x86_pmu.event_bits; + u64 prev_raw_count, new_raw_count; + s64 delta; + + if (idx == X86_PMC_IDX_FIXED_BTS) + return 0; + + /* + * Careful: an NMI might modify the previous event value. + * + * Our tactic to handle this is to first atomically read and + * exchange a new raw count - then add that new-prev delta + * count to the generic event atomically: + */ +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + rdmsrl(hwc->event_base + idx, new_raw_count); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + * + * Careful, not all hw sign-extends above the physical width + * of the count. 
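+ *
+ * Shifting both raw values left by (64 - event_bits) drops any such
+ * sign-extended high bits before the subtraction, and the arithmetic
+ * shift back down yields a correctly signed delta (e.g. the shift is
+ * 24 for a 40-bit wide counter).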
+ */ + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + atomic64_add(delta, &event->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static atomic_t active_events; +static DEFINE_MUTEX(pmc_reserve_mutex); + +static bool reserve_pmc_hardware(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + int i; + + if (nmi_watchdog == NMI_LOCAL_APIC) + disable_lapic_nmi_watchdog(); + + for (i = 0; i < x86_pmu.num_events; i++) { + if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) + goto perfctr_fail; + } + + for (i = 0; i < x86_pmu.num_events; i++) { + if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) + goto eventsel_fail; + } +#endif + + return true; + +#ifdef CONFIG_X86_LOCAL_APIC +eventsel_fail: + for (i--; i >= 0; i--) + release_evntsel_nmi(x86_pmu.eventsel + i); + + i = x86_pmu.num_events; + +perfctr_fail: + for (i--; i >= 0; i--) + release_perfctr_nmi(x86_pmu.perfctr + i); + + if (nmi_watchdog == NMI_LOCAL_APIC) + enable_lapic_nmi_watchdog(); + + return false; +#endif +} + +static void release_pmc_hardware(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + int i; + + for (i = 0; i < x86_pmu.num_events; i++) { + release_perfctr_nmi(x86_pmu.perfctr + i); + release_evntsel_nmi(x86_pmu.eventsel + i); + } + + if (nmi_watchdog == NMI_LOCAL_APIC) + enable_lapic_nmi_watchdog(); +#endif +} + +static inline bool bts_available(void) +{ + return x86_pmu.enable_bts != NULL; +} + +static inline void init_debug_store_on_cpu(int cpu) +{ + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + + if (!ds) + return; + + wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, + (u32)((u64)(unsigned long)ds), + (u32)((u64)(unsigned long)ds >> 32)); +} + +static inline void fini_debug_store_on_cpu(int cpu) +{ + if (!per_cpu(cpu_hw_events, cpu).ds) + return; + + wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); +} + +static void release_bts_hardware(void) +{ + int cpu; + + if (!bts_available()) + return; + + get_online_cpus(); + + for_each_online_cpu(cpu) + fini_debug_store_on_cpu(cpu); + + for_each_possible_cpu(cpu) { + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + + if (!ds) + continue; + + per_cpu(cpu_hw_events, cpu).ds = NULL; + + kfree((void *)(unsigned long)ds->bts_buffer_base); + kfree(ds); + } + + put_online_cpus(); +} + +static int reserve_bts_hardware(void) +{ + int cpu, err = 0; + + if (!bts_available()) + return 0; + + get_online_cpus(); + + for_each_possible_cpu(cpu) { + struct debug_store *ds; + void *buffer; + + err = -ENOMEM; + buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); + if (unlikely(!buffer)) + break; + + ds = kzalloc(sizeof(*ds), GFP_KERNEL); + if (unlikely(!ds)) { + kfree(buffer); + break; + } + + ds->bts_buffer_base = (u64)(unsigned long)buffer; + ds->bts_index = ds->bts_buffer_base; + ds->bts_absolute_maximum = + ds->bts_buffer_base + BTS_BUFFER_SIZE; + ds->bts_interrupt_threshold = + ds->bts_absolute_maximum - BTS_OVFL_TH; + + per_cpu(cpu_hw_events, cpu).ds = ds; + err = 0; + } + + if (err) + release_bts_hardware(); + else { + for_each_online_cpu(cpu) + init_debug_store_on_cpu(cpu); + } + + put_online_cpus(); + + return err; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { + release_pmc_hardware(); + release_bts_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + +static inline int x86_pmu_initialized(void) +{ + return x86_pmu.handle_irq != NULL; +} + +static inline int +set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) +{ + unsigned int 
cache_type, cache_op, cache_result; + u64 config, val; + + config = attr->config; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + val = hw_cache_event_ids[cache_type][cache_op][cache_result]; + + if (val == 0) + return -ENOENT; + + if (val == -1) + return -EINVAL; + + hwc->config |= val; + + return 0; +} + +static void intel_pmu_enable_bts(u64 config) +{ + unsigned long debugctlmsr; + + debugctlmsr = get_debugctlmsr(); + + debugctlmsr |= X86_DEBUGCTL_TR; + debugctlmsr |= X86_DEBUGCTL_BTS; + debugctlmsr |= X86_DEBUGCTL_BTINT; + + if (!(config & ARCH_PERFMON_EVENTSEL_OS)) + debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; + + if (!(config & ARCH_PERFMON_EVENTSEL_USR)) + debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; + + update_debugctlmsr(debugctlmsr); +} + +static void intel_pmu_disable_bts(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + unsigned long debugctlmsr; + + if (!cpuc->ds) + return; + + debugctlmsr = get_debugctlmsr(); + + debugctlmsr &= + ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | + X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); + + update_debugctlmsr(debugctlmsr); +} + +/* + * Setup the hardware configuration for a given attr_type + */ +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + u64 config; + int err; + + if (!x86_pmu_initialized()) + return -ENODEV; + + err = 0; + if (!atomic_inc_not_zero(&active_events)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&active_events) == 0) { + if (!reserve_pmc_hardware()) + err = -EBUSY; + else + err = reserve_bts_hardware(); + } + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmc_reserve_mutex); + } + if (err) + return err; + + event->destroy = hw_perf_event_destroy; + + /* + * Generate PMC IRQs: + * (keep 'enabled' bit clear for now) + */ + hwc->config = ARCH_PERFMON_EVENTSEL_INT; + + /* + * Count user and OS events unless requested not to. + */ + if (!attr->exclude_user) + hwc->config |= ARCH_PERFMON_EVENTSEL_USR; + if (!attr->exclude_kernel) + hwc->config |= ARCH_PERFMON_EVENTSEL_OS; + + if (!hwc->sample_period) { + hwc->sample_period = x86_pmu.max_period; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } else { + /* + * If we have a PMU initialized but no APIC + * interrupts, we cannot sample hardware + * events (user-space has to fall back and + * sample via a hrtimer based software event): + */ + if (!x86_pmu.apic) + return -EOPNOTSUPP; + } + + /* + * Raw hw_event type provide the config in the hw_event structure + */ + if (attr->type == PERF_TYPE_RAW) { + hwc->config |= x86_pmu.raw_event(attr->config); + return 0; + } + + if (attr->type == PERF_TYPE_HW_CACHE) + return set_ext_hw_attr(hwc, attr); + + if (attr->config >= x86_pmu.max_events) + return -EINVAL; + + /* + * The generic map: + */ + config = x86_pmu.event_map(attr->config); + + if (config == 0) + return -ENOENT; + + if (config == -1LL) + return -EINVAL; + + /* + * Branch tracing: + */ + if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && + (hwc->sample_period == 1)) { + /* BTS is not supported by this architecture. 
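(bts_available() simply checks that the active x86_pmu provides an enable_bts() hook, which only the Intel PMU definition below sets.)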
*/ + if (!bts_available()) + return -EOPNOTSUPP; + + /* BTS is currently only allowed for user-mode. */ + if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) + return -EOPNOTSUPP; + } + + hwc->config |= config; + + return 0; +} + +static void p6_pmu_disable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 val; + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + barrier(); + + /* p6 only has one enable register */ + rdmsrl(MSR_P6_EVNTSEL0, val); + val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_P6_EVNTSEL0, val); +} + +static void intel_pmu_disable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + barrier(); + + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); + + if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) + intel_pmu_disable_bts(); +} + +static void amd_pmu_disable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx; + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + /* + * ensure we write the disable before we start disabling the + * events proper, so that amd_pmu_enable_event() does the + * right thing. + */ + barrier(); + + for (idx = 0; idx < x86_pmu.num_events; idx++) { + u64 val; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + rdmsrl(MSR_K7_EVNTSEL0 + idx, val); + if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) + continue; + val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + } +} + +void hw_perf_disable(void) +{ + if (!x86_pmu_initialized()) + return; + return x86_pmu.disable_all(); +} + +static void p6_pmu_enable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + unsigned long val; + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + /* p6 only has one enable register */ + rdmsrl(MSR_P6_EVNTSEL0, val); + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_P6_EVNTSEL0, val); +} + +static void intel_pmu_enable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); + + if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { + struct perf_event *event = + cpuc->events[X86_PMC_IDX_FIXED_BTS]; + + if (WARN_ON_ONCE(!event)) + return; + + intel_pmu_enable_bts(event->hw.config); + } +} + +static void amd_pmu_enable_all(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx; + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + for (idx = 0; idx < x86_pmu.num_events; idx++) { + struct perf_event *event = cpuc->events[idx]; + u64 val; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + + val = event->hw.config; + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + } +} + +void hw_perf_enable(void) +{ + if (!x86_pmu_initialized()) + return; + x86_pmu.enable_all(); +} + +static inline u64 intel_pmu_get_status(void) +{ + u64 status; + + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + + return status; +} + +static inline void intel_pmu_ack_status(u64 ack) +{ + wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); +} + +static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + (void)checking_wrmsrl(hwc->config_base + idx, + hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); +} + +static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); +} + +static 
inline void +intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) +{ + int idx = __idx - X86_PMC_IDX_FIXED; + u64 ctrl_val, mask; + + mask = 0xfULL << (idx * 4); + + rdmsrl(hwc->config_base, ctrl_val); + ctrl_val &= ~mask; + (void)checking_wrmsrl(hwc->config_base, ctrl_val); +} + +static inline void +p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 val = P6_NOP_EVENT; + + if (cpuc->enabled) + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + + (void)checking_wrmsrl(hwc->config_base + idx, val); +} + +static inline void +intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { + intel_pmu_disable_bts(); + return; + } + + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + intel_pmu_disable_fixed(hwc, idx); + return; + } + + x86_pmu_disable_event(hwc, idx); +} + +static inline void +amd_pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + x86_pmu_disable_event(hwc, idx); +} + +static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); + +/* + * Set the next IRQ period, based on the hwc->period_left value. + * To be called with the event disabled in hw: + */ +static int +x86_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + s64 left = atomic64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int err, ret = 0; + + if (idx == X86_PMC_IDX_FIXED_BTS) + return 0; + + /* + * If we are way outside a reasonable range then just skip forward: + */ + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + /* + * Quirk: certain CPUs don't like it if just 1 hw_event is left: + */ + if (unlikely(left < 2)) + left = 2; + + if (left > x86_pmu.max_period) + left = x86_pmu.max_period; + + per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; + + /* + * The hw event starts counting from this event offset, + * mark it to be able to extract future deltas: + */ + atomic64_set(&hwc->prev_count, (u64)-left); + + err = checking_wrmsrl(hwc->event_base + idx, + (u64)(-left) & x86_pmu.event_mask); + + perf_event_update_userpage(event); + + return ret; +} + +static inline void +intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) +{ + int idx = __idx - X86_PMC_IDX_FIXED; + u64 ctrl_val, bits, mask; + int err; + + /* + * Enable IRQ generation (0x8), + * and enable ring-3 counting (0x2) and ring-0 counting (0x1) + * if requested: + */ + bits = 0x8ULL; + if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) + bits |= 0x2; + if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) + bits |= 0x1; + bits <<= (idx * 4); + mask = 0xfULL << (idx * 4); + + rdmsrl(hwc->config_base, ctrl_val); + ctrl_val &= ~mask; + ctrl_val |= bits; + err = checking_wrmsrl(hwc->config_base, ctrl_val); +} + +static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 val; + + val = hwc->config; + if (cpuc->enabled) + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + + (void)checking_wrmsrl(hwc->config_base + idx, val); +} + + +static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { + if (!__get_cpu_var(cpu_hw_events).enabled) + return; + + intel_pmu_enable_bts(hwc->config); + return; + } + + if
(unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + intel_pmu_enable_fixed(hwc, idx); + return; + } + + x86_pmu_enable_event(hwc, idx); +} + +static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->enabled) + x86_pmu_enable_event(hwc, idx); +} + +static int +fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) +{ + unsigned int hw_event; + + hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK; + + if (unlikely((hw_event == + x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && + (hwc->sample_period == 1))) + return X86_PMC_IDX_FIXED_BTS; + + if (!x86_pmu.num_events_fixed) + return -1; + + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) + return X86_PMC_IDX_FIXED_INSTRUCTIONS; + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) + return X86_PMC_IDX_FIXED_CPU_CYCLES; + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) + return X86_PMC_IDX_FIXED_BUS_CYCLES; + + return -1; +} + +/* + * Find a PMC slot for the freshly enabled / scheduled in event: + */ +static int x86_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + + idx = fixed_mode_idx(event, hwc); + if (idx == X86_PMC_IDX_FIXED_BTS) { + /* BTS is already occupied. */ + if (test_and_set_bit(idx, cpuc->used_mask)) + return -EAGAIN; + + hwc->config_base = 0; + hwc->event_base = 0; + hwc->idx = idx; + } else if (idx >= 0) { + /* + * Try to get the fixed event, if that is already taken + * then try to get a generic event: + */ + if (test_and_set_bit(idx, cpuc->used_mask)) + goto try_generic; + + hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; + /* + * We set it so that event_base + idx in wrmsr/rdmsr maps to + * MSR_ARCH_PERFMON_FIXED_CTR0 ... 
CTR2: + */ + hwc->event_base = + MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; + hwc->idx = idx; + } else { + idx = hwc->idx; + /* Try to get the previous generic event again */ + if (test_and_set_bit(idx, cpuc->used_mask)) { +try_generic: + idx = find_first_zero_bit(cpuc->used_mask, + x86_pmu.num_events); + if (idx == x86_pmu.num_events) + return -EAGAIN; + + set_bit(idx, cpuc->used_mask); + hwc->idx = idx; + } + hwc->config_base = x86_pmu.eventsel; + hwc->event_base = x86_pmu.perfctr; + } + + perf_events_lapic_init(); + + x86_pmu.disable(hwc, idx); + + cpuc->events[idx] = event; + set_bit(idx, cpuc->active_mask); + + x86_perf_event_set_period(event, hwc, idx); + x86_pmu.enable(hwc, idx); + + perf_event_update_userpage(event); + + return 0; +} + +static void x86_pmu_unthrottle(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || + cpuc->events[hwc->idx] != event)) + return; + + x86_pmu.enable(hwc, hwc->idx); +} + +void perf_event_print_debug(void) +{ + u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; + struct cpu_hw_events *cpuc; + unsigned long flags; + int cpu, idx; + + if (!x86_pmu.num_events) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + cpuc = &per_cpu(cpu_hw_events, cpu); + + if (x86_pmu.version >= 2) { + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); + rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); + + pr_info("\n"); + pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); + pr_info("CPU#%d: status: %016llx\n", cpu, status); + pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); + pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); + } + pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); + + for (idx = 0; idx < x86_pmu.num_events; idx++) { + rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); + rdmsrl(x86_pmu.perfctr + idx, pmc_count); + + prev_left = per_cpu(pmc_prev_left[idx], cpu); + + pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", + cpu, idx, pmc_ctrl); + pr_info("CPU#%d: gen-PMC%d count: %016llx\n", + cpu, idx, pmc_count); + pr_info("CPU#%d: gen-PMC%d left: %016llx\n", + cpu, idx, prev_left); + } + for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { + rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); + + pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", + cpu, idx, pmc_count); + } + local_irq_restore(flags); +} + +static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) +{ + struct debug_store *ds = cpuc->ds; + struct bts_record { + u64 from; + u64 to; + u64 flags; + }; + struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; + struct bts_record *at, *top; + struct perf_output_handle handle; + struct perf_event_header header; + struct perf_sample_data data; + struct pt_regs regs; + + if (!event) + return; + + if (!ds) + return; + + at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; + top = (struct bts_record *)(unsigned long)ds->bts_index; + + if (top <= at) + return; + + ds->bts_index = ds->bts_buffer_base; + + + data.period = event->hw.last_period; + data.addr = 0; + regs.ip = 0; + + /* + * Prepare a generic sample, i.e. fill in the invariant fields. + * We will overwrite the from and to address before we output + * the sample. 
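+ * Each bts_record is a from/to/flags triple of u64s (24 bytes, matching
+ * BTS_RECORD_SIZE), so (top - at) below is the number of records to emit.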
+ */ + perf_prepare_sample(&header, &data, event, ®s); + + if (perf_output_begin(&handle, event, + header.size * (top - at), 1, 1)) + return; + + for (; at < top; at++) { + data.ip = at->from; + data.addr = at->to; + + perf_output_sample(&handle, &header, &data, event); + } + + perf_output_end(&handle); + + /* There's new data available. */ + event->hw.interrupts++; + event->pending_kill = POLL_IN; +} + +static void x86_pmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + /* + * Must be done before we disable, otherwise the nmi handler + * could reenable again: + */ + clear_bit(idx, cpuc->active_mask); + x86_pmu.disable(hwc, idx); + + /* + * Make sure the cleared pointer becomes visible before we + * (potentially) free the event: + */ + barrier(); + + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + x86_perf_event_update(event, hwc, idx); + + /* Drain the remaining BTS records. */ + if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) + intel_pmu_drain_bts_buffer(cpuc); + + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +/* + * Save and restart an expired event. Called by NMI contexts, + * so it has to be careful about preempting normal event ops: + */ +static int intel_pmu_save_and_restart(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + int ret; + + x86_perf_event_update(event, hwc, idx); + ret = x86_perf_event_set_period(event, hwc, idx); + + if (event->state == PERF_EVENT_STATE_ACTIVE) + intel_pmu_enable_event(hwc, idx); + + return ret; +} + +static void intel_pmu_reset(void) +{ + struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; + unsigned long flags; + int idx; + + if (!x86_pmu.num_events) + return; + + local_irq_save(flags); + + printk("clearing PMU state on CPU#%d\n", smp_processor_id()); + + for (idx = 0; idx < x86_pmu.num_events; idx++) { + checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); + checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); + } + for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { + checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); + } + if (ds) + ds->bts_index = ds->bts_buffer_base; + + local_irq_restore(flags); +} + +static int p6_pmu_handle_irq(struct pt_regs *regs) +{ + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct perf_event *event; + struct hw_perf_event *hwc; + int idx, handled = 0; + u64 val; + + data.addr = 0; + + cpuc = &__get_cpu_var(cpu_hw_events); + + for (idx = 0; idx < x86_pmu.num_events; idx++) { + if (!test_bit(idx, cpuc->active_mask)) + continue; + + event = cpuc->events[idx]; + hwc = &event->hw; + + val = x86_perf_event_update(event, hwc, idx); + if (val & (1ULL << (x86_pmu.event_bits - 1))) + continue; + + /* + * event overflow + */ + handled = 1; + data.period = event->hw.last_period; + + if (!x86_perf_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 1, &data, regs)) + p6_pmu_disable_event(hwc, idx); + } + + if (handled) + inc_irq_stat(apic_perf_irqs); + + return handled; +} + +/* + * This handler is triggered by the local APIC, so the APIC IRQ handling + * rules apply: + */ +static int intel_pmu_handle_irq(struct pt_regs *regs) +{ + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + int bit, loops; + u64 ack, status; + + data.addr = 0; + + cpuc = &__get_cpu_var(cpu_hw_events); + + perf_disable(); + 
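/* drain any pending BTS records before handling the overflow status */ +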
intel_pmu_drain_bts_buffer(cpuc); + status = intel_pmu_get_status(); + if (!status) { + perf_enable(); + return 0; + } + + loops = 0; +again: + if (++loops > 100) { + WARN_ONCE(1, "perfevents: irq loop stuck!\n"); + perf_event_print_debug(); + intel_pmu_reset(); + perf_enable(); + return 1; + } + + inc_irq_stat(apic_perf_irqs); + ack = status; + for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { + struct perf_event *event = cpuc->events[bit]; + + clear_bit(bit, (unsigned long *) &status); + if (!test_bit(bit, cpuc->active_mask)) + continue; + + if (!intel_pmu_save_and_restart(event)) + continue; + + data.period = event->hw.last_period; + + if (perf_event_overflow(event, 1, &data, regs)) + intel_pmu_disable_event(&event->hw, bit); + } + + intel_pmu_ack_status(ack); + + /* + * Repeat if there is more work to be done: + */ + status = intel_pmu_get_status(); + if (status) + goto again; + + perf_enable(); + + return 1; +} + +static int amd_pmu_handle_irq(struct pt_regs *regs) +{ + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct perf_event *event; + struct hw_perf_event *hwc; + int idx, handled = 0; + u64 val; + + data.addr = 0; + + cpuc = &__get_cpu_var(cpu_hw_events); + + for (idx = 0; idx < x86_pmu.num_events; idx++) { + if (!test_bit(idx, cpuc->active_mask)) + continue; + + event = cpuc->events[idx]; + hwc = &event->hw; + + val = x86_perf_event_update(event, hwc, idx); + if (val & (1ULL << (x86_pmu.event_bits - 1))) + continue; + + /* + * event overflow + */ + handled = 1; + data.period = event->hw.last_period; + + if (!x86_perf_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 1, &data, regs)) + amd_pmu_disable_event(hwc, idx); + } + + if (handled) + inc_irq_stat(apic_perf_irqs); + + return handled; +} + +void smp_perf_pending_interrupt(struct pt_regs *regs) +{ + irq_enter(); + ack_APIC_irq(); + inc_irq_stat(apic_pending_irqs); + perf_event_do_pending(); + irq_exit(); +} + +void set_perf_event_pending(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + apic->send_IPI_self(LOCAL_PENDING_VECTOR); +#endif +} + +void perf_events_lapic_init(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + if (!x86_pmu.apic || !x86_pmu_initialized()) + return; + + /* + * Always use NMI for PMU + */ + apic_write(APIC_LVTPC, APIC_DM_NMI); +#endif +} + +static int __kprobes +perf_event_nmi_handler(struct notifier_block *self, + unsigned long cmd, void *__args) +{ + struct die_args *args = __args; + struct pt_regs *regs; + + if (!atomic_read(&active_events)) + return NOTIFY_DONE; + + switch (cmd) { + case DIE_NMI: + case DIE_NMI_IPI: + break; + + default: + return NOTIFY_DONE; + } + + regs = args->regs; + +#ifdef CONFIG_X86_LOCAL_APIC + apic_write(APIC_LVTPC, APIC_DM_NMI); +#endif + /* + * Can't rely on the handled return value to say it was our NMI, two + * events could trigger 'simultaneously' raising two back-to-back NMIs. + * + * If the first NMI handles both, the latter will be empty and daze + * the CPU. 
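+ *
+ * Hence the unconditional NOTIFY_STOP below: a potentially empty
+ * back-to-back NMI is swallowed here rather than being reported as an
+ * unknown NMI.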
+ */ + x86_pmu.handle_irq(regs); + + return NOTIFY_STOP; +} + +static __read_mostly struct notifier_block perf_event_nmi_notifier = { + .notifier_call = perf_event_nmi_handler, + .next = NULL, + .priority = 1 +}; + +static struct x86_pmu p6_pmu = { + .name = "p6", + .handle_irq = p6_pmu_handle_irq, + .disable_all = p6_pmu_disable_all, + .enable_all = p6_pmu_enable_all, + .enable = p6_pmu_enable_event, + .disable = p6_pmu_disable_event, + .eventsel = MSR_P6_EVNTSEL0, + .perfctr = MSR_P6_PERFCTR0, + .event_map = p6_pmu_event_map, + .raw_event = p6_pmu_raw_event, + .max_events = ARRAY_SIZE(p6_perfmon_event_map), + .apic = 1, + .max_period = (1ULL << 31) - 1, + .version = 0, + .num_events = 2, + /* + * Events have 40 bits implemented. However they are designed such + * that bits [32-39] are sign extensions of bit 31. As such the + * effective width of a event for P6-like PMU is 32 bits only. + * + * See IA-32 Intel Architecture Software developer manual Vol 3B + */ + .event_bits = 32, + .event_mask = (1ULL << 32) - 1, +}; + +static struct x86_pmu intel_pmu = { + .name = "Intel", + .handle_irq = intel_pmu_handle_irq, + .disable_all = intel_pmu_disable_all, + .enable_all = intel_pmu_enable_all, + .enable = intel_pmu_enable_event, + .disable = intel_pmu_disable_event, + .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, + .perfctr = MSR_ARCH_PERFMON_PERFCTR0, + .event_map = intel_pmu_event_map, + .raw_event = intel_pmu_raw_event, + .max_events = ARRAY_SIZE(intel_perfmon_event_map), + .apic = 1, + /* + * Intel PMCs cannot be accessed sanely above 32 bit width, + * so we install an artificial 1<<31 period regardless of + * the generic event period: + */ + .max_period = (1ULL << 31) - 1, + .enable_bts = intel_pmu_enable_bts, + .disable_bts = intel_pmu_disable_bts, +}; + +static struct x86_pmu amd_pmu = { + .name = "AMD", + .handle_irq = amd_pmu_handle_irq, + .disable_all = amd_pmu_disable_all, + .enable_all = amd_pmu_enable_all, + .enable = amd_pmu_enable_event, + .disable = amd_pmu_disable_event, + .eventsel = MSR_K7_EVNTSEL0, + .perfctr = MSR_K7_PERFCTR0, + .event_map = amd_pmu_event_map, + .raw_event = amd_pmu_raw_event, + .max_events = ARRAY_SIZE(amd_perfmon_event_map), + .num_events = 4, + .event_bits = 48, + .event_mask = (1ULL << 48) - 1, + .apic = 1, + /* use highest bit to detect overflow */ + .max_period = (1ULL << 47) - 1, +}; + +static int p6_pmu_init(void) +{ + switch (boot_cpu_data.x86_model) { + case 1: + case 3: /* Pentium Pro */ + case 5: + case 6: /* Pentium II */ + case 7: + case 8: + case 11: /* Pentium III */ + break; + case 9: + case 13: + /* Pentium M */ + break; + default: + pr_cont("unsupported p6 CPU model %d ", + boot_cpu_data.x86_model); + return -ENODEV; + } + + x86_pmu = p6_pmu; + + if (!cpu_has_apic) { + pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); + pr_info("no hardware sampling interrupt available.\n"); + x86_pmu.apic = 0; + } + + return 0; +} + +static int intel_pmu_init(void) +{ + union cpuid10_edx edx; + union cpuid10_eax eax; + unsigned int unused; + unsigned int ebx; + int version; + + if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { + /* check for P6 processor family */ + if (boot_cpu_data.x86 == 6) { + return p6_pmu_init(); + } else { + return -ENODEV; + } + } + + /* + * Check whether the Architectural PerfMon supports + * Branch Misses Retired hw_event or not. 
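+ * (CPUID leaf 0xa describes the architectural PerfMon: eax carries the
+ * version, counter count, counter width and the length of the ebx
+ * event-availability mask; edx carries the fixed-purpose counter count.)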
+ */ + cpuid(10, &eax.full, &ebx, &unused, &edx.full); + if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) + return -ENODEV; + + version = eax.split.version_id; + if (version < 2) + return -ENODEV; + + x86_pmu = intel_pmu; + x86_pmu.version = version; + x86_pmu.num_events = eax.split.num_events; + x86_pmu.event_bits = eax.split.bit_width; + x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1; + + /* + * Quirk: v2 perfmon does not report fixed-purpose events, so + * assume at least 3 events: + */ + x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); + + /* + * Install the hw-cache-events table: + */ + switch (boot_cpu_data.x86_model) { + case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ + case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ + case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ + case 29: /* six-core 45 nm xeon "Dunnington" */ + memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + pr_cont("Core2 events, "); + break; + default: + case 26: + memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + pr_cont("Nehalem/Corei7 events, "); + break; + case 28: + memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + pr_cont("Atom events, "); + break; + } + return 0; +} + +static int amd_pmu_init(void) +{ + /* Performance-monitoring supported from K7 and later: */ + if (boot_cpu_data.x86 < 6) + return -ENODEV; + + x86_pmu = amd_pmu; + + /* Events are common for all AMDs */ + memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + return 0; +} + +void __init init_hw_perf_events(void) +{ + int err; + + pr_info("Performance Events: "); + + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + err = intel_pmu_init(); + break; + case X86_VENDOR_AMD: + err = amd_pmu_init(); + break; + default: + return; + } + if (err != 0) { + pr_cont("no PMU driver, software events only.\n"); + return; + } + + pr_cont("%s PMU driver.\n", x86_pmu.name); + + if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { + WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", + x86_pmu.num_events, X86_PMC_MAX_GENERIC); + x86_pmu.num_events = X86_PMC_MAX_GENERIC; + } + perf_event_mask = (1 << x86_pmu.num_events) - 1; + perf_max_events = x86_pmu.num_events; + + if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) { + WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", + x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED); + x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED; + } + + perf_event_mask |= + ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED; + x86_pmu.intel_ctrl = perf_event_mask; + + perf_events_lapic_init(); + register_die_notifier(&perf_event_nmi_notifier); + + pr_info("... version: %d\n", x86_pmu.version); + pr_info("... bit width: %d\n", x86_pmu.event_bits); + pr_info("... generic events: %d\n", x86_pmu.num_events); + pr_info("... value mask: %016Lx\n", x86_pmu.event_mask); + pr_info("... max period: %016Lx\n", x86_pmu.max_period); + pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); + pr_info("... 
event mask: %016Lx\n", perf_event_mask); +} + +static inline void x86_pmu_read(struct perf_event *event) +{ + x86_perf_event_update(event, &event->hw, event->hw.idx); +} + +static const struct pmu pmu = { + .enable = x86_pmu_enable, + .disable = x86_pmu_disable, + .read = x86_pmu_read, + .unthrottle = x86_pmu_unthrottle, +}; + +const struct pmu *hw_perf_event_init(struct perf_event *event) +{ + int err; + + err = __hw_perf_event_init(event); + if (err) { + if (event->destroy) + event->destroy(event); + return ERR_PTR(err); + } + + return &pmu; +} + +/* + * callchain support + */ + +static inline +void callchain_store(struct perf_callchain_entry *entry, u64 ip) +{ + if (entry->nr < PERF_MAX_STACK_DEPTH) + entry->ip[entry->nr++] = ip; +} + +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); +static DEFINE_PER_CPU(int, in_nmi_frame); + + +static void +backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + /* Ignore warnings */ +} + +static void backtrace_warning(void *data, char *msg) +{ + /* Ignore warnings */ +} + +static int backtrace_stack(void *data, char *name) +{ + per_cpu(in_nmi_frame, smp_processor_id()) = + x86_is_stack_id(NMI_STACK, name); + + return 0; +} + +static void backtrace_address(void *data, unsigned long addr, int reliable) +{ + struct perf_callchain_entry *entry = data; + + if (per_cpu(in_nmi_frame, smp_processor_id())) + return; + + if (reliable) + callchain_store(entry, addr); +} + +static const struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol = backtrace_warning_symbol, + .stack = backtrace_stack, + .address = backtrace_address, +}; + +#include "../dumpstack.h" + +static void +perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) +{ + callchain_store(entry, PERF_CONTEXT_KERNEL); + callchain_store(entry, regs->ip); + + dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); +} + +/* + * best effort, GUP based copy_from_user() that assumes IRQ or NMI context + */ +static unsigned long +copy_from_user_nmi(void *to, const void __user *from, unsigned long n) +{ + unsigned long offset, addr = (unsigned long)from; + int type = in_nmi() ? 
KM_NMI : KM_IRQ0; + unsigned long size, len = 0; + struct page *page; + void *map; + int ret; + + do { + ret = __get_user_pages_fast(addr, 1, 0, &page); + if (!ret) + break; + + offset = addr & (PAGE_SIZE - 1); + size = min(PAGE_SIZE - offset, n - len); + + map = kmap_atomic(page, type); + memcpy(to, map+offset, size); + kunmap_atomic(map, type); + put_page(page); + + len += size; + to += size; + addr += size; + + } while (len < n); + + return len; +} + +static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) +{ + unsigned long bytes; + + bytes = copy_from_user_nmi(frame, fp, sizeof(*frame)); + + return bytes == sizeof(*frame); +} + +static void +perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) +{ + struct stack_frame frame; + const void __user *fp; + + if (!user_mode(regs)) + regs = task_pt_regs(current); + + fp = (void __user *)regs->bp; + + callchain_store(entry, PERF_CONTEXT_USER); + callchain_store(entry, regs->ip); + + while (entry->nr < PERF_MAX_STACK_DEPTH) { + frame.next_frame = NULL; + frame.return_address = 0; + + if (!copy_stack_frame(fp, &frame)) + break; + + if ((unsigned long)fp < regs->sp) + break; + + callchain_store(entry, frame.return_address); + fp = frame.next_frame; + } +} + +static void +perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) +{ + int is_user; + + if (!regs) + return; + + is_user = user_mode(regs); + + if (!current || current->pid == 0) + return; + + if (is_user && current->state != TASK_RUNNING) + return; + + if (!is_user) + perf_callchain_kernel(regs, entry); + + if (current->mm) + perf_callchain_user(regs, entry); +} + +struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +{ + struct perf_callchain_entry *entry; + + if (in_nmi()) + entry = &__get_cpu_var(pmc_nmi_entry); + else + entry = &__get_cpu_var(pmc_irq_entry); + + entry->nr = 0; + + perf_do_callchain(regs, entry); + + return entry; +} + +void hw_perf_event_setup_online(int cpu) +{ + init_debug_store_on_cpu(cpu); +} diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 392bea43b89..fab786f60ed 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -20,7 +20,7 @@ #include #include -#include +#include struct nmi_watchdog_ctlblk { unsigned int cccr_msr; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index d59fe323807..681c3fda739 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1021,7 +1021,7 @@ apicinterrupt ERROR_APIC_VECTOR \ apicinterrupt SPURIOUS_APIC_VECTOR \ spurious_interrupt smp_spurious_interrupt -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS apicinterrupt LOCAL_PENDING_VECTOR \ perf_pending_interrupt smp_perf_pending_interrupt #endif diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 300883112e3..40f30773fb2 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -208,7 +208,7 @@ static void __init apic_intr_init(void) alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); /* Performance monitoring interrupts: */ -# ifdef CONFIG_PERF_COUNTERS +# ifdef CONFIG_PERF_EVENTS alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); # endif diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index d51321ddafd..0157cd26d7c 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -335,4 +335,4 @@ ENTRY(sys_call_table) .long sys_preadv .long sys_pwritev 
.long sys_rt_tgsigqueueinfo /* 335 */ - .long sys_perf_counter_open + .long sys_perf_event_open diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 775a020990a..82728f2c6d5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -10,7 +10,7 @@ #include /* max_low_pfn */ #include /* __kprobes, ... */ #include /* kmmio_handler, ... */ -#include /* perf_swcounter_event */ +#include /* perf_sw_event */ #include /* dotraplinkage, ... */ #include /* pgd_*(), ... */ @@ -1017,7 +1017,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -1114,11 +1114,11 @@ good_area: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 4899215999d..8eb05878554 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -234,11 +234,11 @@ static void arch_perfmon_setup_counters(void) if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && current_cpu_data.x86_model == 15) { eax.split.version_id = 2; - eax.split.num_counters = 2; + eax.split.num_events = 2; eax.split.bit_width = 40; } - num_counters = eax.split.num_counters; + num_counters = eax.split.num_events; op_arch_perfmon_spec.num_counters = num_counters; op_arch_perfmon_spec.num_controls = num_counters; diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index b83776180c7..7b8e75d1608 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -13,7 +13,7 @@ #define OP_X86_MODEL_H #include -#include +#include struct op_msr { unsigned long addr; diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 50eecfe1d72..44203ff599d 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include @@ -252,7 +252,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty) struct pt_regs *regs = get_irq_regs(); if (regs) show_regs(regs); - perf_counter_print_debug(); + perf_event_print_debug(); } static struct sysrq_key_op sysrq_showregs_op = { .handler = sysrq_handle_showregs, diff --git a/fs/exec.c b/fs/exec.c index 172ceb6edde..434dba778cc 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include @@ -923,7 +923,7 @@ void set_task_comm(struct task_struct *tsk, char *buf) task_lock(tsk); strlcpy(tsk->comm, buf, sizeof(tsk->comm)); task_unlock(tsk); - perf_counter_comm(tsk); + perf_event_comm(tsk); } int flush_old_exec(struct linux_binprm * bprm) @@ -997,7 +997,7 @@ int flush_old_exec(struct linux_binprm * bprm) * security domain: */ if (!get_dumpable(current->mm)) - perf_counter_exit_task(current); + perf_event_exit_task(current); /* An exec changes our domain. 
We are no longer part of the thread group */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index 1125e5a1ee5..d76b66acea9 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -620,8 +620,8 @@ __SYSCALL(__NR_move_pages, sys_move_pages) #define __NR_rt_tgsigqueueinfo 240 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) -#define __NR_perf_counter_open 241 -__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) +#define __NR_perf_event_open 241 +__SYSCALL(__NR_perf_event_open, sys_perf_event_open) #undef __NR_syscalls #define __NR_syscalls 242 diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 9e7f2e8fc66..21a6f5d9af2 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -106,13 +106,13 @@ extern struct group_info init_groups; extern struct cred init_cred; -#ifdef CONFIG_PERF_COUNTERS -# define INIT_PERF_COUNTERS(tsk) \ - .perf_counter_mutex = \ - __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ - .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), +#ifdef CONFIG_PERF_EVENTS +# define INIT_PERF_EVENTS(tsk) \ + .perf_event_mutex = \ + __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ + .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), #else -# define INIT_PERF_COUNTERS(tsk) +# define INIT_PERF_EVENTS(tsk) #endif /* @@ -178,7 +178,7 @@ extern struct cred init_cred; }, \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ INIT_IDS \ - INIT_PERF_COUNTERS(tsk) \ + INIT_PERF_EVENTS(tsk) \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ INIT_FTRACE_GRAPH \ diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h deleted file mode 100644 index f6486273267..00000000000 --- a/include/linux/perf_counter.h +++ /dev/null @@ -1,858 +0,0 @@ -/* - * Performance counters: - * - * Copyright (C) 2008-2009, Thomas Gleixner - * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra - * - * Data type definitions, declarations, prototypes. 
- * - * Started by: Thomas Gleixner and Ingo Molnar - * - * For licencing details see kernel-base/COPYING - */ -#ifndef _LINUX_PERF_COUNTER_H -#define _LINUX_PERF_COUNTER_H - -#include -#include -#include - -/* - * User-space ABI bits: - */ - -/* - * attr.type - */ -enum perf_type_id { - PERF_TYPE_HARDWARE = 0, - PERF_TYPE_SOFTWARE = 1, - PERF_TYPE_TRACEPOINT = 2, - PERF_TYPE_HW_CACHE = 3, - PERF_TYPE_RAW = 4, - - PERF_TYPE_MAX, /* non-ABI */ -}; - -/* - * Generalized performance counter event types, used by the - * attr.event_id parameter of the sys_perf_counter_open() - * syscall: - */ -enum perf_hw_id { - /* - * Common hardware events, generalized by the kernel: - */ - PERF_COUNT_HW_CPU_CYCLES = 0, - PERF_COUNT_HW_INSTRUCTIONS = 1, - PERF_COUNT_HW_CACHE_REFERENCES = 2, - PERF_COUNT_HW_CACHE_MISSES = 3, - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_HW_BRANCH_MISSES = 5, - PERF_COUNT_HW_BUS_CYCLES = 6, - - PERF_COUNT_HW_MAX, /* non-ABI */ -}; - -/* - * Generalized hardware cache counters: - * - * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x - * { read, write, prefetch } x - * { accesses, misses } - */ -enum perf_hw_cache_id { - PERF_COUNT_HW_CACHE_L1D = 0, - PERF_COUNT_HW_CACHE_L1I = 1, - PERF_COUNT_HW_CACHE_LL = 2, - PERF_COUNT_HW_CACHE_DTLB = 3, - PERF_COUNT_HW_CACHE_ITLB = 4, - PERF_COUNT_HW_CACHE_BPU = 5, - - PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ -}; - -enum perf_hw_cache_op_id { - PERF_COUNT_HW_CACHE_OP_READ = 0, - PERF_COUNT_HW_CACHE_OP_WRITE = 1, - PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, - - PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ -}; - -enum perf_hw_cache_op_result_id { - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, - PERF_COUNT_HW_CACHE_RESULT_MISS = 1, - - PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ -}; - -/* - * Special "software" counters provided by the kernel, even if the hardware - * does not support performance counters. These counters measure various - * physical and sw events of the kernel (and allow the profiling of them as - * well): - */ -enum perf_sw_ids { - PERF_COUNT_SW_CPU_CLOCK = 0, - PERF_COUNT_SW_TASK_CLOCK = 1, - PERF_COUNT_SW_PAGE_FAULTS = 2, - PERF_COUNT_SW_CONTEXT_SWITCHES = 3, - PERF_COUNT_SW_CPU_MIGRATIONS = 4, - PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, - - PERF_COUNT_SW_MAX, /* non-ABI */ -}; - -/* - * Bits that can be set in attr.sample_type to request information - * in the overflow packets. 
- */ -enum perf_counter_sample_format { - PERF_SAMPLE_IP = 1U << 0, - PERF_SAMPLE_TID = 1U << 1, - PERF_SAMPLE_TIME = 1U << 2, - PERF_SAMPLE_ADDR = 1U << 3, - PERF_SAMPLE_READ = 1U << 4, - PERF_SAMPLE_CALLCHAIN = 1U << 5, - PERF_SAMPLE_ID = 1U << 6, - PERF_SAMPLE_CPU = 1U << 7, - PERF_SAMPLE_PERIOD = 1U << 8, - PERF_SAMPLE_STREAM_ID = 1U << 9, - PERF_SAMPLE_RAW = 1U << 10, - - PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ -}; - -/* - * The format of the data returned by read() on a perf counter fd, - * as specified by attr.read_format: - * - * struct read_format { - * { u64 value; - * { u64 time_enabled; } && PERF_FORMAT_ENABLED - * { u64 time_running; } && PERF_FORMAT_RUNNING - * { u64 id; } && PERF_FORMAT_ID - * } && !PERF_FORMAT_GROUP - * - * { u64 nr; - * { u64 time_enabled; } && PERF_FORMAT_ENABLED - * { u64 time_running; } && PERF_FORMAT_RUNNING - * { u64 value; - * { u64 id; } && PERF_FORMAT_ID - * } cntr[nr]; - * } && PERF_FORMAT_GROUP - * }; - */ -enum perf_counter_read_format { - PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, - PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, - PERF_FORMAT_ID = 1U << 2, - PERF_FORMAT_GROUP = 1U << 3, - - PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ -}; - -#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ - -/* - * Hardware event to monitor via a performance monitoring counter: - */ -struct perf_counter_attr { - - /* - * Major type: hardware/software/tracepoint/etc. - */ - __u32 type; - - /* - * Size of the attr structure, for fwd/bwd compat. - */ - __u32 size; - - /* - * Type specific configuration information. - */ - __u64 config; - - union { - __u64 sample_period; - __u64 sample_freq; - }; - - __u64 sample_type; - __u64 read_format; - - __u64 disabled : 1, /* off by default */ - inherit : 1, /* children inherit it */ - pinned : 1, /* must always be on PMU */ - exclusive : 1, /* only group on PMU */ - exclude_user : 1, /* don't count user */ - exclude_kernel : 1, /* ditto kernel */ - exclude_hv : 1, /* ditto hypervisor */ - exclude_idle : 1, /* don't count when idle */ - mmap : 1, /* include mmap data */ - comm : 1, /* include comm data */ - freq : 1, /* use freq, not period */ - inherit_stat : 1, /* per task counts */ - enable_on_exec : 1, /* next exec enables */ - task : 1, /* trace fork/exit */ - watermark : 1, /* wakeup_watermark */ - - __reserved_1 : 49; - - union { - __u32 wakeup_events; /* wakeup every n events */ - __u32 wakeup_watermark; /* bytes before wakeup */ - }; - __u32 __reserved_2; - - __u64 __reserved_3; -}; - -/* - * Ioctls that can be done on a perf counter fd: - */ -#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) -#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) -#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) -#define PERF_COUNTER_IOC_RESET _IO ('$', 3) -#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) -#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) - -enum perf_counter_ioc_flags { - PERF_IOC_FLAG_GROUP = 1U << 0, -}; - -/* - * Structure of the page that can be mapped via mmap - */ -struct perf_counter_mmap_page { - __u32 version; /* version number of this structure */ - __u32 compat_version; /* lowest version this is compat with */ - - /* - * Bits needed to read the hw counters in user-space. - * - * u32 seq; - * s64 count; - * - * do { - * seq = pc->lock; - * - * barrier() - * if (pc->index) { - * count = pmc_read(pc->index - 1); - * count += pc->offset; - * } else - * goto regular_read; - * - * barrier(); - * } while (pc->lock != seq); - * - * NOTE: for obvious reason this only works on self-monitoring - * processes. 
- */ - __u32 lock; /* seqlock for synchronization */ - __u32 index; /* hardware counter identifier */ - __s64 offset; /* add to hardware counter value */ - __u64 time_enabled; /* time counter active */ - __u64 time_running; /* time counter on cpu */ - - /* - * Hole for extension of the self monitor capabilities - */ - - __u64 __reserved[123]; /* align to 1k */ - - /* - * Control data for the mmap() data buffer. - * - * User-space reading the @data_head value should issue an rmb(), on - * SMP capable platforms, after reading this value -- see - * perf_counter_wakeup(). - * - * When the mapping is PROT_WRITE the @data_tail value should be - * written by userspace to reflect the last read data. In this case - * the kernel will not over-write unread data. - */ - __u64 data_head; /* head in the data section */ - __u64 data_tail; /* user-space written tail */ -}; - -#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) -#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) -#define PERF_EVENT_MISC_KERNEL (1 << 0) -#define PERF_EVENT_MISC_USER (2 << 0) -#define PERF_EVENT_MISC_HYPERVISOR (3 << 0) - -struct perf_event_header { - __u32 type; - __u16 misc; - __u16 size; -}; - -enum perf_event_type { - - /* - * The MMAP events record the PROT_EXEC mappings so that we can - * correlate userspace IPs to code. They have the following structure: - * - * struct { - * struct perf_event_header header; - * - * u32 pid, tid; - * u64 addr; - * u64 len; - * u64 pgoff; - * char filename[]; - * }; - */ - PERF_EVENT_MMAP = 1, - - /* - * struct { - * struct perf_event_header header; - * u64 id; - * u64 lost; - * }; - */ - PERF_EVENT_LOST = 2, - - /* - * struct { - * struct perf_event_header header; - * - * u32 pid, tid; - * char comm[]; - * }; - */ - PERF_EVENT_COMM = 3, - - /* - * struct { - * struct perf_event_header header; - * u32 pid, ppid; - * u32 tid, ptid; - * u64 time; - * }; - */ - PERF_EVENT_EXIT = 4, - - /* - * struct { - * struct perf_event_header header; - * u64 time; - * u64 id; - * u64 stream_id; - * }; - */ - PERF_EVENT_THROTTLE = 5, - PERF_EVENT_UNTHROTTLE = 6, - - /* - * struct { - * struct perf_event_header header; - * u32 pid, ppid; - * u32 tid, ptid; - * { u64 time; } && PERF_SAMPLE_TIME - * }; - */ - PERF_EVENT_FORK = 7, - - /* - * struct { - * struct perf_event_header header; - * u32 pid, tid; - * - * struct read_format values; - * }; - */ - PERF_EVENT_READ = 8, - - /* - * struct { - * struct perf_event_header header; - * - * { u64 ip; } && PERF_SAMPLE_IP - * { u32 pid, tid; } && PERF_SAMPLE_TID - * { u64 time; } && PERF_SAMPLE_TIME - * { u64 addr; } && PERF_SAMPLE_ADDR - * { u64 id; } && PERF_SAMPLE_ID - * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID - * { u32 cpu, res; } && PERF_SAMPLE_CPU - * { u64 period; } && PERF_SAMPLE_PERIOD - * - * { struct read_format values; } && PERF_SAMPLE_READ - * - * { u64 nr, - * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN - * - * # - * # The RAW record below is opaque data wrt the ABI - * # - * # That is, the ABI doesn't make any promises wrt to - * # the stability of its content, it may vary depending - * # on event, hardware, kernel version and phase of - * # the moon. - * # - * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
- * # - * - * { u32 size; - * char data[size];}&& PERF_SAMPLE_RAW - * }; - */ - PERF_EVENT_SAMPLE = 9, - - PERF_EVENT_MAX, /* non-ABI */ -}; - -enum perf_callchain_context { - PERF_CONTEXT_HV = (__u64)-32, - PERF_CONTEXT_KERNEL = (__u64)-128, - PERF_CONTEXT_USER = (__u64)-512, - - PERF_CONTEXT_GUEST = (__u64)-2048, - PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, - PERF_CONTEXT_GUEST_USER = (__u64)-2560, - - PERF_CONTEXT_MAX = (__u64)-4095, -}; - -#define PERF_FLAG_FD_NO_GROUP (1U << 0) -#define PERF_FLAG_FD_OUTPUT (1U << 1) - -#ifdef __KERNEL__ -/* - * Kernel-internal data types and definitions: - */ - -#ifdef CONFIG_PERF_COUNTERS -# include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define PERF_MAX_STACK_DEPTH 255 - -struct perf_callchain_entry { - __u64 nr; - __u64 ip[PERF_MAX_STACK_DEPTH]; -}; - -struct perf_raw_record { - u32 size; - void *data; -}; - -struct task_struct; - -/** - * struct hw_perf_counter - performance counter hardware details: - */ -struct hw_perf_counter { -#ifdef CONFIG_PERF_COUNTERS - union { - struct { /* hardware */ - u64 config; - unsigned long config_base; - unsigned long counter_base; - int idx; - }; - union { /* software */ - atomic64_t count; - struct hrtimer hrtimer; - }; - }; - atomic64_t prev_count; - u64 sample_period; - u64 last_period; - atomic64_t period_left; - u64 interrupts; - - u64 freq_count; - u64 freq_interrupts; - u64 freq_stamp; -#endif -}; - -struct perf_counter; - -/** - * struct pmu - generic performance monitoring unit - */ -struct pmu { - int (*enable) (struct perf_counter *counter); - void (*disable) (struct perf_counter *counter); - void (*read) (struct perf_counter *counter); - void (*unthrottle) (struct perf_counter *counter); -}; - -/** - * enum perf_counter_active_state - the states of a counter - */ -enum perf_counter_active_state { - PERF_COUNTER_STATE_ERROR = -2, - PERF_COUNTER_STATE_OFF = -1, - PERF_COUNTER_STATE_INACTIVE = 0, - PERF_COUNTER_STATE_ACTIVE = 1, -}; - -struct file; - -struct perf_mmap_data { - struct rcu_head rcu_head; - int nr_pages; /* nr of data pages */ - int writable; /* are we writable */ - int nr_locked; /* nr pages mlocked */ - - atomic_t poll; /* POLL_ for wakeups */ - atomic_t events; /* event limit */ - - atomic_long_t head; /* write position */ - atomic_long_t done_head; /* completed head */ - - atomic_t lock; /* concurrent writes */ - atomic_t wakeup; /* needs a wakeup */ - atomic_t lost; /* nr records lost */ - - long watermark; /* wakeup watermark */ - - struct perf_counter_mmap_page *user_page; - void *data_pages[0]; -}; - -struct perf_pending_entry { - struct perf_pending_entry *next; - void (*func)(struct perf_pending_entry *); -}; - -/** - * struct perf_counter - performance counter kernel representation: - */ -struct perf_counter { -#ifdef CONFIG_PERF_COUNTERS - struct list_head group_entry; - struct list_head event_entry; - struct list_head sibling_list; - int nr_siblings; - struct perf_counter *group_leader; - struct perf_counter *output; - const struct pmu *pmu; - - enum perf_counter_active_state state; - atomic64_t count; - - /* - * These are the total time in nanoseconds that the counter - * has been enabled (i.e. eligible to run, and the task has - * been scheduled in, if this is a per-task counter) - * and running (scheduled onto the CPU), respectively. - * - * They are computed from tstamp_enabled, tstamp_running and - * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. 
- */ - u64 total_time_enabled; - u64 total_time_running; - - /* - * These are timestamps used for computing total_time_enabled - * and total_time_running when the counter is in INACTIVE or - * ACTIVE state, measured in nanoseconds from an arbitrary point - * in time. - * tstamp_enabled: the notional time when the counter was enabled - * tstamp_running: the notional time when the counter was scheduled on - * tstamp_stopped: in INACTIVE state, the notional time when the - * counter was scheduled off. - */ - u64 tstamp_enabled; - u64 tstamp_running; - u64 tstamp_stopped; - - struct perf_counter_attr attr; - struct hw_perf_counter hw; - - struct perf_counter_context *ctx; - struct file *filp; - - /* - * These accumulate total time (in nanoseconds) that children - * counters have been enabled and running, respectively. - */ - atomic64_t child_total_time_enabled; - atomic64_t child_total_time_running; - - /* - * Protect attach/detach and child_list: - */ - struct mutex child_mutex; - struct list_head child_list; - struct perf_counter *parent; - - int oncpu; - int cpu; - - struct list_head owner_entry; - struct task_struct *owner; - - /* mmap bits */ - struct mutex mmap_mutex; - atomic_t mmap_count; - struct perf_mmap_data *data; - - /* poll related */ - wait_queue_head_t waitq; - struct fasync_struct *fasync; - - /* delayed work for NMIs and such */ - int pending_wakeup; - int pending_kill; - int pending_disable; - struct perf_pending_entry pending; - - atomic_t event_limit; - - void (*destroy)(struct perf_counter *); - struct rcu_head rcu_head; - - struct pid_namespace *ns; - u64 id; -#endif -}; - -/** - * struct perf_counter_context - counter context structure - * - * Used as a container for task counters and CPU counters as well: - */ -struct perf_counter_context { - /* - * Protect the states of the counters in the list, - * nr_active, and the list: - */ - spinlock_t lock; - /* - * Protect the list of counters. Locking either mutex or lock - * is sufficient to ensure the list doesn't change; to change - * the list you need to lock both the mutex and the spinlock. - */ - struct mutex mutex; - - struct list_head group_list; - struct list_head event_list; - int nr_counters; - int nr_active; - int is_active; - int nr_stat; - atomic_t refcount; - struct task_struct *task; - - /* - * Context clock, runs when context enabled. - */ - u64 time; - u64 timestamp; - - /* - * These fields let us detect when two contexts have both - * been cloned (inherited) from a common ancestor. 
- */ - struct perf_counter_context *parent_ctx; - u64 parent_gen; - u64 generation; - int pin_count; - struct rcu_head rcu_head; -}; - -/** - * struct perf_counter_cpu_context - per cpu counter context structure - */ -struct perf_cpu_context { - struct perf_counter_context ctx; - struct perf_counter_context *task_ctx; - int active_oncpu; - int max_pertask; - int exclusive; - - /* - * Recursion avoidance: - * - * task, softirq, irq, nmi context - */ - int recursion[4]; -}; - -struct perf_output_handle { - struct perf_counter *counter; - struct perf_mmap_data *data; - unsigned long head; - unsigned long offset; - int nmi; - int sample; - int locked; - unsigned long flags; -}; - -#ifdef CONFIG_PERF_COUNTERS - -/* - * Set by architecture code: - */ -extern int perf_max_counters; - -extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); - -extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); -extern void perf_counter_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu); -extern void perf_counter_task_tick(struct task_struct *task, int cpu); -extern int perf_counter_init_task(struct task_struct *child); -extern void perf_counter_exit_task(struct task_struct *child); -extern void perf_counter_free_task(struct task_struct *task); -extern void set_perf_counter_pending(void); -extern void perf_counter_do_pending(void); -extern void perf_counter_print_debug(void); -extern void __perf_disable(void); -extern bool __perf_enable(void); -extern void perf_disable(void); -extern void perf_enable(void); -extern int perf_counter_task_disable(void); -extern int perf_counter_task_enable(void); -extern int hw_perf_group_sched_in(struct perf_counter *group_leader, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx, int cpu); -extern void perf_counter_update_userpage(struct perf_counter *counter); - -struct perf_sample_data { - u64 type; - - u64 ip; - struct { - u32 pid; - u32 tid; - } tid_entry; - u64 time; - u64 addr; - u64 id; - u64 stream_id; - struct { - u32 cpu; - u32 reserved; - } cpu_entry; - u64 period; - struct perf_callchain_entry *callchain; - struct perf_raw_record *raw; -}; - -extern void perf_output_sample(struct perf_output_handle *handle, - struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter); -extern void perf_prepare_sample(struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter, - struct pt_regs *regs); - -extern int perf_counter_overflow(struct perf_counter *counter, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs); - -/* - * Return 1 for a software counter, 0 for a hardware counter - */ -static inline int is_software_counter(struct perf_counter *counter) -{ - return (counter->attr.type != PERF_TYPE_RAW) && - (counter->attr.type != PERF_TYPE_HARDWARE) && - (counter->attr.type != PERF_TYPE_HW_CACHE); -} - -extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; - -extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); - -static inline void -perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) -{ - if (atomic_read(&perf_swcounter_enabled[event])) - __perf_swcounter_event(event, nr, nmi, regs, addr); -} - -extern void __perf_counter_mmap(struct vm_area_struct *vma); - -static inline void perf_counter_mmap(struct vm_area_struct *vma) -{ - if (vma->vm_flags & VM_EXEC) - __perf_counter_mmap(vma); -} - -extern void perf_counter_comm(struct task_struct *tsk); 
-extern void perf_counter_fork(struct task_struct *tsk); - -extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); - -extern int sysctl_perf_counter_paranoid; -extern int sysctl_perf_counter_mlock; -extern int sysctl_perf_counter_sample_rate; - -extern void perf_counter_init(void); -extern void perf_tpcounter_event(int event_id, u64 addr, u64 count, - void *record, int entry_size); - -#ifndef perf_misc_flags -#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ - PERF_EVENT_MISC_KERNEL) -#define perf_instruction_pointer(regs) instruction_pointer(regs) -#endif - -extern int perf_output_begin(struct perf_output_handle *handle, - struct perf_counter *counter, unsigned int size, - int nmi, int sample); -extern void perf_output_end(struct perf_output_handle *handle); -extern void perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len); -#else -static inline void -perf_counter_task_sched_in(struct task_struct *task, int cpu) { } -static inline void -perf_counter_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu) { } -static inline void -perf_counter_task_tick(struct task_struct *task, int cpu) { } -static inline int perf_counter_init_task(struct task_struct *child) { return 0; } -static inline void perf_counter_exit_task(struct task_struct *child) { } -static inline void perf_counter_free_task(struct task_struct *task) { } -static inline void perf_counter_do_pending(void) { } -static inline void perf_counter_print_debug(void) { } -static inline void perf_disable(void) { } -static inline void perf_enable(void) { } -static inline int perf_counter_task_disable(void) { return -EINVAL; } -static inline int perf_counter_task_enable(void) { return -EINVAL; } - -static inline void -perf_swcounter_event(u32 event, u64 nr, int nmi, - struct pt_regs *regs, u64 addr) { } - -static inline void perf_counter_mmap(struct vm_area_struct *vma) { } -static inline void perf_counter_comm(struct task_struct *tsk) { } -static inline void perf_counter_fork(struct task_struct *tsk) { } -static inline void perf_counter_init(void) { } - -#endif - -#define perf_output_put(handle, x) \ - perf_output_copy((handle), &(x), sizeof(x)) - -#endif /* __KERNEL__ */ -#endif /* _LINUX_PERF_COUNTER_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h new file mode 100644 index 00000000000..ae9d9ed6df2 --- /dev/null +++ b/include/linux/perf_event.h @@ -0,0 +1,858 @@ +/* + * Performance events: + * + * Copyright (C) 2008-2009, Thomas Gleixner + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra + * + * Data type definitions, declarations, prototypes. 
+ * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_PERF_EVENT_H +#define _LINUX_PERF_EVENT_H + +#include +#include +#include + +/* + * User-space ABI bits: + */ + +/* + * attr.type + */ +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + + PERF_TYPE_MAX, /* non-ABI */ +}; + +/* + * Generalized performance event event_id types, used by the + * attr.event_id parameter of the sys_perf_event_open() + * syscall: + */ +enum perf_hw_id { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + + PERF_COUNT_HW_MAX, /* non-ABI */ +}; + +/* + * Generalized hardware cache events: + * + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x + * { read, write, prefetch } x + * { accesses, misses } + */ +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + + PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + + PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ +}; + +/* + * Special "software" events provided by the kernel, even if the hardware + * does not support performance events. These events measure various + * physical and sw events of the kernel (and allow the profiling of them as + * well): + */ +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + + PERF_COUNT_SW_MAX, /* non-ABI */ +}; + +/* + * Bits that can be set in attr.sample_type to request information + * in the overflow packets. 
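/*
 * Illustrative sketch, not from the patch itself: the enums above are what
 * user space plugs into the 'type' and 'config' fields of the attribute
 * structure defined further down in this header. How the three cache enums
 * pack into one config value is not spelled out in this hunk; the
 * id | (op << 8) | (result << 16) layout below is an assumption.
 */
static void pick_generalized_event(struct perf_event_attr *attr)
{
	*attr = (struct perf_event_attr){ .size = sizeof(*attr) };

	/* a generalized hardware event: */
	attr->type   = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CACHE_MISSES;

	/* ...or a kernel software event: */
	attr->type   = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_CONTEXT_SWITCHES;

	/* ...or one cell of the { cache } x { op } x { result } cube
	 * (packing assumed, see note above): */
	attr->type   = PERF_TYPE_HW_CACHE;
	attr->config = PERF_COUNT_HW_CACHE_L1D |
		       (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
}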
+ */ +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_READ = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_ID = 1U << 6, + PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, + PERF_SAMPLE_STREAM_ID = 1U << 9, + PERF_SAMPLE_RAW = 1U << 10, + + PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ +}; + +/* + * The format of the data returned by read() on a perf event fd, + * as specified by attr.read_format: + * + * struct read_format { + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 id; } && PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP + * + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP + * }; + */ +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, + PERF_FORMAT_ID = 1U << 2, + PERF_FORMAT_GROUP = 1U << 3, + + PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ +}; + +#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ + +/* + * Hardware event_id to monitor via a performance monitoring event: + */ +struct perf_event_attr { + + /* + * Major type: hardware/software/tracepoint/etc. + */ + __u32 type; + + /* + * Size of the attr structure, for fwd/bwd compat. + */ + __u32 size; + + /* + * Type specific configuration information. + */ + __u64 config; + + union { + __u64 sample_period; + __u64 sample_freq; + }; + + __u64 sample_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + mmap : 1, /* include mmap data */ + comm : 1, /* include comm data */ + freq : 1, /* use freq, not period */ + inherit_stat : 1, /* per task counts */ + enable_on_exec : 1, /* next exec enables */ + task : 1, /* trace fork/exit */ + watermark : 1, /* wakeup_watermark */ + + __reserved_1 : 49; + + union { + __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_watermark; /* bytes before wakeup */ + }; + __u32 __reserved_2; + + __u64 __reserved_3; +}; + +/* + * Ioctls that can be done on a perf event fd: + */ +#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_RESET _IO ('$', 3) +#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) +#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1U << 0, +}; + +/* + * Structure of the page that can be mapped via mmap + */ +struct perf_event_mmap_page { + __u32 version; /* version number of this structure */ + __u32 compat_version; /* lowest version this is compat with */ + + /* + * Bits needed to read the hw events in user-space. + * + * u32 seq; + * s64 count; + * + * do { + * seq = pc->lock; + * + * barrier() + * if (pc->index) { + * count = pmc_read(pc->index - 1); + * count += pc->offset; + * } else + * goto regular_read; + * + * barrier(); + * } while (pc->lock != seq); + * + * NOTE: for obvious reason this only works on self-monitoring + * processes. 
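/*
 * Illustrative sketch, not from the patch itself: the read loop described
 * in pseudo-code above, expanded slightly. pmc_read() stands for whatever
 * unprivileged counter-read instruction the architecture provides and
 * regular_read() for a fallback read() on the event fd -- both are
 * placeholders carried over from the comment, not declarations in this
 * header.
 */
static s64 read_self_monitoring(volatile struct perf_event_mmap_page *pc)
{
	u32 seq;
	s64 count;

	do {
		seq = pc->lock;			/* snapshot the seqlock      */
		barrier();

		if (!pc->index)			/* no hw counter assigned    */
			return regular_read();

		count  = pmc_read(pc->index - 1);
		count += pc->offset;		/* kernel-maintained base    */

		barrier();
	} while (pc->lock != seq);		/* kernel updated it: retry  */

	return count;
}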
+ */ + __u32 lock; /* seqlock for synchronization */ + __u32 index; /* hardware event identifier */ + __s64 offset; /* add to hardware event value */ + __u64 time_enabled; /* time event active */ + __u64 time_running; /* time event on cpu */ + + /* + * Hole for extension of the self monitor capabilities + */ + + __u64 __reserved[123]; /* align to 1k */ + + /* + * Control data for the mmap() data buffer. + * + * User-space reading the @data_head value should issue an rmb(), on + * SMP capable platforms, after reading this value -- see + * perf_event_wakeup(). + * + * When the mapping is PROT_WRITE the @data_tail value should be + * written by userspace to reflect the last read data. In this case + * the kernel will not over-write unread data. + */ + __u64 data_head; /* head in the data section */ + __u64 data_tail; /* user-space written tail */ +}; + +#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0) +#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) +#define PERF_RECORD_MISC_KERNEL (1 << 0) +#define PERF_RECORD_MISC_USER (2 << 0) +#define PERF_RECORD_MISC_HYPERVISOR (3 << 0) + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + + /* + * The MMAP events record the PROT_EXEC mappings so that we can + * correlate userspace IPs to code. They have the following structure: + * + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; + * }; + */ + PERF_RECORD_MMAP = 1, + + /* + * struct { + * struct perf_event_header header; + * u64 id; + * u64 lost; + * }; + */ + PERF_RECORD_LOST = 2, + + /* + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * char comm[]; + * }; + */ + PERF_RECORD_COMM = 3, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * u64 time; + * }; + */ + PERF_RECORD_EXIT = 4, + + /* + * struct { + * struct perf_event_header header; + * u64 time; + * u64 id; + * u64 stream_id; + * }; + */ + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * { u64 time; } && PERF_SAMPLE_TIME + * }; + */ + PERF_RECORD_FORK = 7, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, tid; + * + * struct read_format values; + * }; + */ + PERF_RECORD_READ = 8, + + /* + * struct { + * struct perf_event_header header; + * + * { u64 ip; } && PERF_SAMPLE_IP + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 addr; } && PERF_SAMPLE_ADDR + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u64 period; } && PERF_SAMPLE_PERIOD + * + * { struct read_format values; } && PERF_SAMPLE_READ + * + * { u64 nr, + * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN + * + * # + * # The RAW record below is opaque data wrt the ABI + * # + * # That is, the ABI doesn't make any promises wrt to + * # the stability of its content, it may vary depending + * # on event_id, hardware, kernel version and phase of + * # the moon. + * # + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
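/*
 * Illustrative sketch, not from the patch itself: a minimal consumer of the
 * mmap()ed data area steered by data_head/data_tail above. Assumptions this
 * header does not spell out: the record pages start right after this
 * control page, the buffer size is a power of two so offsets can be masked,
 * and no record straddles the wrap-around point.
 */
static void drain_ring(struct perf_event_mmap_page *pc,
		       unsigned char *data, u64 mask)
{
	u64 head = pc->data_head;
	u64 tail = pc->data_tail;

	rmb();		/* required after reading data_head, per the comment */

	while (tail < head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)(data + (tail & mask));

		switch (hdr->type) {
		case PERF_RECORD_SAMPLE: {
			/* assuming attr.sample_type was only
			 * PERF_SAMPLE_IP | PERF_SAMPLE_TID, the body follows
			 * the documented order: ip, then pid/tid */
			u64 *body = (u64 *)(hdr + 1);
			u64  ip   = body[0];
			u32  pid  = ((u32 *)&body[1])[0];
			u32  tid  = ((u32 *)&body[1])[1];

			(void)ip; (void)pid; (void)tid;
			break;
		}
		case PERF_RECORD_MMAP:
		case PERF_RECORD_COMM:
		case PERF_RECORD_FORK:
		case PERF_RECORD_EXIT:
		case PERF_RECORD_LOST:
		default:
			break;
		}
		tail += hdr->size;
	}

	pc->data_tail = tail;	/* only honoured for PROT_WRITE mappings */
}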
+ * # + * + * { u32 size; + * char data[size];}&& PERF_SAMPLE_RAW + * }; + */ + PERF_RECORD_SAMPLE = 9, + + PERF_RECORD_MAX, /* non-ABI */ +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, + + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, + + PERF_CONTEXT_MAX = (__u64)-4095, +}; + +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) + +#ifdef __KERNEL__ +/* + * Kernel-internal data types and definitions: + */ + +#ifdef CONFIG_PERF_EVENTS +# include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PERF_MAX_STACK_DEPTH 255 + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[PERF_MAX_STACK_DEPTH]; +}; + +struct perf_raw_record { + u32 size; + void *data; +}; + +struct task_struct; + +/** + * struct hw_perf_event - performance event hardware details: + */ +struct hw_perf_event { +#ifdef CONFIG_PERF_EVENTS + union { + struct { /* hardware */ + u64 config; + unsigned long config_base; + unsigned long event_base; + int idx; + }; + union { /* software */ + atomic64_t count; + struct hrtimer hrtimer; + }; + }; + atomic64_t prev_count; + u64 sample_period; + u64 last_period; + atomic64_t period_left; + u64 interrupts; + + u64 freq_count; + u64 freq_interrupts; + u64 freq_stamp; +#endif +}; + +struct perf_event; + +/** + * struct pmu - generic performance monitoring unit + */ +struct pmu { + int (*enable) (struct perf_event *event); + void (*disable) (struct perf_event *event); + void (*read) (struct perf_event *event); + void (*unthrottle) (struct perf_event *event); +}; + +/** + * enum perf_event_active_state - the states of a event + */ +enum perf_event_active_state { + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +struct file; + +struct perf_mmap_data { + struct rcu_head rcu_head; + int nr_pages; /* nr of data pages */ + int writable; /* are we writable */ + int nr_locked; /* nr pages mlocked */ + + atomic_t poll; /* POLL_ for wakeups */ + atomic_t events; /* event_id limit */ + + atomic_long_t head; /* write position */ + atomic_long_t done_head; /* completed head */ + + atomic_t lock; /* concurrent writes */ + atomic_t wakeup; /* needs a wakeup */ + atomic_t lost; /* nr records lost */ + + long watermark; /* wakeup watermark */ + + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct perf_pending_entry { + struct perf_pending_entry *next; + void (*func)(struct perf_pending_entry *); +}; + +/** + * struct perf_event - performance event kernel representation: + */ +struct perf_event { +#ifdef CONFIG_PERF_EVENTS + struct list_head group_entry; + struct list_head event_entry; + struct list_head sibling_list; + int nr_siblings; + struct perf_event *group_leader; + struct perf_event *output; + const struct pmu *pmu; + + enum perf_event_active_state state; + atomic64_t count; + + /* + * These are the total time in nanoseconds that the event + * has been enabled (i.e. eligible to run, and the task has + * been scheduled in, if this is a per-task event) + * and running (scheduled onto the CPU), respectively. + * + * They are computed from tstamp_enabled, tstamp_running and + * tstamp_stopped when the event is in INACTIVE or ACTIVE state. 
+ */ + u64 total_time_enabled; + u64 total_time_running; + + /* + * These are timestamps used for computing total_time_enabled + * and total_time_running when the event is in INACTIVE or + * ACTIVE state, measured in nanoseconds from an arbitrary point + * in time. + * tstamp_enabled: the notional time when the event was enabled + * tstamp_running: the notional time when the event was scheduled on + * tstamp_stopped: in INACTIVE state, the notional time when the + * event was scheduled off. + */ + u64 tstamp_enabled; + u64 tstamp_running; + u64 tstamp_stopped; + + struct perf_event_attr attr; + struct hw_perf_event hw; + + struct perf_event_context *ctx; + struct file *filp; + + /* + * These accumulate total time (in nanoseconds) that children + * events have been enabled and running, respectively. + */ + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + + /* + * Protect attach/detach and child_list: + */ + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + + int oncpu; + int cpu; + + struct list_head owner_entry; + struct task_struct *owner; + + /* mmap bits */ + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_mmap_data *data; + + /* poll related */ + wait_queue_head_t waitq; + struct fasync_struct *fasync; + + /* delayed work for NMIs and such */ + int pending_wakeup; + int pending_kill; + int pending_disable; + struct perf_pending_entry pending; + + atomic_t event_limit; + + void (*destroy)(struct perf_event *); + struct rcu_head rcu_head; + + struct pid_namespace *ns; + u64 id; +#endif +}; + +/** + * struct perf_event_context - event context structure + * + * Used as a container for task events and CPU events as well: + */ +struct perf_event_context { + /* + * Protect the states of the events in the list, + * nr_active, and the list: + */ + spinlock_t lock; + /* + * Protect the list of events. Locking either mutex or lock + * is sufficient to ensure the list doesn't change; to change + * the list you need to lock both the mutex and the spinlock. + */ + struct mutex mutex; + + struct list_head group_list; + struct list_head event_list; + int nr_events; + int nr_active; + int is_active; + int nr_stat; + atomic_t refcount; + struct task_struct *task; + + /* + * Context clock, runs when context enabled. + */ + u64 time; + u64 timestamp; + + /* + * These fields let us detect when two contexts have both + * been cloned (inherited) from a common ancestor. 
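/*
 * Illustrative sketch, not from the patch itself: the locking rule stated
 * in the comments above, restated as code. inspect() is a placeholder; the
 * write side mirrors list_add_counter() in the kernel/perf_counter.c hunk
 * further down (modulo the counter -> event rename).
 */
static void read_side(struct perf_event_context *ctx)
{
	struct perf_event *event;

	/* a reader may take either the mutex or the spinlock: */
	mutex_lock(&ctx->mutex);
	list_for_each_entry(event, &ctx->event_list, event_entry)
		inspect(event);			/* placeholder */
	mutex_unlock(&ctx->mutex);
}

static void write_side(struct perf_event_context *ctx, struct perf_event *event)
{
	/* a writer must hold both locks before changing the list: */
	mutex_lock(&ctx->mutex);
	spin_lock_irq(&ctx->lock);
	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);
}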
+ */ + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + struct rcu_head rcu_head; +}; + +/** + * struct perf_event_cpu_context - per cpu event context structure + */ +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int active_oncpu; + int max_pertask; + int exclusive; + + /* + * Recursion avoidance: + * + * task, softirq, irq, nmi context + */ + int recursion[4]; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_mmap_data *data; + unsigned long head; + unsigned long offset; + int nmi; + int sample; + int locked; + unsigned long flags; +}; + +#ifdef CONFIG_PERF_EVENTS + +/* + * Set by architecture code: + */ +extern int perf_max_events; + +extern const struct pmu *hw_perf_event_init(struct perf_event *event); + +extern void perf_event_task_sched_in(struct task_struct *task, int cpu); +extern void perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu); +extern void perf_event_task_tick(struct task_struct *task, int cpu); +extern int perf_event_init_task(struct task_struct *child); +extern void perf_event_exit_task(struct task_struct *child); +extern void perf_event_free_task(struct task_struct *task); +extern void set_perf_event_pending(void); +extern void perf_event_do_pending(void); +extern void perf_event_print_debug(void); +extern void __perf_disable(void); +extern bool __perf_enable(void); +extern void perf_disable(void); +extern void perf_enable(void); +extern int perf_event_task_disable(void); +extern int perf_event_task_enable(void); +extern int hw_perf_group_sched_in(struct perf_event *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx, int cpu); +extern void perf_event_update_userpage(struct perf_event *event); + +struct perf_sample_data { + u64 type; + + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 addr; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 period; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; +}; + +extern void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event); +extern void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event, + struct pt_regs *regs); + +extern int perf_event_overflow(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs); + +/* + * Return 1 for a software event, 0 for a hardware event + */ +static inline int is_software_event(struct perf_event *event) +{ + return (event->attr.type != PERF_TYPE_RAW) && + (event->attr.type != PERF_TYPE_HARDWARE) && + (event->attr.type != PERF_TYPE_HW_CACHE); +} + +extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; + +extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); + +static inline void +perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) +{ + if (atomic_read(&perf_swevent_enabled[event_id])) + __perf_sw_event(event_id, nr, nmi, regs, addr); +} + +extern void __perf_event_mmap(struct vm_area_struct *vma); + +static inline void perf_event_mmap(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_EXEC) + __perf_event_mmap(vma); +} + +extern void perf_event_comm(struct task_struct *tsk); +extern void perf_event_fork(struct task_struct *tsk); + +extern struct perf_callchain_entry 
*perf_callchain(struct pt_regs *regs); + +extern int sysctl_perf_event_paranoid; +extern int sysctl_perf_event_mlock; +extern int sysctl_perf_event_sample_rate; + +extern void perf_event_init(void); +extern void perf_tp_event(int event_id, u64 addr, u64 count, + void *record, int entry_size); + +#ifndef perf_misc_flags +#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ + PERF_RECORD_MISC_KERNEL) +#define perf_instruction_pointer(regs) instruction_pointer(regs) +#endif + +extern int perf_output_begin(struct perf_output_handle *handle, + struct perf_event *event, unsigned int size, + int nmi, int sample); +extern void perf_output_end(struct perf_output_handle *handle); +extern void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len); +#else +static inline void +perf_event_task_sched_in(struct task_struct *task, int cpu) { } +static inline void +perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu) { } +static inline void +perf_event_task_tick(struct task_struct *task, int cpu) { } +static inline int perf_event_init_task(struct task_struct *child) { return 0; } +static inline void perf_event_exit_task(struct task_struct *child) { } +static inline void perf_event_free_task(struct task_struct *task) { } +static inline void perf_event_do_pending(void) { } +static inline void perf_event_print_debug(void) { } +static inline void perf_disable(void) { } +static inline void perf_enable(void) { } +static inline int perf_event_task_disable(void) { return -EINVAL; } +static inline int perf_event_task_enable(void) { return -EINVAL; } + +static inline void +perf_sw_event(u32 event_id, u64 nr, int nmi, + struct pt_regs *regs, u64 addr) { } + +static inline void perf_event_mmap(struct vm_area_struct *vma) { } +static inline void perf_event_comm(struct task_struct *tsk) { } +static inline void perf_event_fork(struct task_struct *tsk) { } +static inline void perf_event_init(void) { } + +#endif + +#define perf_output_put(handle, x) \ + perf_output_copy((handle), &(x), sizeof(x)) + +#endif /* __KERNEL__ */ +#endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/prctl.h b/include/linux/prctl.h index b00df4c79c6..07bff666e65 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -85,7 +85,7 @@ #define PR_SET_TIMERSLACK 29 #define PR_GET_TIMERSLACK 30 -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 +#define PR_TASK_PERF_EVENTS_DISABLE 31 +#define PR_TASK_PERF_EVENTS_ENABLE 32 #endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8af3d249170..8b265a8986d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -100,7 +100,7 @@ struct robust_list_head; struct bio; struct fs_struct; struct bts_context; -struct perf_counter_context; +struct perf_event_context; /* * List of flags we want to share for kernel threads, @@ -701,7 +701,7 @@ struct user_struct { #endif #endif -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS atomic_long_t locked_vm; #endif }; @@ -1449,10 +1449,10 @@ struct task_struct { struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; #endif -#ifdef CONFIG_PERF_COUNTERS - struct perf_counter_context *perf_counter_ctxp; - struct mutex perf_counter_mutex; - struct list_head perf_counter_list; +#ifdef CONFIG_PERF_EVENTS + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; #endif #ifdef CONFIG_NUMA struct mempolicy *mempolicy; /* 
Protected by alloc_lock */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a8e37821cc6..02f19f9a76c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -55,7 +55,7 @@ struct compat_timeval; struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; -struct perf_counter_attr; +struct perf_event_attr; #include #include @@ -885,7 +885,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, int kernel_execve(const char *filename, char *const argv[], char *const envp[]); -asmlinkage long sys_perf_counter_open( - struct perf_counter_attr __user *attr_uptr, +asmlinkage long sys_perf_event_open( + struct perf_event_attr __user *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags); #endif diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 72a3b437b82..ec91e78244f 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -378,7 +378,7 @@ static inline int ftrace_get_offsets_##call( \ #ifdef CONFIG_EVENT_PROFILE /* - * Generate the functions needed for tracepoint perf_counter support. + * Generate the functions needed for tracepoint perf_event support. * * NOTE: The insertion profile callback (ftrace_profile_) is defined later * @@ -656,7 +656,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * { * struct ftrace_data_offsets_ __maybe_unused __data_offsets; * struct ftrace_event_call *event_call = &event_; - * extern void perf_tpcounter_event(int, u64, u64, void *, int); + * extern void perf_tp_event(int, u64, u64, void *, int); * struct ftrace_raw_##call *entry; * u64 __addr = 0, __count = 1; * unsigned long irq_flags; @@ -691,7 +691,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * * <- affect our values * - * perf_tpcounter_event(event_call->id, __addr, __count, entry, + * perf_tp_event(event_call->id, __addr, __count, entry, * __entry_size); <- submit them to perf counter * } while (0); * @@ -712,7 +712,7 @@ static void ftrace_profile_##call(proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_event_call *event_call = &event_##call; \ - extern void perf_tpcounter_event(int, u64, u64, void *, int); \ + extern void perf_tp_event(int, u64, u64, void *, int); \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ @@ -742,7 +742,7 @@ static void ftrace_profile_##call(proto) \ \ { assign; } \ \ - perf_tpcounter_event(event_call->id, __addr, __count, entry,\ + perf_tp_event(event_call->id, __addr, __count, entry,\ __entry_size); \ } while (0); \ \ diff --git a/init/Kconfig b/init/Kconfig index 8e8b76d8a27..cfdf5c32280 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -915,17 +915,17 @@ config AIO by some high performance threaded applications. Disabling this option saves about 7k. -config HAVE_PERF_COUNTERS +config HAVE_PERF_EVENTS bool help See tools/perf/design.txt for details. menu "Performance Counters" -config PERF_COUNTERS +config PERF_EVENTS bool "Kernel Performance Counters" default y if PROFILING - depends on HAVE_PERF_COUNTERS + depends on HAVE_PERF_EVENTS select ANON_INODES help Enable kernel support for performance counter hardware. @@ -947,7 +947,7 @@ config PERF_COUNTERS config EVENT_PROFILE bool "Tracepoint profiling sources" - depends on PERF_COUNTERS && EVENT_TRACING + depends on PERF_EVENTS && EVENT_TRACING default y help Allow the use of tracepoints as software performance counters. 
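A user-space sketch of the renamed interface (illustrative, not part of the series): the system call itself is renamed in the asm-generic/unistd.h and include/linux/syscalls.h hunks above, and there is no C-library wrapper at this point, so the raw syscall number is used. The pid/cpu convention in the comments (0 and -1 meaning "the calling task, on any CPU") is conventional perf usage and an assumption of this sketch rather than something these hunks state.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#ifndef __NR_perf_event_open
# define __NR_perf_event_open 241	/* asm-generic value from the hunk above;
					   real architectures use their own number */
#endif

/* no glibc wrapper exists for the renamed call, so go through syscall(2) */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	struct {			/* read_format layout documented above:
					   value plus the two time fields */
		uint64_t value;
		uint64_t time_enabled;
		uint64_t time_running;
	} res;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size        = sizeof(attr);	/* PERF_ATTR_SIZE_VER0 */
	attr.type        = PERF_TYPE_HARDWARE;
	attr.config      = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled    = 1;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* pid == 0, cpu == -1: measure the calling task on any CPU
	 * (conventional usage, assumed here) */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &res, sizeof(res)) == (ssize_t)sizeof(res))
		printf("cycles=%llu enabled=%llu running=%llu\n",
		       (unsigned long long)res.value,
		       (unsigned long long)res.time_enabled,
		       (unsigned long long)res.time_running);

	close(fd);
	return 0;
}

The prctl constants renamed in the include/linux/prctl.h hunk (PR_TASK_PERF_EVENTS_DISABLE / PR_TASK_PERF_EVENTS_ENABLE) are the complementary big hammer: they flip all of the calling task's events at once, via the perf_event_task_disable()/perf_event_task_enable() hooks declared in the new header.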
diff --git a/kernel/Makefile b/kernel/Makefile index 3d9c7e27e3f..e26a546eac4 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -96,7 +96,7 @@ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_SLOW_WORK) += slow-work.o -obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is diff --git a/kernel/exit.c b/kernel/exit.c index ae5d8660ddf..e47ee8a0613 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include @@ -154,8 +154,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); -#ifdef CONFIG_PERF_COUNTERS - WARN_ON_ONCE(tsk->perf_counter_ctxp); +#ifdef CONFIG_PERF_EVENTS + WARN_ON_ONCE(tsk->perf_event_ctxp); #endif trace_sched_process_free(tsk); put_task_struct(tsk); @@ -981,7 +981,7 @@ NORET_TYPE void do_exit(long code) * Flush inherited counters to the parent - before the parent * gets woken up by child-exit notifications. */ - perf_counter_exit_task(tsk); + perf_event_exit_task(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA diff --git a/kernel/fork.c b/kernel/fork.c index bfee931ee3f..2cebfb23b0b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -61,7 +61,7 @@ #include #include #include -#include +#include #include #include @@ -1078,7 +1078,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p, clone_flags); - retval = perf_counter_init_task(p); + retval = perf_event_init_task(p); if (retval) goto bad_fork_cleanup_policy; @@ -1253,7 +1253,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); - perf_counter_fork(p); + perf_event_fork(p); return p; bad_fork_free_pid: @@ -1280,7 +1280,7 @@ bad_fork_cleanup_semundo: bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_policy: - perf_counter_free_task(p); + perf_event_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_cgroup: diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c deleted file mode 100644 index 62de0db8092..00000000000 --- a/kernel/perf_counter.c +++ /dev/null @@ -1,5000 +0,0 @@ -/* - * Performance counter core code - * - * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra - * Copyright © 2009 Paul Mackerras, IBM Corp. 
- * - * For licensing details see kernel-base/COPYING - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* - * Each CPU has a list of per CPU counters: - */ -DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); - -int perf_max_counters __read_mostly = 1; -static int perf_reserved_percpu __read_mostly; -static int perf_overcommit __read_mostly = 1; - -static atomic_t nr_counters __read_mostly; -static atomic_t nr_mmap_counters __read_mostly; -static atomic_t nr_comm_counters __read_mostly; -static atomic_t nr_task_counters __read_mostly; - -/* - * perf counter paranoia level: - * -1 - not paranoid at all - * 0 - disallow raw tracepoint access for unpriv - * 1 - disallow cpu counters for unpriv - * 2 - disallow kernel profiling for unpriv - */ -int sysctl_perf_counter_paranoid __read_mostly = 1; - -static inline bool perf_paranoid_tracepoint_raw(void) -{ - return sysctl_perf_counter_paranoid > -1; -} - -static inline bool perf_paranoid_cpu(void) -{ - return sysctl_perf_counter_paranoid > 0; -} - -static inline bool perf_paranoid_kernel(void) -{ - return sysctl_perf_counter_paranoid > 1; -} - -int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ - -/* - * max perf counter sample rate - */ -int sysctl_perf_counter_sample_rate __read_mostly = 100000; - -static atomic64_t perf_counter_id; - -/* - * Lock for (sysadmin-configurable) counter reservations: - */ -static DEFINE_SPINLOCK(perf_resource_lock); - -/* - * Architecture provided APIs - weak aliases: - */ -extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter) -{ - return NULL; -} - -void __weak hw_perf_disable(void) { barrier(); } -void __weak hw_perf_enable(void) { barrier(); } - -void __weak hw_perf_counter_setup(int cpu) { barrier(); } -void __weak hw_perf_counter_setup_online(int cpu) { barrier(); } - -int __weak -hw_perf_group_sched_in(struct perf_counter *group_leader, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx, int cpu) -{ - return 0; -} - -void __weak perf_counter_print_debug(void) { } - -static DEFINE_PER_CPU(int, perf_disable_count); - -void __perf_disable(void) -{ - __get_cpu_var(perf_disable_count)++; -} - -bool __perf_enable(void) -{ - return !--__get_cpu_var(perf_disable_count); -} - -void perf_disable(void) -{ - __perf_disable(); - hw_perf_disable(); -} - -void perf_enable(void) -{ - if (__perf_enable()) - hw_perf_enable(); -} - -static void get_ctx(struct perf_counter_context *ctx) -{ - WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); -} - -static void free_ctx(struct rcu_head *head) -{ - struct perf_counter_context *ctx; - - ctx = container_of(head, struct perf_counter_context, rcu_head); - kfree(ctx); -} - -static void put_ctx(struct perf_counter_context *ctx) -{ - if (atomic_dec_and_test(&ctx->refcount)) { - if (ctx->parent_ctx) - put_ctx(ctx->parent_ctx); - if (ctx->task) - put_task_struct(ctx->task); - call_rcu(&ctx->rcu_head, free_ctx); - } -} - -static void unclone_ctx(struct perf_counter_context *ctx) -{ - if (ctx->parent_ctx) { - put_ctx(ctx->parent_ctx); - ctx->parent_ctx = NULL; - } -} - -/* - * If we inherit counters we want to return the parent counter id - * to userspace. 
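/*
 * Illustrative sketch, not from the patch itself: how the paranoia helpers
 * above are meant to be consumed. The actual call sites are outside this
 * hunk, and the CAP_SYS_ADMIN override shown here is the conventional
 * escape hatch, assumed rather than taken from this excerpt.
 */
static int check_paranoia(struct perf_counter_attr *attr, int cpu_wide)
{
	if (cpu_wide && perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;		/* level >= 1: no cpu-wide counters
					   for unprivileged users */

	if (!attr->exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;		/* level >= 2: no kernel profiling
					   for unprivileged users */

	return 0;
}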
- */ -static u64 primary_counter_id(struct perf_counter *counter) -{ - u64 id = counter->id; - - if (counter->parent) - id = counter->parent->id; - - return id; -} - -/* - * Get the perf_counter_context for a task and lock it. - * This has to cope with with the fact that until it is locked, - * the context could get moved to another task. - */ -static struct perf_counter_context * -perf_lock_task_context(struct task_struct *task, unsigned long *flags) -{ - struct perf_counter_context *ctx; - - rcu_read_lock(); - retry: - ctx = rcu_dereference(task->perf_counter_ctxp); - if (ctx) { - /* - * If this context is a clone of another, it might - * get swapped for another underneath us by - * perf_counter_task_sched_out, though the - * rcu_read_lock() protects us from any context - * getting freed. Lock the context and check if it - * got swapped before we could get the lock, and retry - * if so. If we locked the right context, then it - * can't get swapped on us any more. - */ - spin_lock_irqsave(&ctx->lock, *flags); - if (ctx != rcu_dereference(task->perf_counter_ctxp)) { - spin_unlock_irqrestore(&ctx->lock, *flags); - goto retry; - } - - if (!atomic_inc_not_zero(&ctx->refcount)) { - spin_unlock_irqrestore(&ctx->lock, *flags); - ctx = NULL; - } - } - rcu_read_unlock(); - return ctx; -} - -/* - * Get the context for a task and increment its pin_count so it - * can't get swapped to another task. This also increments its - * reference count so that the context can't get freed. - */ -static struct perf_counter_context *perf_pin_task_context(struct task_struct *task) -{ - struct perf_counter_context *ctx; - unsigned long flags; - - ctx = perf_lock_task_context(task, &flags); - if (ctx) { - ++ctx->pin_count; - spin_unlock_irqrestore(&ctx->lock, flags); - } - return ctx; -} - -static void perf_unpin_context(struct perf_counter_context *ctx) -{ - unsigned long flags; - - spin_lock_irqsave(&ctx->lock, flags); - --ctx->pin_count; - spin_unlock_irqrestore(&ctx->lock, flags); - put_ctx(ctx); -} - -/* - * Add a counter from the lists for its context. - * Must be called with ctx->mutex and ctx->lock held. - */ -static void -list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) -{ - struct perf_counter *group_leader = counter->group_leader; - - /* - * Depending on whether it is a standalone or sibling counter, - * add it straight to the context's counter list, or to the group - * leader's sibling list: - */ - if (group_leader == counter) - list_add_tail(&counter->group_entry, &ctx->group_list); - else { - list_add_tail(&counter->group_entry, &group_leader->sibling_list); - group_leader->nr_siblings++; - } - - list_add_rcu(&counter->event_entry, &ctx->event_list); - ctx->nr_counters++; - if (counter->attr.inherit_stat) - ctx->nr_stat++; -} - -/* - * Remove a counter from the lists for its context. - * Must be called with ctx->mutex and ctx->lock held. 
- */ -static void -list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) -{ - struct perf_counter *sibling, *tmp; - - if (list_empty(&counter->group_entry)) - return; - ctx->nr_counters--; - if (counter->attr.inherit_stat) - ctx->nr_stat--; - - list_del_init(&counter->group_entry); - list_del_rcu(&counter->event_entry); - - if (counter->group_leader != counter) - counter->group_leader->nr_siblings--; - - /* - * If this was a group counter with sibling counters then - * upgrade the siblings to singleton counters by adding them - * to the context list directly: - */ - list_for_each_entry_safe(sibling, tmp, &counter->sibling_list, group_entry) { - - list_move_tail(&sibling->group_entry, &ctx->group_list); - sibling->group_leader = sibling; - } -} - -static void -counter_sched_out(struct perf_counter *counter, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx) -{ - if (counter->state != PERF_COUNTER_STATE_ACTIVE) - return; - - counter->state = PERF_COUNTER_STATE_INACTIVE; - if (counter->pending_disable) { - counter->pending_disable = 0; - counter->state = PERF_COUNTER_STATE_OFF; - } - counter->tstamp_stopped = ctx->time; - counter->pmu->disable(counter); - counter->oncpu = -1; - - if (!is_software_counter(counter)) - cpuctx->active_oncpu--; - ctx->nr_active--; - if (counter->attr.exclusive || !cpuctx->active_oncpu) - cpuctx->exclusive = 0; -} - -static void -group_sched_out(struct perf_counter *group_counter, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx) -{ - struct perf_counter *counter; - - if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) - return; - - counter_sched_out(group_counter, cpuctx, ctx); - - /* - * Schedule out siblings (if any): - */ - list_for_each_entry(counter, &group_counter->sibling_list, group_entry) - counter_sched_out(counter, cpuctx, ctx); - - if (group_counter->attr.exclusive) - cpuctx->exclusive = 0; -} - -/* - * Cross CPU call to remove a performance counter - * - * We disable the counter on the hardware level first. After that we - * remove it from the context list. - */ -static void __perf_counter_remove_from_context(void *info) -{ - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - struct perf_counter *counter = info; - struct perf_counter_context *ctx = counter->ctx; - - /* - * If this is a task context, we need to check whether it is - * the current task context of this cpu. If not it has been - * scheduled out before the smp call arrived. - */ - if (ctx->task && cpuctx->task_ctx != ctx) - return; - - spin_lock(&ctx->lock); - /* - * Protect the list operation against NMI by disabling the - * counters on a global level. - */ - perf_disable(); - - counter_sched_out(counter, cpuctx, ctx); - - list_del_counter(counter, ctx); - - if (!ctx->task) { - /* - * Allow more per task counters with respect to the - * reservation: - */ - cpuctx->max_pertask = - min(perf_max_counters - ctx->nr_counters, - perf_max_counters - perf_reserved_percpu); - } - - perf_enable(); - spin_unlock(&ctx->lock); -} - - -/* - * Remove the counter from a task's (or a CPU's) list of counters. - * - * Must be called with ctx->mutex held. - * - * CPU counters are removed with a smp call. For task counters we only - * call when the task is on a CPU. - * - * If counter->ctx is a cloned context, callers must make sure that - * every task struct that counter->ctx->task could possibly point to - * remains valid. 
This is OK when called from perf_release since
- * that only calls us on the top-level context, which can't be a clone.
- * When called from perf_counter_exit_task, it's OK because the
- * context has been detached from its task.
- */
-static void perf_counter_remove_from_context(struct perf_counter *counter)
-{
- struct perf_counter_context *ctx = counter->ctx;
- struct task_struct *task = ctx->task;
-
- if (!task) {
- /*
- * Per cpu counters are removed via an smp call and
- * the removal is always successful.
- */
- smp_call_function_single(counter->cpu,
- __perf_counter_remove_from_context,
- counter, 1);
- return;
- }
-
-retry:
- task_oncpu_function_call(task, __perf_counter_remove_from_context,
- counter);
-
- spin_lock_irq(&ctx->lock);
- /*
- * If the context is active we need to retry the smp call.
- */
- if (ctx->nr_active && !list_empty(&counter->group_entry)) {
- spin_unlock_irq(&ctx->lock);
- goto retry;
- }
-
- /*
- * The lock prevents this context from being scheduled in so we
- * can remove the counter safely, if the call above did not
- * succeed.
- */
- if (!list_empty(&counter->group_entry)) {
- list_del_counter(counter, ctx);
- }
- spin_unlock_irq(&ctx->lock);
-}
-
-static inline u64 perf_clock(void)
-{
- return cpu_clock(smp_processor_id());
-}
-
-/*
- * Update the record of the current time in a context.
- */
-static void update_context_time(struct perf_counter_context *ctx)
-{
- u64 now = perf_clock();
-
- ctx->time += now - ctx->timestamp;
- ctx->timestamp = now;
-}
-
-/*
- * Update the total_time_enabled and total_time_running fields for a counter.
- */
-static void update_counter_times(struct perf_counter *counter)
-{
- struct perf_counter_context *ctx = counter->ctx;
- u64 run_end;
-
- if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
- counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
- return;
-
- counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
-
- if (counter->state == PERF_COUNTER_STATE_INACTIVE)
- run_end = counter->tstamp_stopped;
- else
- run_end = ctx->time;
-
- counter->total_time_running = run_end - counter->tstamp_running;
-}
-
-/*
- * Update total_time_enabled and total_time_running for all counters in a group.
- */
-static void update_group_times(struct perf_counter *leader)
-{
- struct perf_counter *counter;
-
- update_counter_times(leader);
- list_for_each_entry(counter, &leader->sibling_list, group_entry)
- update_counter_times(counter);
-}
-
-/*
- * Cross CPU call to disable a performance counter
- */
-static void __perf_counter_disable(void *info)
-{
- struct perf_counter *counter = info;
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter_context *ctx = counter->ctx;
-
- /*
- * If this is a per-task counter, need to check whether this
- * counter's task is the current task on this cpu.
- */
- if (ctx->task && cpuctx->task_ctx != ctx)
- return;
-
- spin_lock(&ctx->lock);
-
- /*
- * If the counter is on, turn it off.
- * If it is in error state, leave it in error state.
- */
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
- update_context_time(ctx);
- update_group_times(counter);
- if (counter == counter->group_leader)
- group_sched_out(counter, cpuctx, ctx);
- else
- counter_sched_out(counter, cpuctx, ctx);
- counter->state = PERF_COUNTER_STATE_OFF;
- }
-
- spin_unlock(&ctx->lock);
-}
-
-/*
- * Disable a counter.
- *
- * If counter->ctx is a cloned context, callers must make sure that
- * every task struct that counter->ctx->task could possibly point to
- * remains valid.
This condition is satisfied when called through
- * perf_counter_for_each_child or perf_counter_for_each because they
- * hold the top-level counter's child_mutex, so any descendant that
- * goes to exit will block in sync_child_counter.
- * When called from perf_pending_counter it's OK because counter->ctx
- * is the current context on this CPU and preemption is disabled,
- * hence we can't get into perf_counter_task_sched_out for this context.
- */
-static void perf_counter_disable(struct perf_counter *counter)
-{
- struct perf_counter_context *ctx = counter->ctx;
- struct task_struct *task = ctx->task;
-
- if (!task) {
- /*
- * Disable the counter on the cpu that it's on
- */
- smp_call_function_single(counter->cpu, __perf_counter_disable,
- counter, 1);
- return;
- }
-
- retry:
- task_oncpu_function_call(task, __perf_counter_disable, counter);
-
- spin_lock_irq(&ctx->lock);
- /*
- * If the counter is still active, we need to retry the cross-call.
- */
- if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
- spin_unlock_irq(&ctx->lock);
- goto retry;
- }
-
- /*
- * Since we have the lock this context can't be scheduled
- * in, so we can change the state safely.
- */
- if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
- update_group_times(counter);
- counter->state = PERF_COUNTER_STATE_OFF;
- }
-
- spin_unlock_irq(&ctx->lock);
-}
-
-static int
-counter_sched_in(struct perf_counter *counter,
- struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx,
- int cpu)
-{
- if (counter->state <= PERF_COUNTER_STATE_OFF)
- return 0;
-
- counter->state = PERF_COUNTER_STATE_ACTIVE;
- counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
- /*
- * The new state must be visible before we turn it on in the hardware:
- */
- smp_wmb();
-
- if (counter->pmu->enable(counter)) {
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->oncpu = -1;
- return -EAGAIN;
- }
-
- counter->tstamp_running += ctx->time - counter->tstamp_stopped;
-
- if (!is_software_counter(counter))
- cpuctx->active_oncpu++;
- ctx->nr_active++;
-
- if (counter->attr.exclusive)
- cpuctx->exclusive = 1;
-
- return 0;
-}
-
-static int
-group_sched_in(struct perf_counter *group_counter,
- struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx,
- int cpu)
-{
- struct perf_counter *counter, *partial_group;
- int ret;
-
- if (group_counter->state == PERF_COUNTER_STATE_OFF)
- return 0;
-
- ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
- if (ret)
- return ret < 0 ? ret : 0;
-
- if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
- return -EAGAIN;
-
- /*
- * Schedule in siblings as one group (if any):
- */
- list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
- if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
- partial_group = counter;
- goto group_error;
- }
- }
-
- return 0;
-
-group_error:
- /*
- * Groups can be scheduled in as one unit only, so undo any
- * partial group before returning:
- */
- list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
- if (counter == partial_group)
- break;
- counter_sched_out(counter, cpuctx, ctx);
- }
- counter_sched_out(group_counter, cpuctx, ctx);
-
- return -EAGAIN;
-}
-
-/*
- * Return 1 for a group consisting entirely of software counters,
- * 0 if the group contains any hardware counters.
- */ -static int is_software_only_group(struct perf_counter *leader) -{ - struct perf_counter *counter; - - if (!is_software_counter(leader)) - return 0; - - list_for_each_entry(counter, &leader->sibling_list, group_entry) - if (!is_software_counter(counter)) - return 0; - - return 1; -} - -/* - * Work out whether we can put this counter group on the CPU now. - */ -static int group_can_go_on(struct perf_counter *counter, - struct perf_cpu_context *cpuctx, - int can_add_hw) -{ - /* - * Groups consisting entirely of software counters can always go on. - */ - if (is_software_only_group(counter)) - return 1; - /* - * If an exclusive group is already on, no other hardware - * counters can go on. - */ - if (cpuctx->exclusive) - return 0; - /* - * If this group is exclusive and there are already - * counters on the CPU, it can't go on. - */ - if (counter->attr.exclusive && cpuctx->active_oncpu) - return 0; - /* - * Otherwise, try to add it if all previous groups were able - * to go on. - */ - return can_add_hw; -} - -static void add_counter_to_ctx(struct perf_counter *counter, - struct perf_counter_context *ctx) -{ - list_add_counter(counter, ctx); - counter->tstamp_enabled = ctx->time; - counter->tstamp_running = ctx->time; - counter->tstamp_stopped = ctx->time; -} - -/* - * Cross CPU call to install and enable a performance counter - * - * Must be called with ctx->mutex held - */ -static void __perf_install_in_context(void *info) -{ - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - struct perf_counter *counter = info; - struct perf_counter_context *ctx = counter->ctx; - struct perf_counter *leader = counter->group_leader; - int cpu = smp_processor_id(); - int err; - - /* - * If this is a task context, we need to check whether it is - * the current task context of this cpu. If not it has been - * scheduled out before the smp call arrived. - * Or possibly this is the right context but it isn't - * on this cpu because it had no counters. - */ - if (ctx->task && cpuctx->task_ctx != ctx) { - if (cpuctx->task_ctx || ctx->task != current) - return; - cpuctx->task_ctx = ctx; - } - - spin_lock(&ctx->lock); - ctx->is_active = 1; - update_context_time(ctx); - - /* - * Protect the list operation against NMI by disabling the - * counters on a global level. NOP for non NMI based counters. - */ - perf_disable(); - - add_counter_to_ctx(counter, ctx); - - /* - * Don't put the counter on if it is disabled or if - * it is in a group and the group isn't on. - */ - if (counter->state != PERF_COUNTER_STATE_INACTIVE || - (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) - goto unlock; - - /* - * An exclusive counter can't go on if there are already active - * hardware counters, and no hardware counter can go on if there - * is already an exclusive counter on. - */ - if (!group_can_go_on(counter, cpuctx, 1)) - err = -EEXIST; - else - err = counter_sched_in(counter, cpuctx, ctx, cpu); - - if (err) { - /* - * This counter couldn't go on. If it is in a group - * then we have to pull the whole group off. - * If the counter group is pinned then put it in error state. 
- */
- if (leader != counter)
- group_sched_out(leader, cpuctx, ctx);
- if (leader->attr.pinned) {
- update_group_times(leader);
- leader->state = PERF_COUNTER_STATE_ERROR;
- }
- }
-
- if (!err && !ctx->task && cpuctx->max_pertask)
- cpuctx->max_pertask--;
-
- unlock:
- perf_enable();
-
- spin_unlock(&ctx->lock);
-}
-
-/*
- * Attach a performance counter to a context
- *
- * First we add the counter to the list with the hardware enable bit
- * in counter->hw_config cleared.
- *
- * If the counter is attached to a task which is on a CPU we use a smp
- * call to enable it in the task context. The task might have been
- * scheduled away, but we check this in the smp call again.
- *
- * Must be called with ctx->mutex held.
- */
-static void
-perf_install_in_context(struct perf_counter_context *ctx,
- struct perf_counter *counter,
- int cpu)
-{
- struct task_struct *task = ctx->task;
-
- if (!task) {
- /*
- * Per cpu counters are installed via an smp call and
- * the install is always successful.
- */
- smp_call_function_single(cpu, __perf_install_in_context,
- counter, 1);
- return;
- }
-
-retry:
- task_oncpu_function_call(task, __perf_install_in_context,
- counter);
-
- spin_lock_irq(&ctx->lock);
- /*
- * we need to retry the smp call.
- */
- if (ctx->is_active && list_empty(&counter->group_entry)) {
- spin_unlock_irq(&ctx->lock);
- goto retry;
- }
-
- /*
- * The lock prevents this context from being scheduled in so we
- * can add the counter safely, if the call above did not
- * succeed.
- */
- if (list_empty(&counter->group_entry))
- add_counter_to_ctx(counter, ctx);
- spin_unlock_irq(&ctx->lock);
-}
-
-/*
- * Put a counter into inactive state and update time fields.
- * Enabling the leader of a group effectively enables all
- * the group members that aren't explicitly disabled, so we
- * have to update their ->tstamp_enabled also.
- * Note: this works for group members as well as group leaders
- * since the non-leader members' sibling_lists will be empty.
- */
-static void __perf_counter_mark_enabled(struct perf_counter *counter,
- struct perf_counter_context *ctx)
-{
- struct perf_counter *sub;
-
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
- list_for_each_entry(sub, &counter->sibling_list, group_entry)
- if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
- sub->tstamp_enabled =
- ctx->time - sub->total_time_enabled;
-}
-
-/*
- * Cross CPU call to enable a performance counter
- */
-static void __perf_counter_enable(void *info)
-{
- struct perf_counter *counter = info;
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter_context *ctx = counter->ctx;
- struct perf_counter *leader = counter->group_leader;
- int err;
-
- /*
- * If this is a per-task counter, need to check whether this
- * counter's task is the current task on this cpu.
- */
- if (ctx->task && cpuctx->task_ctx != ctx) {
- if (cpuctx->task_ctx || ctx->task != current)
- return;
- cpuctx->task_ctx = ctx;
- }
-
- spin_lock(&ctx->lock);
- ctx->is_active = 1;
- update_context_time(ctx);
-
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
- goto unlock;
- __perf_counter_mark_enabled(counter, ctx);
-
- /*
- * If the counter is in a group and isn't the group leader,
- * then don't put it on unless the group is on.
- */ - if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) - goto unlock; - - if (!group_can_go_on(counter, cpuctx, 1)) { - err = -EEXIST; - } else { - perf_disable(); - if (counter == leader) - err = group_sched_in(counter, cpuctx, ctx, - smp_processor_id()); - else - err = counter_sched_in(counter, cpuctx, ctx, - smp_processor_id()); - perf_enable(); - } - - if (err) { - /* - * If this counter can't go on and it's part of a - * group, then the whole group has to come off. - */ - if (leader != counter) - group_sched_out(leader, cpuctx, ctx); - if (leader->attr.pinned) { - update_group_times(leader); - leader->state = PERF_COUNTER_STATE_ERROR; - } - } - - unlock: - spin_unlock(&ctx->lock); -} - -/* - * Enable a counter. - * - * If counter->ctx is a cloned context, callers must make sure that - * every task struct that counter->ctx->task could possibly point to - * remains valid. This condition is satisfied when called through - * perf_counter_for_each_child or perf_counter_for_each as described - * for perf_counter_disable. - */ -static void perf_counter_enable(struct perf_counter *counter) -{ - struct perf_counter_context *ctx = counter->ctx; - struct task_struct *task = ctx->task; - - if (!task) { - /* - * Enable the counter on the cpu that it's on - */ - smp_call_function_single(counter->cpu, __perf_counter_enable, - counter, 1); - return; - } - - spin_lock_irq(&ctx->lock); - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) - goto out; - - /* - * If the counter is in error state, clear that first. - * That way, if we see the counter in error state below, we - * know that it has gone back into error state, as distinct - * from the task having been scheduled away before the - * cross-call arrived. - */ - if (counter->state == PERF_COUNTER_STATE_ERROR) - counter->state = PERF_COUNTER_STATE_OFF; - - retry: - spin_unlock_irq(&ctx->lock); - task_oncpu_function_call(task, __perf_counter_enable, counter); - - spin_lock_irq(&ctx->lock); - - /* - * If the context is active and the counter is still off, - * we need to retry the cross-call. - */ - if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) - goto retry; - - /* - * Since we have the lock this context can't be scheduled - * in, so we can change the state safely. - */ - if (counter->state == PERF_COUNTER_STATE_OFF) - __perf_counter_mark_enabled(counter, ctx); - - out: - spin_unlock_irq(&ctx->lock); -} - -static int perf_counter_refresh(struct perf_counter *counter, int refresh) -{ - /* - * not supported on inherited counters - */ - if (counter->attr.inherit) - return -EINVAL; - - atomic_add(refresh, &counter->event_limit); - perf_counter_enable(counter); - - return 0; -} - -void __perf_counter_sched_out(struct perf_counter_context *ctx, - struct perf_cpu_context *cpuctx) -{ - struct perf_counter *counter; - - spin_lock(&ctx->lock); - ctx->is_active = 0; - if (likely(!ctx->nr_counters)) - goto out; - update_context_time(ctx); - - perf_disable(); - if (ctx->nr_active) { - list_for_each_entry(counter, &ctx->group_list, group_entry) { - if (counter != counter->group_leader) - counter_sched_out(counter, cpuctx, ctx); - else - group_sched_out(counter, cpuctx, ctx); - } - } - perf_enable(); - out: - spin_unlock(&ctx->lock); -} - -/* - * Test whether two contexts are equivalent, i.e. whether they - * have both been cloned from the same version of the same context - * and they both have the same number of enabled counters. 
- * If the number of enabled counters is the same, then the set - * of enabled counters should be the same, because these are both - * inherited contexts, therefore we can't access individual counters - * in them directly with an fd; we can only enable/disable all - * counters via prctl, or enable/disable all counters in a family - * via ioctl, which will have the same effect on both contexts. - */ -static int context_equiv(struct perf_counter_context *ctx1, - struct perf_counter_context *ctx2) -{ - return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx - && ctx1->parent_gen == ctx2->parent_gen - && !ctx1->pin_count && !ctx2->pin_count; -} - -static void __perf_counter_read(void *counter); - -static void __perf_counter_sync_stat(struct perf_counter *counter, - struct perf_counter *next_counter) -{ - u64 value; - - if (!counter->attr.inherit_stat) - return; - - /* - * Update the counter value, we cannot use perf_counter_read() - * because we're in the middle of a context switch and have IRQs - * disabled, which upsets smp_call_function_single(), however - * we know the counter must be on the current CPU, therefore we - * don't need to use it. - */ - switch (counter->state) { - case PERF_COUNTER_STATE_ACTIVE: - __perf_counter_read(counter); - break; - - case PERF_COUNTER_STATE_INACTIVE: - update_counter_times(counter); - break; - - default: - break; - } - - /* - * In order to keep per-task stats reliable we need to flip the counter - * values when we flip the contexts. - */ - value = atomic64_read(&next_counter->count); - value = atomic64_xchg(&counter->count, value); - atomic64_set(&next_counter->count, value); - - swap(counter->total_time_enabled, next_counter->total_time_enabled); - swap(counter->total_time_running, next_counter->total_time_running); - - /* - * Since we swizzled the values, update the user visible data too. - */ - perf_counter_update_userpage(counter); - perf_counter_update_userpage(next_counter); -} - -#define list_next_entry(pos, member) \ - list_entry(pos->member.next, typeof(*pos), member) - -static void perf_counter_sync_stat(struct perf_counter_context *ctx, - struct perf_counter_context *next_ctx) -{ - struct perf_counter *counter, *next_counter; - - if (!ctx->nr_stat) - return; - - counter = list_first_entry(&ctx->event_list, - struct perf_counter, event_entry); - - next_counter = list_first_entry(&next_ctx->event_list, - struct perf_counter, event_entry); - - while (&counter->event_entry != &ctx->event_list && - &next_counter->event_entry != &next_ctx->event_list) { - - __perf_counter_sync_stat(counter, next_counter); - - counter = list_next_entry(counter, event_entry); - next_counter = list_next_entry(next_counter, event_entry); - } -} - -/* - * Called from scheduler to remove the counters of the current task, - * with interrupts disabled. - * - * We stop each counter and update the counter value in counter->count. - * - * This does not protect us against NMI, but disable() - * sets the disabled bit in the control field of counter _before_ - * accessing the counter control register. If a NMI hits, then it will - * not restart the counter. 
- */ -void perf_counter_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu) -{ - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx = task->perf_counter_ctxp; - struct perf_counter_context *next_ctx; - struct perf_counter_context *parent; - struct pt_regs *regs; - int do_switch = 1; - - regs = task_pt_regs(task); - perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); - - if (likely(!ctx || !cpuctx->task_ctx)) - return; - - update_context_time(ctx); - - rcu_read_lock(); - parent = rcu_dereference(ctx->parent_ctx); - next_ctx = next->perf_counter_ctxp; - if (parent && next_ctx && - rcu_dereference(next_ctx->parent_ctx) == parent) { - /* - * Looks like the two contexts are clones, so we might be - * able to optimize the context switch. We lock both - * contexts and check that they are clones under the - * lock (including re-checking that neither has been - * uncloned in the meantime). It doesn't matter which - * order we take the locks because no other cpu could - * be trying to lock both of these tasks. - */ - spin_lock(&ctx->lock); - spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); - if (context_equiv(ctx, next_ctx)) { - /* - * XXX do we need a memory barrier of sorts - * wrt to rcu_dereference() of perf_counter_ctxp - */ - task->perf_counter_ctxp = next_ctx; - next->perf_counter_ctxp = ctx; - ctx->task = next; - next_ctx->task = task; - do_switch = 0; - - perf_counter_sync_stat(ctx, next_ctx); - } - spin_unlock(&next_ctx->lock); - spin_unlock(&ctx->lock); - } - rcu_read_unlock(); - - if (do_switch) { - __perf_counter_sched_out(ctx, cpuctx); - cpuctx->task_ctx = NULL; - } -} - -/* - * Called with IRQs disabled - */ -static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) -{ - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - - if (!cpuctx->task_ctx) - return; - - if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) - return; - - __perf_counter_sched_out(ctx, cpuctx); - cpuctx->task_ctx = NULL; -} - -/* - * Called with IRQs disabled - */ -static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) -{ - __perf_counter_sched_out(&cpuctx->ctx, cpuctx); -} - -static void -__perf_counter_sched_in(struct perf_counter_context *ctx, - struct perf_cpu_context *cpuctx, int cpu) -{ - struct perf_counter *counter; - int can_add_hw = 1; - - spin_lock(&ctx->lock); - ctx->is_active = 1; - if (likely(!ctx->nr_counters)) - goto out; - - ctx->timestamp = perf_clock(); - - perf_disable(); - - /* - * First go through the list and put on any pinned groups - * in order to give them the best chance of going on. - */ - list_for_each_entry(counter, &ctx->group_list, group_entry) { - if (counter->state <= PERF_COUNTER_STATE_OFF || - !counter->attr.pinned) - continue; - if (counter->cpu != -1 && counter->cpu != cpu) - continue; - - if (counter != counter->group_leader) - counter_sched_in(counter, cpuctx, ctx, cpu); - else { - if (group_can_go_on(counter, cpuctx, 1)) - group_sched_in(counter, cpuctx, ctx, cpu); - } - - /* - * If this pinned group hasn't been scheduled, - * put it in error state. - */ - if (counter->state == PERF_COUNTER_STATE_INACTIVE) { - update_group_times(counter); - counter->state = PERF_COUNTER_STATE_ERROR; - } - } - - list_for_each_entry(counter, &ctx->group_list, group_entry) { - /* - * Ignore counters in OFF or ERROR state, and - * ignore pinned counters since we did them already. 
- */ - if (counter->state <= PERF_COUNTER_STATE_OFF || - counter->attr.pinned) - continue; - - /* - * Listen to the 'cpu' scheduling filter constraint - * of counters: - */ - if (counter->cpu != -1 && counter->cpu != cpu) - continue; - - if (counter != counter->group_leader) { - if (counter_sched_in(counter, cpuctx, ctx, cpu)) - can_add_hw = 0; - } else { - if (group_can_go_on(counter, cpuctx, can_add_hw)) { - if (group_sched_in(counter, cpuctx, ctx, cpu)) - can_add_hw = 0; - } - } - } - perf_enable(); - out: - spin_unlock(&ctx->lock); -} - -/* - * Called from scheduler to add the counters of the current task - * with interrupts disabled. - * - * We restore the counter value and then enable it. - * - * This does not protect us against NMI, but enable() - * sets the enabled bit in the control field of counter _before_ - * accessing the counter control register. If a NMI hits, then it will - * keep the counter running. - */ -void perf_counter_task_sched_in(struct task_struct *task, int cpu) -{ - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx = task->perf_counter_ctxp; - - if (likely(!ctx)) - return; - if (cpuctx->task_ctx == ctx) - return; - __perf_counter_sched_in(ctx, cpuctx, cpu); - cpuctx->task_ctx = ctx; -} - -static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) -{ - struct perf_counter_context *ctx = &cpuctx->ctx; - - __perf_counter_sched_in(ctx, cpuctx, cpu); -} - -#define MAX_INTERRUPTS (~0ULL) - -static void perf_log_throttle(struct perf_counter *counter, int enable); - -static void perf_adjust_period(struct perf_counter *counter, u64 events) -{ - struct hw_perf_counter *hwc = &counter->hw; - u64 period, sample_period; - s64 delta; - - events *= hwc->sample_period; - period = div64_u64(events, counter->attr.sample_freq); - - delta = (s64)(period - hwc->sample_period); - delta = (delta + 7) / 8; /* low pass filter */ - - sample_period = hwc->sample_period + delta; - - if (!sample_period) - sample_period = 1; - - hwc->sample_period = sample_period; -} - -static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) -{ - struct perf_counter *counter; - struct hw_perf_counter *hwc; - u64 interrupts, freq; - - spin_lock(&ctx->lock); - list_for_each_entry(counter, &ctx->group_list, group_entry) { - if (counter->state != PERF_COUNTER_STATE_ACTIVE) - continue; - - hwc = &counter->hw; - - interrupts = hwc->interrupts; - hwc->interrupts = 0; - - /* - * unthrottle counters on the tick - */ - if (interrupts == MAX_INTERRUPTS) { - perf_log_throttle(counter, 1); - counter->pmu->unthrottle(counter); - interrupts = 2*sysctl_perf_counter_sample_rate/HZ; - } - - if (!counter->attr.freq || !counter->attr.sample_freq) - continue; - - /* - * if the specified freq < HZ then we need to skip ticks - */ - if (counter->attr.sample_freq < HZ) { - freq = counter->attr.sample_freq; - - hwc->freq_count += freq; - hwc->freq_interrupts += interrupts; - - if (hwc->freq_count < HZ) - continue; - - interrupts = hwc->freq_interrupts; - hwc->freq_interrupts = 0; - hwc->freq_count -= HZ; - } else - freq = HZ; - - perf_adjust_period(counter, freq * interrupts); - - /* - * In order to avoid being stalled by an (accidental) huge - * sample period, force reset the sample period if we didn't - * get any events in this freq period. 
- */ - if (!interrupts) { - perf_disable(); - counter->pmu->disable(counter); - atomic64_set(&hwc->period_left, 0); - counter->pmu->enable(counter); - perf_enable(); - } - } - spin_unlock(&ctx->lock); -} - -/* - * Round-robin a context's counters: - */ -static void rotate_ctx(struct perf_counter_context *ctx) -{ - struct perf_counter *counter; - - if (!ctx->nr_counters) - return; - - spin_lock(&ctx->lock); - /* - * Rotate the first entry last (works just fine for group counters too): - */ - perf_disable(); - list_for_each_entry(counter, &ctx->group_list, group_entry) { - list_move_tail(&counter->group_entry, &ctx->group_list); - break; - } - perf_enable(); - - spin_unlock(&ctx->lock); -} - -void perf_counter_task_tick(struct task_struct *curr, int cpu) -{ - struct perf_cpu_context *cpuctx; - struct perf_counter_context *ctx; - - if (!atomic_read(&nr_counters)) - return; - - cpuctx = &per_cpu(perf_cpu_context, cpu); - ctx = curr->perf_counter_ctxp; - - perf_ctx_adjust_freq(&cpuctx->ctx); - if (ctx) - perf_ctx_adjust_freq(ctx); - - perf_counter_cpu_sched_out(cpuctx); - if (ctx) - __perf_counter_task_sched_out(ctx); - - rotate_ctx(&cpuctx->ctx); - if (ctx) - rotate_ctx(ctx); - - perf_counter_cpu_sched_in(cpuctx, cpu); - if (ctx) - perf_counter_task_sched_in(curr, cpu); -} - -/* - * Enable all of a task's counters that have been marked enable-on-exec. - * This expects task == current. - */ -static void perf_counter_enable_on_exec(struct task_struct *task) -{ - struct perf_counter_context *ctx; - struct perf_counter *counter; - unsigned long flags; - int enabled = 0; - - local_irq_save(flags); - ctx = task->perf_counter_ctxp; - if (!ctx || !ctx->nr_counters) - goto out; - - __perf_counter_task_sched_out(ctx); - - spin_lock(&ctx->lock); - - list_for_each_entry(counter, &ctx->group_list, group_entry) { - if (!counter->attr.enable_on_exec) - continue; - counter->attr.enable_on_exec = 0; - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) - continue; - __perf_counter_mark_enabled(counter, ctx); - enabled = 1; - } - - /* - * Unclone this context if we enabled any counter. - */ - if (enabled) - unclone_ctx(ctx); - - spin_unlock(&ctx->lock); - - perf_counter_task_sched_in(task, smp_processor_id()); - out: - local_irq_restore(flags); -} - -/* - * Cross CPU call to read the hardware counter - */ -static void __perf_counter_read(void *info) -{ - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - struct perf_counter *counter = info; - struct perf_counter_context *ctx = counter->ctx; - unsigned long flags; - - /* - * If this is a task context, we need to check whether it is - * the current task context of this cpu. If not it has been - * scheduled out before the smp call arrived. In that case - * counter->count would have been updated to a recent sample - * when the counter was scheduled out. 
- */ - if (ctx->task && cpuctx->task_ctx != ctx) - return; - - local_irq_save(flags); - if (ctx->is_active) - update_context_time(ctx); - counter->pmu->read(counter); - update_counter_times(counter); - local_irq_restore(flags); -} - -static u64 perf_counter_read(struct perf_counter *counter) -{ - /* - * If counter is enabled and currently active on a CPU, update the - * value in the counter structure: - */ - if (counter->state == PERF_COUNTER_STATE_ACTIVE) { - smp_call_function_single(counter->oncpu, - __perf_counter_read, counter, 1); - } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { - update_counter_times(counter); - } - - return atomic64_read(&counter->count); -} - -/* - * Initialize the perf_counter context in a task_struct: - */ -static void -__perf_counter_init_context(struct perf_counter_context *ctx, - struct task_struct *task) -{ - memset(ctx, 0, sizeof(*ctx)); - spin_lock_init(&ctx->lock); - mutex_init(&ctx->mutex); - INIT_LIST_HEAD(&ctx->group_list); - INIT_LIST_HEAD(&ctx->event_list); - atomic_set(&ctx->refcount, 1); - ctx->task = task; -} - -static struct perf_counter_context *find_get_context(pid_t pid, int cpu) -{ - struct perf_counter_context *ctx; - struct perf_cpu_context *cpuctx; - struct task_struct *task; - unsigned long flags; - int err; - - /* - * If cpu is not a wildcard then this is a percpu counter: - */ - if (cpu != -1) { - /* Must be root to operate on a CPU counter: */ - if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) - return ERR_PTR(-EACCES); - - if (cpu < 0 || cpu > num_possible_cpus()) - return ERR_PTR(-EINVAL); - - /* - * We could be clever and allow to attach a counter to an - * offline CPU and activate it when the CPU comes up, but - * that's for later. - */ - if (!cpu_isset(cpu, cpu_online_map)) - return ERR_PTR(-ENODEV); - - cpuctx = &per_cpu(perf_cpu_context, cpu); - ctx = &cpuctx->ctx; - get_ctx(ctx); - - return ctx; - } - - rcu_read_lock(); - if (!pid) - task = current; - else - task = find_task_by_vpid(pid); - if (task) - get_task_struct(task); - rcu_read_unlock(); - - if (!task) - return ERR_PTR(-ESRCH); - - /* - * Can't attach counters to a dying task. - */ - err = -ESRCH; - if (task->flags & PF_EXITING) - goto errout; - - /* Reuse ptrace permission checks for now. */ - err = -EACCES; - if (!ptrace_may_access(task, PTRACE_MODE_READ)) - goto errout; - - retry: - ctx = perf_lock_task_context(task, &flags); - if (ctx) { - unclone_ctx(ctx); - spin_unlock_irqrestore(&ctx->lock, flags); - } - - if (!ctx) { - ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); - err = -ENOMEM; - if (!ctx) - goto errout; - __perf_counter_init_context(ctx, task); - get_ctx(ctx); - if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { - /* - * We raced with some other task; use - * the context they set. 
- */ - kfree(ctx); - goto retry; - } - get_task_struct(task); - } - - put_task_struct(task); - return ctx; - - errout: - put_task_struct(task); - return ERR_PTR(err); -} - -static void free_counter_rcu(struct rcu_head *head) -{ - struct perf_counter *counter; - - counter = container_of(head, struct perf_counter, rcu_head); - if (counter->ns) - put_pid_ns(counter->ns); - kfree(counter); -} - -static void perf_pending_sync(struct perf_counter *counter); - -static void free_counter(struct perf_counter *counter) -{ - perf_pending_sync(counter); - - if (!counter->parent) { - atomic_dec(&nr_counters); - if (counter->attr.mmap) - atomic_dec(&nr_mmap_counters); - if (counter->attr.comm) - atomic_dec(&nr_comm_counters); - if (counter->attr.task) - atomic_dec(&nr_task_counters); - } - - if (counter->output) { - fput(counter->output->filp); - counter->output = NULL; - } - - if (counter->destroy) - counter->destroy(counter); - - put_ctx(counter->ctx); - call_rcu(&counter->rcu_head, free_counter_rcu); -} - -/* - * Called when the last reference to the file is gone. - */ -static int perf_release(struct inode *inode, struct file *file) -{ - struct perf_counter *counter = file->private_data; - struct perf_counter_context *ctx = counter->ctx; - - file->private_data = NULL; - - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - perf_counter_remove_from_context(counter); - mutex_unlock(&ctx->mutex); - - mutex_lock(&counter->owner->perf_counter_mutex); - list_del_init(&counter->owner_entry); - mutex_unlock(&counter->owner->perf_counter_mutex); - put_task_struct(counter->owner); - - free_counter(counter); - - return 0; -} - -static int perf_counter_read_size(struct perf_counter *counter) -{ - int entry = sizeof(u64); /* value */ - int size = 0; - int nr = 1; - - if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - size += sizeof(u64); - - if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - size += sizeof(u64); - - if (counter->attr.read_format & PERF_FORMAT_ID) - entry += sizeof(u64); - - if (counter->attr.read_format & PERF_FORMAT_GROUP) { - nr += counter->group_leader->nr_siblings; - size += sizeof(u64); - } - - size += entry * nr; - - return size; -} - -static u64 perf_counter_read_value(struct perf_counter *counter) -{ - struct perf_counter *child; - u64 total = 0; - - total += perf_counter_read(counter); - list_for_each_entry(child, &counter->child_list, child_list) - total += perf_counter_read(child); - - return total; -} - -static int perf_counter_read_entry(struct perf_counter *counter, - u64 read_format, char __user *buf) -{ - int n = 0, count = 0; - u64 values[2]; - - values[n++] = perf_counter_read_value(counter); - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_counter_id(counter); - - count = n * sizeof(u64); - - if (copy_to_user(buf, values, count)) - return -EFAULT; - - return count; -} - -static int perf_counter_read_group(struct perf_counter *counter, - u64 read_format, char __user *buf) -{ - struct perf_counter *leader = counter->group_leader, *sub; - int n = 0, size = 0, err = -EFAULT; - u64 values[3]; - - values[n++] = 1 + leader->nr_siblings; - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = leader->total_time_enabled + - atomic64_read(&leader->child_total_time_enabled); - } - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = leader->total_time_running + - atomic64_read(&leader->child_total_time_running); - } - - size = n * sizeof(u64); - - if (copy_to_user(buf, values, size)) - return -EFAULT; - - err = 
perf_counter_read_entry(leader, read_format, buf + size); - if (err < 0) - return err; - - size += err; - - list_for_each_entry(sub, &leader->sibling_list, group_entry) { - err = perf_counter_read_entry(sub, read_format, - buf + size); - if (err < 0) - return err; - - size += err; - } - - return size; -} - -static int perf_counter_read_one(struct perf_counter *counter, - u64 read_format, char __user *buf) -{ - u64 values[4]; - int n = 0; - - values[n++] = perf_counter_read_value(counter); - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = counter->total_time_enabled + - atomic64_read(&counter->child_total_time_enabled); - } - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = counter->total_time_running + - atomic64_read(&counter->child_total_time_running); - } - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_counter_id(counter); - - if (copy_to_user(buf, values, n * sizeof(u64))) - return -EFAULT; - - return n * sizeof(u64); -} - -/* - * Read the performance counter - simple non blocking version for now - */ -static ssize_t -perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) -{ - u64 read_format = counter->attr.read_format; - int ret; - - /* - * Return end-of-file for a read on a counter that is in - * error state (i.e. because it was pinned but it couldn't be - * scheduled on to the CPU at some point). - */ - if (counter->state == PERF_COUNTER_STATE_ERROR) - return 0; - - if (count < perf_counter_read_size(counter)) - return -ENOSPC; - - WARN_ON_ONCE(counter->ctx->parent_ctx); - mutex_lock(&counter->child_mutex); - if (read_format & PERF_FORMAT_GROUP) - ret = perf_counter_read_group(counter, read_format, buf); - else - ret = perf_counter_read_one(counter, read_format, buf); - mutex_unlock(&counter->child_mutex); - - return ret; -} - -static ssize_t -perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) -{ - struct perf_counter *counter = file->private_data; - - return perf_read_hw(counter, buf, count); -} - -static unsigned int perf_poll(struct file *file, poll_table *wait) -{ - struct perf_counter *counter = file->private_data; - struct perf_mmap_data *data; - unsigned int events = POLL_HUP; - - rcu_read_lock(); - data = rcu_dereference(counter->data); - if (data) - events = atomic_xchg(&data->poll, 0); - rcu_read_unlock(); - - poll_wait(file, &counter->waitq, wait); - - return events; -} - -static void perf_counter_reset(struct perf_counter *counter) -{ - (void)perf_counter_read(counter); - atomic64_set(&counter->count, 0); - perf_counter_update_userpage(counter); -} - -/* - * Holding the top-level counter's child_mutex means that any - * descendant process that has inherited this counter will block - * in sync_child_counter if it goes to exit, thus satisfying the - * task existence requirements of perf_counter_enable/disable. 
- */ -static void perf_counter_for_each_child(struct perf_counter *counter, - void (*func)(struct perf_counter *)) -{ - struct perf_counter *child; - - WARN_ON_ONCE(counter->ctx->parent_ctx); - mutex_lock(&counter->child_mutex); - func(counter); - list_for_each_entry(child, &counter->child_list, child_list) - func(child); - mutex_unlock(&counter->child_mutex); -} - -static void perf_counter_for_each(struct perf_counter *counter, - void (*func)(struct perf_counter *)) -{ - struct perf_counter_context *ctx = counter->ctx; - struct perf_counter *sibling; - - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - counter = counter->group_leader; - - perf_counter_for_each_child(counter, func); - func(counter); - list_for_each_entry(sibling, &counter->sibling_list, group_entry) - perf_counter_for_each_child(counter, func); - mutex_unlock(&ctx->mutex); -} - -static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) -{ - struct perf_counter_context *ctx = counter->ctx; - unsigned long size; - int ret = 0; - u64 value; - - if (!counter->attr.sample_period) - return -EINVAL; - - size = copy_from_user(&value, arg, sizeof(value)); - if (size != sizeof(value)) - return -EFAULT; - - if (!value) - return -EINVAL; - - spin_lock_irq(&ctx->lock); - if (counter->attr.freq) { - if (value > sysctl_perf_counter_sample_rate) { - ret = -EINVAL; - goto unlock; - } - - counter->attr.sample_freq = value; - } else { - counter->attr.sample_period = value; - counter->hw.sample_period = value; - } -unlock: - spin_unlock_irq(&ctx->lock); - - return ret; -} - -int perf_counter_set_output(struct perf_counter *counter, int output_fd); - -static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct perf_counter *counter = file->private_data; - void (*func)(struct perf_counter *); - u32 flags = arg; - - switch (cmd) { - case PERF_COUNTER_IOC_ENABLE: - func = perf_counter_enable; - break; - case PERF_COUNTER_IOC_DISABLE: - func = perf_counter_disable; - break; - case PERF_COUNTER_IOC_RESET: - func = perf_counter_reset; - break; - - case PERF_COUNTER_IOC_REFRESH: - return perf_counter_refresh(counter, arg); - - case PERF_COUNTER_IOC_PERIOD: - return perf_counter_period(counter, (u64 __user *)arg); - - case PERF_COUNTER_IOC_SET_OUTPUT: - return perf_counter_set_output(counter, arg); - - default: - return -ENOTTY; - } - - if (flags & PERF_IOC_FLAG_GROUP) - perf_counter_for_each(counter, func); - else - perf_counter_for_each_child(counter, func); - - return 0; -} - -int perf_counter_task_enable(void) -{ - struct perf_counter *counter; - - mutex_lock(¤t->perf_counter_mutex); - list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) - perf_counter_for_each_child(counter, perf_counter_enable); - mutex_unlock(¤t->perf_counter_mutex); - - return 0; -} - -int perf_counter_task_disable(void) -{ - struct perf_counter *counter; - - mutex_lock(¤t->perf_counter_mutex); - list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) - perf_counter_for_each_child(counter, perf_counter_disable); - mutex_unlock(¤t->perf_counter_mutex); - - return 0; -} - -#ifndef PERF_COUNTER_INDEX_OFFSET -# define PERF_COUNTER_INDEX_OFFSET 0 -#endif - -static int perf_counter_index(struct perf_counter *counter) -{ - if (counter->state != PERF_COUNTER_STATE_ACTIVE) - return 0; - - return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; -} - -/* - * Callers need to ensure there can be no nesting of this function, otherwise - * the seqlock logic goes bad. 
We can not serialize this because the arch - * code calls this from NMI context. - */ -void perf_counter_update_userpage(struct perf_counter *counter) -{ - struct perf_counter_mmap_page *userpg; - struct perf_mmap_data *data; - - rcu_read_lock(); - data = rcu_dereference(counter->data); - if (!data) - goto unlock; - - userpg = data->user_page; - - /* - * Disable preemption so as to not let the corresponding user-space - * spin too long if we get preempted. - */ - preempt_disable(); - ++userpg->lock; - barrier(); - userpg->index = perf_counter_index(counter); - userpg->offset = atomic64_read(&counter->count); - if (counter->state == PERF_COUNTER_STATE_ACTIVE) - userpg->offset -= atomic64_read(&counter->hw.prev_count); - - userpg->time_enabled = counter->total_time_enabled + - atomic64_read(&counter->child_total_time_enabled); - - userpg->time_running = counter->total_time_running + - atomic64_read(&counter->child_total_time_running); - - barrier(); - ++userpg->lock; - preempt_enable(); -unlock: - rcu_read_unlock(); -} - -static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct perf_counter *counter = vma->vm_file->private_data; - struct perf_mmap_data *data; - int ret = VM_FAULT_SIGBUS; - - if (vmf->flags & FAULT_FLAG_MKWRITE) { - if (vmf->pgoff == 0) - ret = 0; - return ret; - } - - rcu_read_lock(); - data = rcu_dereference(counter->data); - if (!data) - goto unlock; - - if (vmf->pgoff == 0) { - vmf->page = virt_to_page(data->user_page); - } else { - int nr = vmf->pgoff - 1; - - if ((unsigned)nr > data->nr_pages) - goto unlock; - - if (vmf->flags & FAULT_FLAG_WRITE) - goto unlock; - - vmf->page = virt_to_page(data->data_pages[nr]); - } - - get_page(vmf->page); - vmf->page->mapping = vma->vm_file->f_mapping; - vmf->page->index = vmf->pgoff; - - ret = 0; -unlock: - rcu_read_unlock(); - - return ret; -} - -static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) -{ - struct perf_mmap_data *data; - unsigned long size; - int i; - - WARN_ON(atomic_read(&counter->mmap_count)); - - size = sizeof(struct perf_mmap_data); - size += nr_pages * sizeof(void *); - - data = kzalloc(size, GFP_KERNEL); - if (!data) - goto fail; - - data->user_page = (void *)get_zeroed_page(GFP_KERNEL); - if (!data->user_page) - goto fail_user_page; - - for (i = 0; i < nr_pages; i++) { - data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); - if (!data->data_pages[i]) - goto fail_data_pages; - } - - data->nr_pages = nr_pages; - atomic_set(&data->lock, -1); - - if (counter->attr.watermark) { - data->watermark = min_t(long, PAGE_SIZE * nr_pages, - counter->attr.wakeup_watermark); - } - if (!data->watermark) - data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); - - rcu_assign_pointer(counter->data, data); - - return 0; - -fail_data_pages: - for (i--; i >= 0; i--) - free_page((unsigned long)data->data_pages[i]); - - free_page((unsigned long)data->user_page); - -fail_user_page: - kfree(data); - -fail: - return -ENOMEM; -} - -static void perf_mmap_free_page(unsigned long addr) -{ - struct page *page = virt_to_page((void *)addr); - - page->mapping = NULL; - __free_page(page); -} - -static void __perf_mmap_data_free(struct rcu_head *rcu_head) -{ - struct perf_mmap_data *data; - int i; - - data = container_of(rcu_head, struct perf_mmap_data, rcu_head); - - perf_mmap_free_page((unsigned long)data->user_page); - for (i = 0; i < data->nr_pages; i++) - perf_mmap_free_page((unsigned long)data->data_pages[i]); - - kfree(data); -} - -static void perf_mmap_data_free(struct 
perf_counter *counter) -{ - struct perf_mmap_data *data = counter->data; - - WARN_ON(atomic_read(&counter->mmap_count)); - - rcu_assign_pointer(counter->data, NULL); - call_rcu(&data->rcu_head, __perf_mmap_data_free); -} - -static void perf_mmap_open(struct vm_area_struct *vma) -{ - struct perf_counter *counter = vma->vm_file->private_data; - - atomic_inc(&counter->mmap_count); -} - -static void perf_mmap_close(struct vm_area_struct *vma) -{ - struct perf_counter *counter = vma->vm_file->private_data; - - WARN_ON_ONCE(counter->ctx->parent_ctx); - if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { - struct user_struct *user = current_user(); - - atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); - vma->vm_mm->locked_vm -= counter->data->nr_locked; - perf_mmap_data_free(counter); - mutex_unlock(&counter->mmap_mutex); - } -} - -static struct vm_operations_struct perf_mmap_vmops = { - .open = perf_mmap_open, - .close = perf_mmap_close, - .fault = perf_mmap_fault, - .page_mkwrite = perf_mmap_fault, -}; - -static int perf_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct perf_counter *counter = file->private_data; - unsigned long user_locked, user_lock_limit; - struct user_struct *user = current_user(); - unsigned long locked, lock_limit; - unsigned long vma_size; - unsigned long nr_pages; - long user_extra, extra; - int ret = 0; - - if (!(vma->vm_flags & VM_SHARED)) - return -EINVAL; - - vma_size = vma->vm_end - vma->vm_start; - nr_pages = (vma_size / PAGE_SIZE) - 1; - - /* - * If we have data pages ensure they're a power-of-two number, so we - * can do bitmasks instead of modulo. - */ - if (nr_pages != 0 && !is_power_of_2(nr_pages)) - return -EINVAL; - - if (vma_size != PAGE_SIZE * (1 + nr_pages)) - return -EINVAL; - - if (vma->vm_pgoff != 0) - return -EINVAL; - - WARN_ON_ONCE(counter->ctx->parent_ctx); - mutex_lock(&counter->mmap_mutex); - if (counter->output) { - ret = -EINVAL; - goto unlock; - } - - if (atomic_inc_not_zero(&counter->mmap_count)) { - if (nr_pages != counter->data->nr_pages) - ret = -EINVAL; - goto unlock; - } - - user_extra = nr_pages + 1; - user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); - - /* - * Increase the limit linearly with more CPUs: - */ - user_lock_limit *= num_online_cpus(); - - user_locked = atomic_long_read(&user->locked_vm) + user_extra; - - extra = 0; - if (user_locked > user_lock_limit) - extra = user_locked - user_lock_limit; - - lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; - lock_limit >>= PAGE_SHIFT; - locked = vma->vm_mm->locked_vm + extra; - - if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && - !capable(CAP_IPC_LOCK)) { - ret = -EPERM; - goto unlock; - } - - WARN_ON(counter->data); - ret = perf_mmap_data_alloc(counter, nr_pages); - if (ret) - goto unlock; - - atomic_set(&counter->mmap_count, 1); - atomic_long_add(user_extra, &user->locked_vm); - vma->vm_mm->locked_vm += extra; - counter->data->nr_locked = extra; - if (vma->vm_flags & VM_WRITE) - counter->data->writable = 1; - -unlock: - mutex_unlock(&counter->mmap_mutex); - - vma->vm_flags |= VM_RESERVED; - vma->vm_ops = &perf_mmap_vmops; - - return ret; -} - -static int perf_fasync(int fd, struct file *filp, int on) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - struct perf_counter *counter = filp->private_data; - int retval; - - mutex_lock(&inode->i_mutex); - retval = fasync_helper(fd, filp, on, &counter->fasync); - mutex_unlock(&inode->i_mutex); - - if (retval < 0) - return retval; - - 
return 0;
-}
-
-static const struct file_operations perf_fops = {
- .release = perf_release,
- .read = perf_read,
- .poll = perf_poll,
- .unlocked_ioctl = perf_ioctl,
- .compat_ioctl = perf_ioctl,
- .mmap = perf_mmap,
- .fasync = perf_fasync,
-};
-
-/*
- * Perf counter wakeup
- *
- * If there's data, ensure we set the poll() state and publish everything
- * to user-space before waking everybody up.
- */
-
-void perf_counter_wakeup(struct perf_counter *counter)
-{
- wake_up_all(&counter->waitq);
-
- if (counter->pending_kill) {
- kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
- counter->pending_kill = 0;
- }
-}
-
-/*
- * Pending wakeups
- *
- * Handle the case where we need to wake up from NMI (or rq->lock) context.
- *
- * The NMI bit means we cannot possibly take locks. Therefore, maintain a
- * single linked list and use cmpxchg() to add entries lockless.
- */
-
-static void perf_pending_counter(struct perf_pending_entry *entry)
-{
- struct perf_counter *counter = container_of(entry,
- struct perf_counter, pending);
-
- if (counter->pending_disable) {
- counter->pending_disable = 0;
- __perf_counter_disable(counter);
- }
-
- if (counter->pending_wakeup) {
- counter->pending_wakeup = 0;
- perf_counter_wakeup(counter);
- }
-}
-
-#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
-
-static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
- PENDING_TAIL,
-};
-
-static void perf_pending_queue(struct perf_pending_entry *entry,
- void (*func)(struct perf_pending_entry *))
-{
- struct perf_pending_entry **head;
-
- if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
- return;
-
- entry->func = func;
-
- head = &get_cpu_var(perf_pending_head);
-
- do {
- entry->next = *head;
- } while (cmpxchg(head, entry->next, entry) != entry->next);
-
- set_perf_counter_pending();
-
- put_cpu_var(perf_pending_head);
-}
-
-static int __perf_pending_run(void)
-{
- struct perf_pending_entry *list;
- int nr = 0;
-
- list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
- while (list != PENDING_TAIL) {
- void (*func)(struct perf_pending_entry *);
- struct perf_pending_entry *entry = list;
-
- list = list->next;
-
- func = entry->func;
- entry->next = NULL;
- /*
- * Ensure we observe the unqueue before we issue the wakeup,
- * so that we won't be waiting forever.
- * -- see perf_not_pending().
- */
- smp_wmb();
-
- func(entry);
- nr++;
- }
-
- return nr;
-}
-
-static inline int perf_not_pending(struct perf_counter *counter)
-{
- /*
- * If we flush on whatever cpu we run, there is a chance we don't
- * need to wait.
- */
- get_cpu();
- __perf_pending_run();
- put_cpu();
-
- /*
- * Ensure we see the proper queue state before going to sleep
- * so that we do not miss the wakeup.
-- see perf_pending_handle() - */ - smp_rmb(); - return counter->pending.next == NULL; -} - -static void perf_pending_sync(struct perf_counter *counter) -{ - wait_event(counter->waitq, perf_not_pending(counter)); -} - -void perf_counter_do_pending(void) -{ - __perf_pending_run(); -} - -/* - * Callchain support -- arch specific - */ - -__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) -{ - return NULL; -} - -/* - * Output - */ -static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, - unsigned long offset, unsigned long head) -{ - unsigned long mask; - - if (!data->writable) - return true; - - mask = (data->nr_pages << PAGE_SHIFT) - 1; - - offset = (offset - tail) & mask; - head = (head - tail) & mask; - - if ((int)(head - offset) < 0) - return false; - - return true; -} - -static void perf_output_wakeup(struct perf_output_handle *handle) -{ - atomic_set(&handle->data->poll, POLL_IN); - - if (handle->nmi) { - handle->counter->pending_wakeup = 1; - perf_pending_queue(&handle->counter->pending, - perf_pending_counter); - } else - perf_counter_wakeup(handle->counter); -} - -/* - * Curious locking construct. - * - * We need to ensure a later event doesn't publish a head when a former - * event isn't done writing. However since we need to deal with NMIs we - * cannot fully serialize things. - * - * What we do is serialize between CPUs so we only have to deal with NMI - * nesting on a single CPU. - * - * We only publish the head (and generate a wakeup) when the outer-most - * event completes. - */ -static void perf_output_lock(struct perf_output_handle *handle) -{ - struct perf_mmap_data *data = handle->data; - int cpu; - - handle->locked = 0; - - local_irq_save(handle->flags); - cpu = smp_processor_id(); - - if (in_nmi() && atomic_read(&data->lock) == cpu) - return; - - while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) - cpu_relax(); - - handle->locked = 1; -} - -static void perf_output_unlock(struct perf_output_handle *handle) -{ - struct perf_mmap_data *data = handle->data; - unsigned long head; - int cpu; - - data->done_head = data->head; - - if (!handle->locked) - goto out; - -again: - /* - * The xchg implies a full barrier that ensures all writes are done - * before we publish the new head, matched by a rmb() in userspace when - * reading this position. - */ - while ((head = atomic_long_xchg(&data->done_head, 0))) - data->user_page->data_head = head; - - /* - * NMI can happen here, which means we can miss a done_head update. - */ - - cpu = atomic_xchg(&data->lock, -1); - WARN_ON_ONCE(cpu != smp_processor_id()); - - /* - * Therefore we have to validate we did not indeed do so. - */ - if (unlikely(atomic_long_read(&data->done_head))) { - /* - * Since we had it locked, we can lock it again. 
- */ - while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) - cpu_relax(); - - goto again; - } - - if (atomic_xchg(&data->wakeup, 0)) - perf_output_wakeup(handle); -out: - local_irq_restore(handle->flags); -} - -void perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len) -{ - unsigned int pages_mask; - unsigned int offset; - unsigned int size; - void **pages; - - offset = handle->offset; - pages_mask = handle->data->nr_pages - 1; - pages = handle->data->data_pages; - - do { - unsigned int page_offset; - int nr; - - nr = (offset >> PAGE_SHIFT) & pages_mask; - page_offset = offset & (PAGE_SIZE - 1); - size = min_t(unsigned int, PAGE_SIZE - page_offset, len); - - memcpy(pages[nr] + page_offset, buf, size); - - len -= size; - buf += size; - offset += size; - } while (len); - - handle->offset = offset; - - /* - * Check we didn't copy past our reservation window, taking the - * possible unsigned int wrap into account. - */ - WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); -} - -int perf_output_begin(struct perf_output_handle *handle, - struct perf_counter *counter, unsigned int size, - int nmi, int sample) -{ - struct perf_counter *output_counter; - struct perf_mmap_data *data; - unsigned long tail, offset, head; - int have_lost; - struct { - struct perf_event_header header; - u64 id; - u64 lost; - } lost_event; - - rcu_read_lock(); - /* - * For inherited counters we send all the output towards the parent. - */ - if (counter->parent) - counter = counter->parent; - - output_counter = rcu_dereference(counter->output); - if (output_counter) - counter = output_counter; - - data = rcu_dereference(counter->data); - if (!data) - goto out; - - handle->data = data; - handle->counter = counter; - handle->nmi = nmi; - handle->sample = sample; - - if (!data->nr_pages) - goto fail; - - have_lost = atomic_read(&data->lost); - if (have_lost) - size += sizeof(lost_event); - - perf_output_lock(handle); - - do { - /* - * Userspace could choose to issue a mb() before updating the - * tail pointer. So that all reads will be completed before the - * write is issued. 
- */ - tail = ACCESS_ONCE(data->user_page->data_tail); - smp_rmb(); - offset = head = atomic_long_read(&data->head); - head += size; - if (unlikely(!perf_output_space(data, tail, offset, head))) - goto fail; - } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); - - handle->offset = offset; - handle->head = head; - - if (head - tail > data->watermark) - atomic_set(&data->wakeup, 1); - - if (have_lost) { - lost_event.header.type = PERF_EVENT_LOST; - lost_event.header.misc = 0; - lost_event.header.size = sizeof(lost_event); - lost_event.id = counter->id; - lost_event.lost = atomic_xchg(&data->lost, 0); - - perf_output_put(handle, lost_event); - } - - return 0; - -fail: - atomic_inc(&data->lost); - perf_output_unlock(handle); -out: - rcu_read_unlock(); - - return -ENOSPC; -} - -void perf_output_end(struct perf_output_handle *handle) -{ - struct perf_counter *counter = handle->counter; - struct perf_mmap_data *data = handle->data; - - int wakeup_events = counter->attr.wakeup_events; - - if (handle->sample && wakeup_events) { - int events = atomic_inc_return(&data->events); - if (events >= wakeup_events) { - atomic_sub(wakeup_events, &data->events); - atomic_set(&data->wakeup, 1); - } - } - - perf_output_unlock(handle); - rcu_read_unlock(); -} - -static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) -{ - /* - * only top level counters have the pid namespace they were created in - */ - if (counter->parent) - counter = counter->parent; - - return task_tgid_nr_ns(p, counter->ns); -} - -static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) -{ - /* - * only top level counters have the pid namespace they were created in - */ - if (counter->parent) - counter = counter->parent; - - return task_pid_nr_ns(p, counter->ns); -} - -static void perf_output_read_one(struct perf_output_handle *handle, - struct perf_counter *counter) -{ - u64 read_format = counter->attr.read_format; - u64 values[4]; - int n = 0; - - values[n++] = atomic64_read(&counter->count); - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = counter->total_time_enabled + - atomic64_read(&counter->child_total_time_enabled); - } - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = counter->total_time_running + - atomic64_read(&counter->child_total_time_running); - } - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_counter_id(counter); - - perf_output_copy(handle, values, n * sizeof(u64)); -} - -/* - * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. 
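For illustration only (not part of the patch): how the non-group layout written by perf_output_read_one() above looks to a reader of the resulting record. The struct and helper are hypothetical; the three flag values are the PERF_FORMAT_* bit positions of this ABI, spelled out numerically so the sketch stands alone.

	#include <stdint.h>

	#define FMT_TOTAL_TIME_ENABLED	(1ULL << 0)	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	#define FMT_TOTAL_TIME_RUNNING	(1ULL << 1)	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	#define FMT_ID			(1ULL << 2)	/* PERF_FORMAT_ID */

	struct one_read {
		uint64_t count;
		uint64_t time_enabled;
		uint64_t time_running;
		uint64_t id;
	};

	/* Returns the number of u64 slots consumed from buf. */
	static int decode_read_one(const uint64_t *buf, uint64_t read_format,
				   struct one_read *out)
	{
		int n = 0;

		out->count = buf[n++];
		if (read_format & FMT_TOTAL_TIME_ENABLED)
			out->time_enabled = buf[n++];
		if (read_format & FMT_TOTAL_TIME_RUNNING)
			out->time_running = buf[n++];
		if (read_format & FMT_ID)
			out->id = buf[n++];

		return n;
	}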
- */ -static void perf_output_read_group(struct perf_output_handle *handle, - struct perf_counter *counter) -{ - struct perf_counter *leader = counter->group_leader, *sub; - u64 read_format = counter->attr.read_format; - u64 values[5]; - int n = 0; - - values[n++] = 1 + leader->nr_siblings; - - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - values[n++] = leader->total_time_enabled; - - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - values[n++] = leader->total_time_running; - - if (leader != counter) - leader->pmu->read(leader); - - values[n++] = atomic64_read(&leader->count); - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_counter_id(leader); - - perf_output_copy(handle, values, n * sizeof(u64)); - - list_for_each_entry(sub, &leader->sibling_list, group_entry) { - n = 0; - - if (sub != counter) - sub->pmu->read(sub); - - values[n++] = atomic64_read(&sub->count); - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_counter_id(sub); - - perf_output_copy(handle, values, n * sizeof(u64)); - } -} - -static void perf_output_read(struct perf_output_handle *handle, - struct perf_counter *counter) -{ - if (counter->attr.read_format & PERF_FORMAT_GROUP) - perf_output_read_group(handle, counter); - else - perf_output_read_one(handle, counter); -} - -void perf_output_sample(struct perf_output_handle *handle, - struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter) -{ - u64 sample_type = data->type; - - perf_output_put(handle, *header); - - if (sample_type & PERF_SAMPLE_IP) - perf_output_put(handle, data->ip); - - if (sample_type & PERF_SAMPLE_TID) - perf_output_put(handle, data->tid_entry); - - if (sample_type & PERF_SAMPLE_TIME) - perf_output_put(handle, data->time); - - if (sample_type & PERF_SAMPLE_ADDR) - perf_output_put(handle, data->addr); - - if (sample_type & PERF_SAMPLE_ID) - perf_output_put(handle, data->id); - - if (sample_type & PERF_SAMPLE_STREAM_ID) - perf_output_put(handle, data->stream_id); - - if (sample_type & PERF_SAMPLE_CPU) - perf_output_put(handle, data->cpu_entry); - - if (sample_type & PERF_SAMPLE_PERIOD) - perf_output_put(handle, data->period); - - if (sample_type & PERF_SAMPLE_READ) - perf_output_read(handle, counter); - - if (sample_type & PERF_SAMPLE_CALLCHAIN) { - if (data->callchain) { - int size = 1; - - if (data->callchain) - size += data->callchain->nr; - - size *= sizeof(u64); - - perf_output_copy(handle, data->callchain, size); - } else { - u64 nr = 0; - perf_output_put(handle, nr); - } - } - - if (sample_type & PERF_SAMPLE_RAW) { - if (data->raw) { - perf_output_put(handle, data->raw->size); - perf_output_copy(handle, data->raw->data, - data->raw->size); - } else { - struct { - u32 size; - u32 data; - } raw = { - .size = sizeof(u32), - .data = 0, - }; - perf_output_put(handle, raw); - } - } -} - -void perf_prepare_sample(struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter, - struct pt_regs *regs) -{ - u64 sample_type = counter->attr.sample_type; - - data->type = sample_type; - - header->type = PERF_EVENT_SAMPLE; - header->size = sizeof(*header); - - header->misc = 0; - header->misc |= perf_misc_flags(regs); - - if (sample_type & PERF_SAMPLE_IP) { - data->ip = perf_instruction_pointer(regs); - - header->size += sizeof(data->ip); - } - - if (sample_type & PERF_SAMPLE_TID) { - /* namespace issues */ - data->tid_entry.pid = perf_counter_pid(counter, current); - data->tid_entry.tid = perf_counter_tid(counter, current); - - header->size += 
sizeof(data->tid_entry); - } - - if (sample_type & PERF_SAMPLE_TIME) { - data->time = perf_clock(); - - header->size += sizeof(data->time); - } - - if (sample_type & PERF_SAMPLE_ADDR) - header->size += sizeof(data->addr); - - if (sample_type & PERF_SAMPLE_ID) { - data->id = primary_counter_id(counter); - - header->size += sizeof(data->id); - } - - if (sample_type & PERF_SAMPLE_STREAM_ID) { - data->stream_id = counter->id; - - header->size += sizeof(data->stream_id); - } - - if (sample_type & PERF_SAMPLE_CPU) { - data->cpu_entry.cpu = raw_smp_processor_id(); - data->cpu_entry.reserved = 0; - - header->size += sizeof(data->cpu_entry); - } - - if (sample_type & PERF_SAMPLE_PERIOD) - header->size += sizeof(data->period); - - if (sample_type & PERF_SAMPLE_READ) - header->size += perf_counter_read_size(counter); - - if (sample_type & PERF_SAMPLE_CALLCHAIN) { - int size = 1; - - data->callchain = perf_callchain(regs); - - if (data->callchain) - size += data->callchain->nr; - - header->size += size * sizeof(u64); - } - - if (sample_type & PERF_SAMPLE_RAW) { - int size = sizeof(u32); - - if (data->raw) - size += data->raw->size; - else - size += sizeof(u32); - - WARN_ON_ONCE(size & (sizeof(u64)-1)); - header->size += size; - } -} - -static void perf_counter_output(struct perf_counter *counter, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct perf_output_handle handle; - struct perf_event_header header; - - perf_prepare_sample(&header, data, counter, regs); - - if (perf_output_begin(&handle, counter, header.size, nmi, 1)) - return; - - perf_output_sample(&handle, &header, data, counter); - - perf_output_end(&handle); -} - -/* - * read event - */ - -struct perf_read_event { - struct perf_event_header header; - - u32 pid; - u32 tid; -}; - -static void -perf_counter_read_event(struct perf_counter *counter, - struct task_struct *task) -{ - struct perf_output_handle handle; - struct perf_read_event read_event = { - .header = { - .type = PERF_EVENT_READ, - .misc = 0, - .size = sizeof(read_event) + perf_counter_read_size(counter), - }, - .pid = perf_counter_pid(counter, task), - .tid = perf_counter_tid(counter, task), - }; - int ret; - - ret = perf_output_begin(&handle, counter, read_event.header.size, 0, 0); - if (ret) - return; - - perf_output_put(&handle, read_event); - perf_output_read(&handle, counter); - - perf_output_end(&handle); -} - -/* - * task tracking -- fork/exit - * - * enabled by: attr.comm | attr.mmap | attr.task - */ - -struct perf_task_event { - struct task_struct *task; - struct perf_counter_context *task_ctx; - - struct { - struct perf_event_header header; - - u32 pid; - u32 ppid; - u32 tid; - u32 ptid; - u64 time; - } event; -}; - -static void perf_counter_task_output(struct perf_counter *counter, - struct perf_task_event *task_event) -{ - struct perf_output_handle handle; - int size; - struct task_struct *task = task_event->task; - int ret; - - size = task_event->event.header.size; - ret = perf_output_begin(&handle, counter, size, 0, 0); - - if (ret) - return; - - task_event->event.pid = perf_counter_pid(counter, task); - task_event->event.ppid = perf_counter_pid(counter, current); - - task_event->event.tid = perf_counter_tid(counter, task); - task_event->event.ptid = perf_counter_tid(counter, current); - - task_event->event.time = perf_clock(); - - perf_output_put(&handle, task_event->event); - - perf_output_end(&handle); -} - -static int perf_counter_task_match(struct perf_counter *counter) -{ - if (counter->attr.comm || counter->attr.mmap || 
counter->attr.task) - return 1; - - return 0; -} - -static void perf_counter_task_ctx(struct perf_counter_context *ctx, - struct perf_task_event *task_event) -{ - struct perf_counter *counter; - - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_counter_task_match(counter)) - perf_counter_task_output(counter, task_event); - } - rcu_read_unlock(); -} - -static void perf_counter_task_event(struct perf_task_event *task_event) -{ - struct perf_cpu_context *cpuctx; - struct perf_counter_context *ctx = task_event->task_ctx; - - cpuctx = &get_cpu_var(perf_cpu_context); - perf_counter_task_ctx(&cpuctx->ctx, task_event); - put_cpu_var(perf_cpu_context); - - rcu_read_lock(); - if (!ctx) - ctx = rcu_dereference(task_event->task->perf_counter_ctxp); - if (ctx) - perf_counter_task_ctx(ctx, task_event); - rcu_read_unlock(); -} - -static void perf_counter_task(struct task_struct *task, - struct perf_counter_context *task_ctx, - int new) -{ - struct perf_task_event task_event; - - if (!atomic_read(&nr_comm_counters) && - !atomic_read(&nr_mmap_counters) && - !atomic_read(&nr_task_counters)) - return; - - task_event = (struct perf_task_event){ - .task = task, - .task_ctx = task_ctx, - .event = { - .header = { - .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, - .misc = 0, - .size = sizeof(task_event.event), - }, - /* .pid */ - /* .ppid */ - /* .tid */ - /* .ptid */ - }, - }; - - perf_counter_task_event(&task_event); -} - -void perf_counter_fork(struct task_struct *task) -{ - perf_counter_task(task, NULL, 1); -} - -/* - * comm tracking - */ - -struct perf_comm_event { - struct task_struct *task; - char *comm; - int comm_size; - - struct { - struct perf_event_header header; - - u32 pid; - u32 tid; - } event; -}; - -static void perf_counter_comm_output(struct perf_counter *counter, - struct perf_comm_event *comm_event) -{ - struct perf_output_handle handle; - int size = comm_event->event.header.size; - int ret = perf_output_begin(&handle, counter, size, 0, 0); - - if (ret) - return; - - comm_event->event.pid = perf_counter_pid(counter, comm_event->task); - comm_event->event.tid = perf_counter_tid(counter, comm_event->task); - - perf_output_put(&handle, comm_event->event); - perf_output_copy(&handle, comm_event->comm, - comm_event->comm_size); - perf_output_end(&handle); -} - -static int perf_counter_comm_match(struct perf_counter *counter) -{ - if (counter->attr.comm) - return 1; - - return 0; -} - -static void perf_counter_comm_ctx(struct perf_counter_context *ctx, - struct perf_comm_event *comm_event) -{ - struct perf_counter *counter; - - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_counter_comm_match(counter)) - perf_counter_comm_output(counter, comm_event); - } - rcu_read_unlock(); -} - -static void perf_counter_comm_event(struct perf_comm_event *comm_event) -{ - struct perf_cpu_context *cpuctx; - struct perf_counter_context *ctx; - unsigned int size; - char comm[TASK_COMM_LEN]; - - memset(comm, 0, sizeof(comm)); - strncpy(comm, comm_event->task->comm, sizeof(comm)); - size = ALIGN(strlen(comm)+1, sizeof(u64)); - - comm_event->comm = comm; - comm_event->comm_size = size; - - comm_event->event.header.size = sizeof(comm_event->event) + size; - - cpuctx = &get_cpu_var(perf_cpu_context); - perf_counter_comm_ctx(&cpuctx->ctx, comm_event); - 
put_cpu_var(perf_cpu_context); - - rcu_read_lock(); - /* - * doesn't really matter which of the child contexts the - * events ends up in. - */ - ctx = rcu_dereference(current->perf_counter_ctxp); - if (ctx) - perf_counter_comm_ctx(ctx, comm_event); - rcu_read_unlock(); -} - -void perf_counter_comm(struct task_struct *task) -{ - struct perf_comm_event comm_event; - - if (task->perf_counter_ctxp) - perf_counter_enable_on_exec(task); - - if (!atomic_read(&nr_comm_counters)) - return; - - comm_event = (struct perf_comm_event){ - .task = task, - /* .comm */ - /* .comm_size */ - .event = { - .header = { - .type = PERF_EVENT_COMM, - .misc = 0, - /* .size */ - }, - /* .pid */ - /* .tid */ - }, - }; - - perf_counter_comm_event(&comm_event); -} - -/* - * mmap tracking - */ - -struct perf_mmap_event { - struct vm_area_struct *vma; - - const char *file_name; - int file_size; - - struct { - struct perf_event_header header; - - u32 pid; - u32 tid; - u64 start; - u64 len; - u64 pgoff; - } event; -}; - -static void perf_counter_mmap_output(struct perf_counter *counter, - struct perf_mmap_event *mmap_event) -{ - struct perf_output_handle handle; - int size = mmap_event->event.header.size; - int ret = perf_output_begin(&handle, counter, size, 0, 0); - - if (ret) - return; - - mmap_event->event.pid = perf_counter_pid(counter, current); - mmap_event->event.tid = perf_counter_tid(counter, current); - - perf_output_put(&handle, mmap_event->event); - perf_output_copy(&handle, mmap_event->file_name, - mmap_event->file_size); - perf_output_end(&handle); -} - -static int perf_counter_mmap_match(struct perf_counter *counter, - struct perf_mmap_event *mmap_event) -{ - if (counter->attr.mmap) - return 1; - - return 0; -} - -static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, - struct perf_mmap_event *mmap_event) -{ - struct perf_counter *counter; - - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_counter_mmap_match(counter, mmap_event)) - perf_counter_mmap_output(counter, mmap_event); - } - rcu_read_unlock(); -} - -static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) -{ - struct perf_cpu_context *cpuctx; - struct perf_counter_context *ctx; - struct vm_area_struct *vma = mmap_event->vma; - struct file *file = vma->vm_file; - unsigned int size; - char tmp[16]; - char *buf = NULL; - const char *name; - - memset(tmp, 0, sizeof(tmp)); - - if (file) { - /* - * d_path works from the end of the buffer backwards, so we - * need to add enough zero bytes after the string to handle - * the 64bit alignment we do later. 
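For illustration only (not part of the patch), the alignment the comment above refers to, with concrete numbers (the file name is invented):

	/*
	 * name = "libc-2.9.so": strlen() = 11, +1 for the NUL = 12,
	 * ALIGN(12, sizeof(u64)) = 16 -- so 16 bytes go into the record and
	 * the 4 trailing bytes must already be zero, which is why the buffer
	 * is kzalloc()ed with sizeof(u64) bytes of slack for d_path().
	 */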
- */ - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); - if (!buf) { - name = strncpy(tmp, "//enomem", sizeof(tmp)); - goto got_name; - } - name = d_path(&file->f_path, buf, PATH_MAX); - if (IS_ERR(name)) { - name = strncpy(tmp, "//toolong", sizeof(tmp)); - goto got_name; - } - } else { - if (arch_vma_name(mmap_event->vma)) { - name = strncpy(tmp, arch_vma_name(mmap_event->vma), - sizeof(tmp)); - goto got_name; - } - - if (!vma->vm_mm) { - name = strncpy(tmp, "[vdso]", sizeof(tmp)); - goto got_name; - } - - name = strncpy(tmp, "//anon", sizeof(tmp)); - goto got_name; - } - -got_name: - size = ALIGN(strlen(name)+1, sizeof(u64)); - - mmap_event->file_name = name; - mmap_event->file_size = size; - - mmap_event->event.header.size = sizeof(mmap_event->event) + size; - - cpuctx = &get_cpu_var(perf_cpu_context); - perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); - put_cpu_var(perf_cpu_context); - - rcu_read_lock(); - /* - * doesn't really matter which of the child contexts the - * events ends up in. - */ - ctx = rcu_dereference(current->perf_counter_ctxp); - if (ctx) - perf_counter_mmap_ctx(ctx, mmap_event); - rcu_read_unlock(); - - kfree(buf); -} - -void __perf_counter_mmap(struct vm_area_struct *vma) -{ - struct perf_mmap_event mmap_event; - - if (!atomic_read(&nr_mmap_counters)) - return; - - mmap_event = (struct perf_mmap_event){ - .vma = vma, - /* .file_name */ - /* .file_size */ - .event = { - .header = { - .type = PERF_EVENT_MMAP, - .misc = 0, - /* .size */ - }, - /* .pid */ - /* .tid */ - .start = vma->vm_start, - .len = vma->vm_end - vma->vm_start, - .pgoff = vma->vm_pgoff, - }, - }; - - perf_counter_mmap_event(&mmap_event); -} - -/* - * IRQ throttle logging - */ - -static void perf_log_throttle(struct perf_counter *counter, int enable) -{ - struct perf_output_handle handle; - int ret; - - struct { - struct perf_event_header header; - u64 time; - u64 id; - u64 stream_id; - } throttle_event = { - .header = { - .type = PERF_EVENT_THROTTLE, - .misc = 0, - .size = sizeof(throttle_event), - }, - .time = perf_clock(), - .id = primary_counter_id(counter), - .stream_id = counter->id, - }; - - if (enable) - throttle_event.header.type = PERF_EVENT_UNTHROTTLE; - - ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); - if (ret) - return; - - perf_output_put(&handle, throttle_event); - perf_output_end(&handle); -} - -/* - * Generic counter overflow handling, sampling. 
- */ - -static int __perf_counter_overflow(struct perf_counter *counter, int nmi, - int throttle, struct perf_sample_data *data, - struct pt_regs *regs) -{ - int events = atomic_read(&counter->event_limit); - struct hw_perf_counter *hwc = &counter->hw; - int ret = 0; - - throttle = (throttle && counter->pmu->unthrottle != NULL); - - if (!throttle) { - hwc->interrupts++; - } else { - if (hwc->interrupts != MAX_INTERRUPTS) { - hwc->interrupts++; - if (HZ * hwc->interrupts > - (u64)sysctl_perf_counter_sample_rate) { - hwc->interrupts = MAX_INTERRUPTS; - perf_log_throttle(counter, 0); - ret = 1; - } - } else { - /* - * Keep re-disabling counters even though on the previous - * pass we disabled it - just in case we raced with a - * sched-in and the counter got enabled again: - */ - ret = 1; - } - } - - if (counter->attr.freq) { - u64 now = perf_clock(); - s64 delta = now - hwc->freq_stamp; - - hwc->freq_stamp = now; - - if (delta > 0 && delta < TICK_NSEC) - perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); - } - - /* - * XXX event_limit might not quite work as expected on inherited - * counters - */ - - counter->pending_kill = POLL_IN; - if (events && atomic_dec_and_test(&counter->event_limit)) { - ret = 1; - counter->pending_kill = POLL_HUP; - if (nmi) { - counter->pending_disable = 1; - perf_pending_queue(&counter->pending, - perf_pending_counter); - } else - perf_counter_disable(counter); - } - - perf_counter_output(counter, nmi, data, regs); - return ret; -} - -int perf_counter_overflow(struct perf_counter *counter, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - return __perf_counter_overflow(counter, nmi, 1, data, regs); -} - -/* - * Generic software counter infrastructure - */ - -/* - * We directly increment counter->count and keep a second value in - * counter->hw.period_left to count intervals. This period counter - * is kept in the range [-sample_period, 0] so that we can use the - * sign as trigger. - */ - -static u64 perf_swcounter_set_period(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - u64 period = hwc->last_period; - u64 nr, offset; - s64 old, val; - - hwc->last_period = hwc->sample_period; - -again: - old = val = atomic64_read(&hwc->period_left); - if (val < 0) - return 0; - - nr = div64_u64(period + val, period); - offset = nr * period; - val -= offset; - if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) - goto again; - - return nr; -} - -static void perf_swcounter_overflow(struct perf_counter *counter, - int nmi, struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct hw_perf_counter *hwc = &counter->hw; - int throttle = 0; - u64 overflow; - - data->period = counter->hw.last_period; - overflow = perf_swcounter_set_period(counter); - - if (hwc->interrupts == MAX_INTERRUPTS) - return; - - for (; overflow; overflow--) { - if (__perf_counter_overflow(counter, nmi, throttle, - data, regs)) { - /* - * We inhibit the overflow from happening when - * hwc->interrupts == MAX_INTERRUPTS. - */ - break; - } - throttle = 1; - } -} - -static void perf_swcounter_unthrottle(struct perf_counter *counter) -{ - /* - * Nothing to do, we already reset hwc->interrupts. 
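For illustration only (not part of the patch): the period bookkeeping done by perf_swcounter_set_period() above, reduced to plain arithmetic with a hypothetical helper (no atomics, no cmpxchg retry loop):

	#include <stdint.h>

	static uint64_t periods_elapsed(int64_t *period_left, uint64_t period)
	{
		int64_t val = *period_left;
		uint64_t nr;

		if (val < 0)			/* still counting down, no overflow yet */
			return 0;

		nr = (period + val) / period;	/* e.g. period = 100, val = 250 -> 3 */
		*period_left = val - (int64_t)(nr * period);	/* 250 - 300 = -50 */

		return nr;
	}

With sample_period = 100 and period_left having been driven up to +250 by perf_swcounter_add(), three overflows are reported and the counter is re-armed 50 events below zero.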
- */ -} - -static void perf_swcounter_add(struct perf_counter *counter, u64 nr, - int nmi, struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct hw_perf_counter *hwc = &counter->hw; - - atomic64_add(nr, &counter->count); - - if (!hwc->sample_period) - return; - - if (!regs) - return; - - if (!atomic64_add_negative(nr, &hwc->period_left)) - perf_swcounter_overflow(counter, nmi, data, regs); -} - -static int perf_swcounter_is_counting(struct perf_counter *counter) -{ - /* - * The counter is active, we're good! - */ - if (counter->state == PERF_COUNTER_STATE_ACTIVE) - return 1; - - /* - * The counter is off/error, not counting. - */ - if (counter->state != PERF_COUNTER_STATE_INACTIVE) - return 0; - - /* - * The counter is inactive, if the context is active - * we're part of a group that didn't make it on the 'pmu', - * not counting. - */ - if (counter->ctx->is_active) - return 0; - - /* - * We're inactive and the context is too, this means the - * task is scheduled out, we're counting events that happen - * to us, like migration events. - */ - return 1; -} - -static int perf_swcounter_match(struct perf_counter *counter, - enum perf_type_id type, - u32 event_id, struct pt_regs *regs) -{ - if (!perf_swcounter_is_counting(counter)) - return 0; - - if (counter->attr.type != type) - return 0; - if (counter->attr.config != event_id) - return 0; - - if (regs) { - if (counter->attr.exclude_user && user_mode(regs)) - return 0; - - if (counter->attr.exclude_kernel && !user_mode(regs)) - return 0; - } - - return 1; -} - -static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, - enum perf_type_id type, - u32 event_id, u64 nr, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct perf_counter *counter; - - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_swcounter_match(counter, type, event_id, regs)) - perf_swcounter_add(counter, nr, nmi, data, regs); - } - rcu_read_unlock(); -} - -static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) -{ - if (in_nmi()) - return &cpuctx->recursion[3]; - - if (in_irq()) - return &cpuctx->recursion[2]; - - if (in_softirq()) - return &cpuctx->recursion[1]; - - return &cpuctx->recursion[0]; -} - -static void do_perf_swcounter_event(enum perf_type_id type, u32 event, - u64 nr, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); - int *recursion = perf_swcounter_recursion_context(cpuctx); - struct perf_counter_context *ctx; - - if (*recursion) - goto out; - - (*recursion)++; - barrier(); - - perf_swcounter_ctx_event(&cpuctx->ctx, type, event, - nr, nmi, data, regs); - rcu_read_lock(); - /* - * doesn't really matter which of the child contexts the - * events ends up in. 
- */ - ctx = rcu_dereference(current->perf_counter_ctxp); - if (ctx) - perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs); - rcu_read_unlock(); - - barrier(); - (*recursion)--; - -out: - put_cpu_var(perf_cpu_context); -} - -void __perf_swcounter_event(u32 event, u64 nr, int nmi, - struct pt_regs *regs, u64 addr) -{ - struct perf_sample_data data = { - .addr = addr, - }; - - do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, - &data, regs); -} - -static void perf_swcounter_read(struct perf_counter *counter) -{ -} - -static int perf_swcounter_enable(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - - if (hwc->sample_period) { - hwc->last_period = hwc->sample_period; - perf_swcounter_set_period(counter); - } - return 0; -} - -static void perf_swcounter_disable(struct perf_counter *counter) -{ -} - -static const struct pmu perf_ops_generic = { - .enable = perf_swcounter_enable, - .disable = perf_swcounter_disable, - .read = perf_swcounter_read, - .unthrottle = perf_swcounter_unthrottle, -}; - -/* - * hrtimer based swcounter callback - */ - -static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) -{ - enum hrtimer_restart ret = HRTIMER_RESTART; - struct perf_sample_data data; - struct pt_regs *regs; - struct perf_counter *counter; - u64 period; - - counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); - counter->pmu->read(counter); - - data.addr = 0; - regs = get_irq_regs(); - /* - * In case we exclude kernel IPs or are somehow not in interrupt - * context, provide the next best thing, the user IP. - */ - if ((counter->attr.exclude_kernel || !regs) && - !counter->attr.exclude_user) - regs = task_pt_regs(current); - - if (regs) { - if (perf_counter_overflow(counter, 0, &data, regs)) - ret = HRTIMER_NORESTART; - } - - period = max_t(u64, 10000, counter->hw.sample_period); - hrtimer_forward_now(hrtimer, ns_to_ktime(period)); - - return ret; -} - -/* - * Software counter: cpu wall time clock - */ - -static void cpu_clock_perf_counter_update(struct perf_counter *counter) -{ - int cpu = raw_smp_processor_id(); - s64 prev; - u64 now; - - now = cpu_clock(cpu); - prev = atomic64_read(&counter->hw.prev_count); - atomic64_set(&counter->hw.prev_count, now); - atomic64_add(now - prev, &counter->count); -} - -static int cpu_clock_perf_counter_enable(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - int cpu = raw_smp_processor_id(); - - atomic64_set(&hwc->prev_count, cpu_clock(cpu)); - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hwc->hrtimer.function = perf_swcounter_hrtimer; - if (hwc->sample_period) { - u64 period = max_t(u64, 10000, hwc->sample_period); - __hrtimer_start_range_ns(&hwc->hrtimer, - ns_to_ktime(period), 0, - HRTIMER_MODE_REL, 0); - } - - return 0; -} - -static void cpu_clock_perf_counter_disable(struct perf_counter *counter) -{ - if (counter->hw.sample_period) - hrtimer_cancel(&counter->hw.hrtimer); - cpu_clock_perf_counter_update(counter); -} - -static void cpu_clock_perf_counter_read(struct perf_counter *counter) -{ - cpu_clock_perf_counter_update(counter); -} - -static const struct pmu perf_ops_cpu_clock = { - .enable = cpu_clock_perf_counter_enable, - .disable = cpu_clock_perf_counter_disable, - .read = cpu_clock_perf_counter_read, -}; - -/* - * Software counter: task time clock - */ - -static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) -{ - u64 prev; - s64 delta; - - prev = atomic64_xchg(&counter->hw.prev_count, now); - 
delta = now - prev; - atomic64_add(delta, &counter->count); -} - -static int task_clock_perf_counter_enable(struct perf_counter *counter) -{ - struct hw_perf_counter *hwc = &counter->hw; - u64 now; - - now = counter->ctx->time; - - atomic64_set(&hwc->prev_count, now); - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hwc->hrtimer.function = perf_swcounter_hrtimer; - if (hwc->sample_period) { - u64 period = max_t(u64, 10000, hwc->sample_period); - __hrtimer_start_range_ns(&hwc->hrtimer, - ns_to_ktime(period), 0, - HRTIMER_MODE_REL, 0); - } - - return 0; -} - -static void task_clock_perf_counter_disable(struct perf_counter *counter) -{ - if (counter->hw.sample_period) - hrtimer_cancel(&counter->hw.hrtimer); - task_clock_perf_counter_update(counter, counter->ctx->time); - -} - -static void task_clock_perf_counter_read(struct perf_counter *counter) -{ - u64 time; - - if (!in_nmi()) { - update_context_time(counter->ctx); - time = counter->ctx->time; - } else { - u64 now = perf_clock(); - u64 delta = now - counter->ctx->timestamp; - time = counter->ctx->time + delta; - } - - task_clock_perf_counter_update(counter, time); -} - -static const struct pmu perf_ops_task_clock = { - .enable = task_clock_perf_counter_enable, - .disable = task_clock_perf_counter_disable, - .read = task_clock_perf_counter_read, -}; - -#ifdef CONFIG_EVENT_PROFILE -void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, - int entry_size) -{ - struct perf_raw_record raw = { - .size = entry_size, - .data = record, - }; - - struct perf_sample_data data = { - .addr = addr, - .raw = &raw, - }; - - struct pt_regs *regs = get_irq_regs(); - - if (!regs) - regs = task_pt_regs(current); - - do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, - &data, regs); -} -EXPORT_SYMBOL_GPL(perf_tpcounter_event); - -extern int ftrace_profile_enable(int); -extern void ftrace_profile_disable(int); - -static void tp_perf_counter_destroy(struct perf_counter *counter) -{ - ftrace_profile_disable(counter->attr.config); -} - -static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) -{ - /* - * Raw tracepoint data is a severe data leak, only allow root to - * have these. - */ - if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && - perf_paranoid_tracepoint_raw() && - !capable(CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); - - if (ftrace_profile_enable(counter->attr.config)) - return NULL; - - counter->destroy = tp_perf_counter_destroy; - - return &perf_ops_generic; -} -#else -static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) -{ - return NULL; -} -#endif - -atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; - -static void sw_perf_counter_destroy(struct perf_counter *counter) -{ - u64 event_id = counter->attr.config; - - WARN_ON(counter->parent); - - atomic_dec(&perf_swcounter_enabled[event_id]); -} - -static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) -{ - const struct pmu *pmu = NULL; - u64 event_id = counter->attr.config; - - /* - * Software counters (currently) can't in general distinguish - * between user, kernel and hypervisor events. - * However, context switches and cpu migrations are considered - * to be kernel events, and page faults are never hypervisor - * events. - */ - switch (event_id) { - case PERF_COUNT_SW_CPU_CLOCK: - pmu = &perf_ops_cpu_clock; - - break; - case PERF_COUNT_SW_TASK_CLOCK: - /* - * If the user instantiates this as a per-cpu counter, - * use the cpu_clock counter instead. 
- */ - if (counter->ctx->task) - pmu = &perf_ops_task_clock; - else - pmu = &perf_ops_cpu_clock; - - break; - case PERF_COUNT_SW_PAGE_FAULTS: - case PERF_COUNT_SW_PAGE_FAULTS_MIN: - case PERF_COUNT_SW_PAGE_FAULTS_MAJ: - case PERF_COUNT_SW_CONTEXT_SWITCHES: - case PERF_COUNT_SW_CPU_MIGRATIONS: - if (!counter->parent) { - atomic_inc(&perf_swcounter_enabled[event_id]); - counter->destroy = sw_perf_counter_destroy; - } - pmu = &perf_ops_generic; - break; - } - - return pmu; -} - -/* - * Allocate and initialize a counter structure - */ -static struct perf_counter * -perf_counter_alloc(struct perf_counter_attr *attr, - int cpu, - struct perf_counter_context *ctx, - struct perf_counter *group_leader, - struct perf_counter *parent_counter, - gfp_t gfpflags) -{ - const struct pmu *pmu; - struct perf_counter *counter; - struct hw_perf_counter *hwc; - long err; - - counter = kzalloc(sizeof(*counter), gfpflags); - if (!counter) - return ERR_PTR(-ENOMEM); - - /* - * Single counters are their own group leaders, with an - * empty sibling list: - */ - if (!group_leader) - group_leader = counter; - - mutex_init(&counter->child_mutex); - INIT_LIST_HEAD(&counter->child_list); - - INIT_LIST_HEAD(&counter->group_entry); - INIT_LIST_HEAD(&counter->event_entry); - INIT_LIST_HEAD(&counter->sibling_list); - init_waitqueue_head(&counter->waitq); - - mutex_init(&counter->mmap_mutex); - - counter->cpu = cpu; - counter->attr = *attr; - counter->group_leader = group_leader; - counter->pmu = NULL; - counter->ctx = ctx; - counter->oncpu = -1; - - counter->parent = parent_counter; - - counter->ns = get_pid_ns(current->nsproxy->pid_ns); - counter->id = atomic64_inc_return(&perf_counter_id); - - counter->state = PERF_COUNTER_STATE_INACTIVE; - - if (attr->disabled) - counter->state = PERF_COUNTER_STATE_OFF; - - pmu = NULL; - - hwc = &counter->hw; - hwc->sample_period = attr->sample_period; - if (attr->freq && attr->sample_freq) - hwc->sample_period = 1; - hwc->last_period = hwc->sample_period; - - atomic64_set(&hwc->period_left, hwc->sample_period); - - /* - * we currently do not support PERF_FORMAT_GROUP on inherited counters - */ - if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) - goto done; - - switch (attr->type) { - case PERF_TYPE_RAW: - case PERF_TYPE_HARDWARE: - case PERF_TYPE_HW_CACHE: - pmu = hw_perf_counter_init(counter); - break; - - case PERF_TYPE_SOFTWARE: - pmu = sw_perf_counter_init(counter); - break; - - case PERF_TYPE_TRACEPOINT: - pmu = tp_perf_counter_init(counter); - break; - - default: - break; - } -done: - err = 0; - if (!pmu) - err = -EINVAL; - else if (IS_ERR(pmu)) - err = PTR_ERR(pmu); - - if (err) { - if (counter->ns) - put_pid_ns(counter->ns); - kfree(counter); - return ERR_PTR(err); - } - - counter->pmu = pmu; - - if (!counter->parent) { - atomic_inc(&nr_counters); - if (counter->attr.mmap) - atomic_inc(&nr_mmap_counters); - if (counter->attr.comm) - atomic_inc(&nr_comm_counters); - if (counter->attr.task) - atomic_inc(&nr_task_counters); - } - - return counter; -} - -static int perf_copy_attr(struct perf_counter_attr __user *uattr, - struct perf_counter_attr *attr) -{ - u32 size; - int ret; - - if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) - return -EFAULT; - - /* - * zero the full structure, so that a short copy will be nice. 
- */ - memset(attr, 0, sizeof(*attr)); - - ret = get_user(size, &uattr->size); - if (ret) - return ret; - - if (size > PAGE_SIZE) /* silly large */ - goto err_size; - - if (!size) /* abi compat */ - size = PERF_ATTR_SIZE_VER0; - - if (size < PERF_ATTR_SIZE_VER0) - goto err_size; - - /* - * If we're handed a bigger struct than we know of, - * ensure all the unknown bits are 0 - i.e. new - * user-space does not rely on any kernel feature - * extensions we dont know about yet. - */ - if (size > sizeof(*attr)) { - unsigned char __user *addr; - unsigned char __user *end; - unsigned char val; - - addr = (void __user *)uattr + sizeof(*attr); - end = (void __user *)uattr + size; - - for (; addr < end; addr++) { - ret = get_user(val, addr); - if (ret) - return ret; - if (val) - goto err_size; - } - size = sizeof(*attr); - } - - ret = copy_from_user(attr, uattr, size); - if (ret) - return -EFAULT; - - /* - * If the type exists, the corresponding creation will verify - * the attr->config. - */ - if (attr->type >= PERF_TYPE_MAX) - return -EINVAL; - - if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) - return -EINVAL; - - if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) - return -EINVAL; - - if (attr->read_format & ~(PERF_FORMAT_MAX-1)) - return -EINVAL; - -out: - return ret; - -err_size: - put_user(sizeof(*attr), &uattr->size); - ret = -E2BIG; - goto out; -} - -int perf_counter_set_output(struct perf_counter *counter, int output_fd) -{ - struct perf_counter *output_counter = NULL; - struct file *output_file = NULL; - struct perf_counter *old_output; - int fput_needed = 0; - int ret = -EINVAL; - - if (!output_fd) - goto set; - - output_file = fget_light(output_fd, &fput_needed); - if (!output_file) - return -EBADF; - - if (output_file->f_op != &perf_fops) - goto out; - - output_counter = output_file->private_data; - - /* Don't chain output fds */ - if (output_counter->output) - goto out; - - /* Don't set an output fd when we already have an output channel */ - if (counter->data) - goto out; - - atomic_long_inc(&output_file->f_count); - -set: - mutex_lock(&counter->mmap_mutex); - old_output = counter->output; - rcu_assign_pointer(counter->output, output_counter); - mutex_unlock(&counter->mmap_mutex); - - if (old_output) { - /* - * we need to make sure no existing perf_output_*() - * is still referencing this counter. - */ - synchronize_rcu(); - fput(old_output->filp); - } - - ret = 0; -out: - fput_light(output_file, fput_needed); - return ret; -} - -/** - * sys_perf_counter_open - open a performance counter, associate it to a task/cpu - * - * @attr_uptr: event type attributes for monitoring/sampling - * @pid: target pid - * @cpu: target cpu - * @group_fd: group leader counter fd - */ -SYSCALL_DEFINE5(perf_counter_open, - struct perf_counter_attr __user *, attr_uptr, - pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) -{ - struct perf_counter *counter, *group_leader; - struct perf_counter_attr attr; - struct perf_counter_context *ctx; - struct file *counter_file = NULL; - struct file *group_file = NULL; - int fput_needed = 0; - int fput_needed2 = 0; - int err; - - /* for future expandability... 
*/ - if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT)) - return -EINVAL; - - err = perf_copy_attr(attr_uptr, &attr); - if (err) - return err; - - if (!attr.exclude_kernel) { - if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) - return -EACCES; - } - - if (attr.freq) { - if (attr.sample_freq > sysctl_perf_counter_sample_rate) - return -EINVAL; - } - - /* - * Get the target context (task or percpu): - */ - ctx = find_get_context(pid, cpu); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - /* - * Look up the group leader (we will attach this counter to it): - */ - group_leader = NULL; - if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { - err = -EINVAL; - group_file = fget_light(group_fd, &fput_needed); - if (!group_file) - goto err_put_context; - if (group_file->f_op != &perf_fops) - goto err_put_context; - - group_leader = group_file->private_data; - /* - * Do not allow a recursive hierarchy (this new sibling - * becoming part of another group-sibling): - */ - if (group_leader->group_leader != group_leader) - goto err_put_context; - /* - * Do not allow to attach to a group in a different - * task or CPU context: - */ - if (group_leader->ctx != ctx) - goto err_put_context; - /* - * Only a group leader can be exclusive or pinned - */ - if (attr.exclusive || attr.pinned) - goto err_put_context; - } - - counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, - NULL, GFP_KERNEL); - err = PTR_ERR(counter); - if (IS_ERR(counter)) - goto err_put_context; - - err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); - if (err < 0) - goto err_free_put_context; - - counter_file = fget_light(err, &fput_needed2); - if (!counter_file) - goto err_free_put_context; - - if (flags & PERF_FLAG_FD_OUTPUT) { - err = perf_counter_set_output(counter, group_fd); - if (err) - goto err_fput_free_put_context; - } - - counter->filp = counter_file; - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - perf_install_in_context(ctx, counter, cpu); - ++ctx->generation; - mutex_unlock(&ctx->mutex); - - counter->owner = current; - get_task_struct(current); - mutex_lock(¤t->perf_counter_mutex); - list_add_tail(&counter->owner_entry, ¤t->perf_counter_list); - mutex_unlock(¤t->perf_counter_mutex); - -err_fput_free_put_context: - fput_light(counter_file, fput_needed2); - -err_free_put_context: - if (err < 0) - kfree(counter); - -err_put_context: - if (err < 0) - put_ctx(ctx); - - fput_light(group_file, fput_needed); - - return err; -} - -/* - * inherit a counter from parent task to child task: - */ -static struct perf_counter * -inherit_counter(struct perf_counter *parent_counter, - struct task_struct *parent, - struct perf_counter_context *parent_ctx, - struct task_struct *child, - struct perf_counter *group_leader, - struct perf_counter_context *child_ctx) -{ - struct perf_counter *child_counter; - - /* - * Instead of creating recursive hierarchies of counters, - * we link inherited counters back to the original parent, - * which has a filp for sure, which we use as the reference - * count: - */ - if (parent_counter->parent) - parent_counter = parent_counter->parent; - - child_counter = perf_counter_alloc(&parent_counter->attr, - parent_counter->cpu, child_ctx, - group_leader, parent_counter, - GFP_KERNEL); - if (IS_ERR(child_counter)) - return child_counter; - get_ctx(child_ctx); - - /* - * Make the child state follow the state of the parent counter, - * not its attr.disabled bit. We hold the parent's mutex, - * so we won't race with perf_counter_{en, dis}able_family. 
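For illustration only (not part of the patch): a minimal userspace call into the syscall defined above. It assumes the exported linux/perf_counter.h of this tree and an architecture that defines __NR_perf_counter_open; the helper name is invented.

	#include <linux/perf_counter.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	/* Count task clock for one task, on any CPU, as its own group leader. */
	static int open_task_clock(pid_t pid)
	{
		struct perf_counter_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);		/* size-checked by perf_copy_attr() */
		attr.type   = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_TASK_CLOCK;

		/* attr, pid, cpu = -1 (any), group_fd = -1 (new group), flags = 0 */
		return syscall(__NR_perf_counter_open, &attr, pid, -1, -1, 0);
	}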
- */ - if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) - child_counter->state = PERF_COUNTER_STATE_INACTIVE; - else - child_counter->state = PERF_COUNTER_STATE_OFF; - - if (parent_counter->attr.freq) - child_counter->hw.sample_period = parent_counter->hw.sample_period; - - /* - * Link it up in the child's context: - */ - add_counter_to_ctx(child_counter, child_ctx); - - /* - * Get a reference to the parent filp - we will fput it - * when the child counter exits. This is safe to do because - * we are in the parent and we know that the filp still - * exists and has a nonzero count: - */ - atomic_long_inc(&parent_counter->filp->f_count); - - /* - * Link this into the parent counter's child list - */ - WARN_ON_ONCE(parent_counter->ctx->parent_ctx); - mutex_lock(&parent_counter->child_mutex); - list_add_tail(&child_counter->child_list, &parent_counter->child_list); - mutex_unlock(&parent_counter->child_mutex); - - return child_counter; -} - -static int inherit_group(struct perf_counter *parent_counter, - struct task_struct *parent, - struct perf_counter_context *parent_ctx, - struct task_struct *child, - struct perf_counter_context *child_ctx) -{ - struct perf_counter *leader; - struct perf_counter *sub; - struct perf_counter *child_ctr; - - leader = inherit_counter(parent_counter, parent, parent_ctx, - child, NULL, child_ctx); - if (IS_ERR(leader)) - return PTR_ERR(leader); - list_for_each_entry(sub, &parent_counter->sibling_list, group_entry) { - child_ctr = inherit_counter(sub, parent, parent_ctx, - child, leader, child_ctx); - if (IS_ERR(child_ctr)) - return PTR_ERR(child_ctr); - } - return 0; -} - -static void sync_child_counter(struct perf_counter *child_counter, - struct task_struct *child) -{ - struct perf_counter *parent_counter = child_counter->parent; - u64 child_val; - - if (child_counter->attr.inherit_stat) - perf_counter_read_event(child_counter, child); - - child_val = atomic64_read(&child_counter->count); - - /* - * Add back the child's count to the parent's count: - */ - atomic64_add(child_val, &parent_counter->count); - atomic64_add(child_counter->total_time_enabled, - &parent_counter->child_total_time_enabled); - atomic64_add(child_counter->total_time_running, - &parent_counter->child_total_time_running); - - /* - * Remove this counter from the parent's list - */ - WARN_ON_ONCE(parent_counter->ctx->parent_ctx); - mutex_lock(&parent_counter->child_mutex); - list_del_init(&child_counter->child_list); - mutex_unlock(&parent_counter->child_mutex); - - /* - * Release the parent counter, if this was the last - * reference to it. - */ - fput(parent_counter->filp); -} - -static void -__perf_counter_exit_task(struct perf_counter *child_counter, - struct perf_counter_context *child_ctx, - struct task_struct *child) -{ - struct perf_counter *parent_counter; - - update_counter_times(child_counter); - perf_counter_remove_from_context(child_counter); - - parent_counter = child_counter->parent; - /* - * It can happen that parent exits first, and has counters - * that are still around due to the child reference. These - * counters need to be zapped - but otherwise linger. - */ - if (parent_counter) { - sync_child_counter(child_counter, child); - free_counter(child_counter); - } -} - -/* - * When a child task exits, feed back counter values to parent counters. 
- */ -void perf_counter_exit_task(struct task_struct *child) -{ - struct perf_counter *child_counter, *tmp; - struct perf_counter_context *child_ctx; - unsigned long flags; - - if (likely(!child->perf_counter_ctxp)) { - perf_counter_task(child, NULL, 0); - return; - } - - local_irq_save(flags); - /* - * We can't reschedule here because interrupts are disabled, - * and either child is current or it is a task that can't be - * scheduled, so we are now safe from rescheduling changing - * our context. - */ - child_ctx = child->perf_counter_ctxp; - __perf_counter_task_sched_out(child_ctx); - - /* - * Take the context lock here so that if find_get_context is - * reading child->perf_counter_ctxp, we wait until it has - * incremented the context's refcount before we do put_ctx below. - */ - spin_lock(&child_ctx->lock); - child->perf_counter_ctxp = NULL; - /* - * If this context is a clone; unclone it so it can't get - * swapped to another process while we're removing all - * the counters from it. - */ - unclone_ctx(child_ctx); - spin_unlock_irqrestore(&child_ctx->lock, flags); - - /* - * Report the task dead after unscheduling the counters so that we - * won't get any samples after PERF_EVENT_EXIT. We can however still - * get a few PERF_EVENT_READ events. - */ - perf_counter_task(child, child_ctx, 0); - - /* - * We can recurse on the same lock type through: - * - * __perf_counter_exit_task() - * sync_child_counter() - * fput(parent_counter->filp) - * perf_release() - * mutex_lock(&ctx->mutex) - * - * But since its the parent context it won't be the same instance. - */ - mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); - -again: - list_for_each_entry_safe(child_counter, tmp, &child_ctx->group_list, - group_entry) - __perf_counter_exit_task(child_counter, child_ctx, child); - - /* - * If the last counter was a group counter, it will have appended all - * its siblings to the list, but we obtained 'tmp' before that which - * will still point to the list head terminating the iteration. - */ - if (!list_empty(&child_ctx->group_list)) - goto again; - - mutex_unlock(&child_ctx->mutex); - - put_ctx(child_ctx); -} - -/* - * free an unexposed, unused context as created by inheritance by - * init_task below, used by fork() in case of fail. 
- */ -void perf_counter_free_task(struct task_struct *task) -{ - struct perf_counter_context *ctx = task->perf_counter_ctxp; - struct perf_counter *counter, *tmp; - - if (!ctx) - return; - - mutex_lock(&ctx->mutex); -again: - list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) { - struct perf_counter *parent = counter->parent; - - if (WARN_ON_ONCE(!parent)) - continue; - - mutex_lock(&parent->child_mutex); - list_del_init(&counter->child_list); - mutex_unlock(&parent->child_mutex); - - fput(parent->filp); - - list_del_counter(counter, ctx); - free_counter(counter); - } - - if (!list_empty(&ctx->group_list)) - goto again; - - mutex_unlock(&ctx->mutex); - - put_ctx(ctx); -} - -/* - * Initialize the perf_counter context in task_struct - */ -int perf_counter_init_task(struct task_struct *child) -{ - struct perf_counter_context *child_ctx, *parent_ctx; - struct perf_counter_context *cloned_ctx; - struct perf_counter *counter; - struct task_struct *parent = current; - int inherited_all = 1; - int ret = 0; - - child->perf_counter_ctxp = NULL; - - mutex_init(&child->perf_counter_mutex); - INIT_LIST_HEAD(&child->perf_counter_list); - - if (likely(!parent->perf_counter_ctxp)) - return 0; - - /* - * This is executed from the parent task context, so inherit - * counters that have been marked for cloning. - * First allocate and initialize a context for the child. - */ - - child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); - if (!child_ctx) - return -ENOMEM; - - __perf_counter_init_context(child_ctx, child); - child->perf_counter_ctxp = child_ctx; - get_task_struct(child); - - /* - * If the parent's context is a clone, pin it so it won't get - * swapped under us. - */ - parent_ctx = perf_pin_task_context(parent); - - /* - * No need to check if parent_ctx != NULL here; since we saw - * it non-NULL earlier, the only reason for it to become NULL - * is if we exit, and since we're currently in the middle of - * a fork we can't be exiting at the same time. - */ - - /* - * Lock the parent list. No need to lock the child - not PID - * hashed yet and not running, so nobody can access it. - */ - mutex_lock(&parent_ctx->mutex); - - /* - * We dont have to disable NMIs - we are only looking at - * the list, not manipulating it: - */ - list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { - if (counter != counter->group_leader) - continue; - - if (!counter->attr.inherit) { - inherited_all = 0; - continue; - } - - ret = inherit_group(counter, parent, parent_ctx, - child, child_ctx); - if (ret) { - inherited_all = 0; - break; - } - } - - if (inherited_all) { - /* - * Mark the child context as a clone of the parent - * context, or of whatever the parent is a clone of. - * Note that if the parent is a clone, it could get - * uncloned at any point, but that doesn't matter - * because the list of counters and the generation - * count can't have changed since we took the mutex. 
- */ - cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); - if (cloned_ctx) { - child_ctx->parent_ctx = cloned_ctx; - child_ctx->parent_gen = parent_ctx->parent_gen; - } else { - child_ctx->parent_ctx = parent_ctx; - child_ctx->parent_gen = parent_ctx->generation; - } - get_ctx(child_ctx->parent_ctx); - } - - mutex_unlock(&parent_ctx->mutex); - - perf_unpin_context(parent_ctx); - - return ret; -} - -static void __cpuinit perf_counter_init_cpu(int cpu) -{ - struct perf_cpu_context *cpuctx; - - cpuctx = &per_cpu(perf_cpu_context, cpu); - __perf_counter_init_context(&cpuctx->ctx, NULL); - - spin_lock(&perf_resource_lock); - cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; - spin_unlock(&perf_resource_lock); - - hw_perf_counter_setup(cpu); -} - -#ifdef CONFIG_HOTPLUG_CPU -static void __perf_counter_exit_cpu(void *info) -{ - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - struct perf_counter_context *ctx = &cpuctx->ctx; - struct perf_counter *counter, *tmp; - - list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) - __perf_counter_remove_from_context(counter); -} -static void perf_counter_exit_cpu(int cpu) -{ - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx = &cpuctx->ctx; - - mutex_lock(&ctx->mutex); - smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); - mutex_unlock(&ctx->mutex); -} -#else -static inline void perf_counter_exit_cpu(int cpu) { } -#endif - -static int __cpuinit -perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action) { - - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - perf_counter_init_cpu(cpu); - break; - - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - hw_perf_counter_setup_online(cpu); - break; - - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - perf_counter_exit_cpu(cpu); - break; - - default: - break; - } - - return NOTIFY_OK; -} - -/* - * This has to have a higher priority than migration_notifier in sched.c. 
- */ -static struct notifier_block __cpuinitdata perf_cpu_nb = { - .notifier_call = perf_cpu_notify, - .priority = 20, -}; - -void __init perf_counter_init(void) -{ - perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); - perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, - (void *)(long)smp_processor_id()); - register_cpu_notifier(&perf_cpu_nb); -} - -static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) -{ - return sprintf(buf, "%d\n", perf_reserved_percpu); -} - -static ssize_t -perf_set_reserve_percpu(struct sysdev_class *class, - const char *buf, - size_t count) -{ - struct perf_cpu_context *cpuctx; - unsigned long val; - int err, cpu, mpt; - - err = strict_strtoul(buf, 10, &val); - if (err) - return err; - if (val > perf_max_counters) - return -EINVAL; - - spin_lock(&perf_resource_lock); - perf_reserved_percpu = val; - for_each_online_cpu(cpu) { - cpuctx = &per_cpu(perf_cpu_context, cpu); - spin_lock_irq(&cpuctx->ctx.lock); - mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, - perf_max_counters - perf_reserved_percpu); - cpuctx->max_pertask = mpt; - spin_unlock_irq(&cpuctx->ctx.lock); - } - spin_unlock(&perf_resource_lock); - - return count; -} - -static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) -{ - return sprintf(buf, "%d\n", perf_overcommit); -} - -static ssize_t -perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) -{ - unsigned long val; - int err; - - err = strict_strtoul(buf, 10, &val); - if (err) - return err; - if (val > 1) - return -EINVAL; - - spin_lock(&perf_resource_lock); - perf_overcommit = val; - spin_unlock(&perf_resource_lock); - - return count; -} - -static SYSDEV_CLASS_ATTR( - reserve_percpu, - 0644, - perf_show_reserve_percpu, - perf_set_reserve_percpu - ); - -static SYSDEV_CLASS_ATTR( - overcommit, - 0644, - perf_show_overcommit, - perf_set_overcommit - ); - -static struct attribute *perfclass_attrs[] = { - &attr_reserve_percpu.attr, - &attr_overcommit.attr, - NULL -}; - -static struct attribute_group perfclass_attr_group = { - .attrs = perfclass_attrs, - .name = "perf_counters", -}; - -static int __init perf_counter_sysfs_init(void) -{ - return sysfs_create_group(&cpu_sysdev_class.kset.kobj, - &perfclass_attr_group); -} -device_initcall(perf_counter_sysfs_init); diff --git a/kernel/perf_event.c b/kernel/perf_event.c new file mode 100644 index 00000000000..6e8b99a04e1 --- /dev/null +++ b/kernel/perf_event.c @@ -0,0 +1,5000 @@ +/* + * Performance event core code + * + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright © 2009 Paul Mackerras, IBM Corp. 
+ * + * For licensing details see kernel-base/COPYING + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * Each CPU has a list of per CPU events: + */ +DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); + +int perf_max_events __read_mostly = 1; +static int perf_reserved_percpu __read_mostly; +static int perf_overcommit __read_mostly = 1; + +static atomic_t nr_events __read_mostly; +static atomic_t nr_mmap_events __read_mostly; +static atomic_t nr_comm_events __read_mostly; +static atomic_t nr_task_events __read_mostly; + +/* + * perf event paranoia level: + * -1 - not paranoid at all + * 0 - disallow raw tracepoint access for unpriv + * 1 - disallow cpu events for unpriv + * 2 - disallow kernel profiling for unpriv + */ +int sysctl_perf_event_paranoid __read_mostly = 1; + +static inline bool perf_paranoid_tracepoint_raw(void) +{ + return sysctl_perf_event_paranoid > -1; +} + +static inline bool perf_paranoid_cpu(void) +{ + return sysctl_perf_event_paranoid > 0; +} + +static inline bool perf_paranoid_kernel(void) +{ + return sysctl_perf_event_paranoid > 1; +} + +int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ + +/* + * max perf event sample rate + */ +int sysctl_perf_event_sample_rate __read_mostly = 100000; + +static atomic64_t perf_event_id; + +/* + * Lock for (sysadmin-configurable) event reservations: + */ +static DEFINE_SPINLOCK(perf_resource_lock); + +/* + * Architecture provided APIs - weak aliases: + */ +extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) +{ + return NULL; +} + +void __weak hw_perf_disable(void) { barrier(); } +void __weak hw_perf_enable(void) { barrier(); } + +void __weak hw_perf_event_setup(int cpu) { barrier(); } +void __weak hw_perf_event_setup_online(int cpu) { barrier(); } + +int __weak +hw_perf_group_sched_in(struct perf_event *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx, int cpu) +{ + return 0; +} + +void __weak perf_event_print_debug(void) { } + +static DEFINE_PER_CPU(int, perf_disable_count); + +void __perf_disable(void) +{ + __get_cpu_var(perf_disable_count)++; +} + +bool __perf_enable(void) +{ + return !--__get_cpu_var(perf_disable_count); +} + +void perf_disable(void) +{ + __perf_disable(); + hw_perf_disable(); +} + +void perf_enable(void) +{ + if (__perf_enable()) + hw_perf_enable(); +} + +static void get_ctx(struct perf_event_context *ctx) +{ + WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); +} + +static void free_ctx(struct rcu_head *head) +{ + struct perf_event_context *ctx; + + ctx = container_of(head, struct perf_event_context, rcu_head); + kfree(ctx); +} + +static void put_ctx(struct perf_event_context *ctx) +{ + if (atomic_dec_and_test(&ctx->refcount)) { + if (ctx->parent_ctx) + put_ctx(ctx->parent_ctx); + if (ctx->task) + put_task_struct(ctx->task); + call_rcu(&ctx->rcu_head, free_ctx); + } +} + +static void unclone_ctx(struct perf_event_context *ctx) +{ + if (ctx->parent_ctx) { + put_ctx(ctx->parent_ctx); + ctx->parent_ctx = NULL; + } +} + +/* + * If we inherit events we want to return the parent event id + * to userspace. + */ +static u64 primary_event_id(struct perf_event *event) +{ + u64 id = event->id; + + if (event->parent) + id = event->parent->id; + + return id; +} + +/* + * Get the perf_event_context for a task and lock it. 
+ * This has to cope with with the fact that until it is locked, + * the context could get moved to another task. + */ +static struct perf_event_context * +perf_lock_task_context(struct task_struct *task, unsigned long *flags) +{ + struct perf_event_context *ctx; + + rcu_read_lock(); + retry: + ctx = rcu_dereference(task->perf_event_ctxp); + if (ctx) { + /* + * If this context is a clone of another, it might + * get swapped for another underneath us by + * perf_event_task_sched_out, though the + * rcu_read_lock() protects us from any context + * getting freed. Lock the context and check if it + * got swapped before we could get the lock, and retry + * if so. If we locked the right context, then it + * can't get swapped on us any more. + */ + spin_lock_irqsave(&ctx->lock, *flags); + if (ctx != rcu_dereference(task->perf_event_ctxp)) { + spin_unlock_irqrestore(&ctx->lock, *flags); + goto retry; + } + + if (!atomic_inc_not_zero(&ctx->refcount)) { + spin_unlock_irqrestore(&ctx->lock, *flags); + ctx = NULL; + } + } + rcu_read_unlock(); + return ctx; +} + +/* + * Get the context for a task and increment its pin_count so it + * can't get swapped to another task. This also increments its + * reference count so that the context can't get freed. + */ +static struct perf_event_context *perf_pin_task_context(struct task_struct *task) +{ + struct perf_event_context *ctx; + unsigned long flags; + + ctx = perf_lock_task_context(task, &flags); + if (ctx) { + ++ctx->pin_count; + spin_unlock_irqrestore(&ctx->lock, flags); + } + return ctx; +} + +static void perf_unpin_context(struct perf_event_context *ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + --ctx->pin_count; + spin_unlock_irqrestore(&ctx->lock, flags); + put_ctx(ctx); +} + +/* + * Add a event from the lists for its context. + * Must be called with ctx->mutex and ctx->lock held. + */ +static void +list_add_event(struct perf_event *event, struct perf_event_context *ctx) +{ + struct perf_event *group_leader = event->group_leader; + + /* + * Depending on whether it is a standalone or sibling event, + * add it straight to the context's event list, or to the group + * leader's sibling list: + */ + if (group_leader == event) + list_add_tail(&event->group_entry, &ctx->group_list); + else { + list_add_tail(&event->group_entry, &group_leader->sibling_list); + group_leader->nr_siblings++; + } + + list_add_rcu(&event->event_entry, &ctx->event_list); + ctx->nr_events++; + if (event->attr.inherit_stat) + ctx->nr_stat++; +} + +/* + * Remove a event from the lists for its context. + * Must be called with ctx->mutex and ctx->lock held. 
+ */ +static void +list_del_event(struct perf_event *event, struct perf_event_context *ctx) +{ + struct perf_event *sibling, *tmp; + + if (list_empty(&event->group_entry)) + return; + ctx->nr_events--; + if (event->attr.inherit_stat) + ctx->nr_stat--; + + list_del_init(&event->group_entry); + list_del_rcu(&event->event_entry); + + if (event->group_leader != event) + event->group_leader->nr_siblings--; + + /* + * If this was a group event with sibling events then + * upgrade the siblings to singleton events by adding them + * to the context list directly: + */ + list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { + + list_move_tail(&sibling->group_entry, &ctx->group_list); + sibling->group_leader = sibling; + } +} + +static void +event_sched_out(struct perf_event *event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + if (event->state != PERF_EVENT_STATE_ACTIVE) + return; + + event->state = PERF_EVENT_STATE_INACTIVE; + if (event->pending_disable) { + event->pending_disable = 0; + event->state = PERF_EVENT_STATE_OFF; + } + event->tstamp_stopped = ctx->time; + event->pmu->disable(event); + event->oncpu = -1; + + if (!is_software_event(event)) + cpuctx->active_oncpu--; + ctx->nr_active--; + if (event->attr.exclusive || !cpuctx->active_oncpu) + cpuctx->exclusive = 0; +} + +static void +group_sched_out(struct perf_event *group_event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + struct perf_event *event; + + if (group_event->state != PERF_EVENT_STATE_ACTIVE) + return; + + event_sched_out(group_event, cpuctx, ctx); + + /* + * Schedule out siblings (if any): + */ + list_for_each_entry(event, &group_event->sibling_list, group_entry) + event_sched_out(event, cpuctx, ctx); + + if (group_event->attr.exclusive) + cpuctx->exclusive = 0; +} + +/* + * Cross CPU call to remove a performance event + * + * We disable the event on the hardware level first. After that we + * remove it from the context list. + */ +static void __perf_event_remove_from_context(void *info) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + + /* + * If this is a task context, we need to check whether it is + * the current task context of this cpu. If not it has been + * scheduled out before the smp call arrived. + */ + if (ctx->task && cpuctx->task_ctx != ctx) + return; + + spin_lock(&ctx->lock); + /* + * Protect the list operation against NMI by disabling the + * events on a global level. + */ + perf_disable(); + + event_sched_out(event, cpuctx, ctx); + + list_del_event(event, ctx); + + if (!ctx->task) { + /* + * Allow more per task events with respect to the + * reservation: + */ + cpuctx->max_pertask = + min(perf_max_events - ctx->nr_events, + perf_max_events - perf_reserved_percpu); + } + + perf_enable(); + spin_unlock(&ctx->lock); +} + + +/* + * Remove the event from a task's (or a CPU's) list of events. + * + * Must be called with ctx->mutex held. + * + * CPU events are removed with a smp call. For task events we only + * call when the task is on a CPU. + * + * If event->ctx is a cloned context, callers must make sure that + * every task struct that event->ctx->task could possibly point to + * remains valid. This is OK when called from perf_release since + * that only calls us on the top-level context, which can't be a clone. + * When called from perf_event_exit_task, it's OK because the + * context has been detached from its task. 
+ */ +static void perf_event_remove_from_context(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Per cpu events are removed via an smp call and + * the removal is always sucessful. + */ + smp_call_function_single(event->cpu, + __perf_event_remove_from_context, + event, 1); + return; + } + +retry: + task_oncpu_function_call(task, __perf_event_remove_from_context, + event); + + spin_lock_irq(&ctx->lock); + /* + * If the context is active we need to retry the smp call. + */ + if (ctx->nr_active && !list_empty(&event->group_entry)) { + spin_unlock_irq(&ctx->lock); + goto retry; + } + + /* + * The lock prevents that this context is scheduled in so we + * can remove the event safely, if the call above did not + * succeed. + */ + if (!list_empty(&event->group_entry)) { + list_del_event(event, ctx); + } + spin_unlock_irq(&ctx->lock); +} + +static inline u64 perf_clock(void) +{ + return cpu_clock(smp_processor_id()); +} + +/* + * Update the record of the current time in a context. + */ +static void update_context_time(struct perf_event_context *ctx) +{ + u64 now = perf_clock(); + + ctx->time += now - ctx->timestamp; + ctx->timestamp = now; +} + +/* + * Update the total_time_enabled and total_time_running fields for a event. + */ +static void update_event_times(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + u64 run_end; + + if (event->state < PERF_EVENT_STATE_INACTIVE || + event->group_leader->state < PERF_EVENT_STATE_INACTIVE) + return; + + event->total_time_enabled = ctx->time - event->tstamp_enabled; + + if (event->state == PERF_EVENT_STATE_INACTIVE) + run_end = event->tstamp_stopped; + else + run_end = ctx->time; + + event->total_time_running = run_end - event->tstamp_running; +} + +/* + * Update total_time_enabled and total_time_running for all events in a group. + */ +static void update_group_times(struct perf_event *leader) +{ + struct perf_event *event; + + update_event_times(leader); + list_for_each_entry(event, &leader->sibling_list, group_entry) + update_event_times(event); +} + +/* + * Cross CPU call to disable a performance event + */ +static void __perf_event_disable(void *info) +{ + struct perf_event *event = info; + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event_context *ctx = event->ctx; + + /* + * If this is a per-task event, need to check whether this + * event's task is the current task on this cpu. + */ + if (ctx->task && cpuctx->task_ctx != ctx) + return; + + spin_lock(&ctx->lock); + + /* + * If the event is on, turn it off. + * If it is in error state, leave it in error state. + */ + if (event->state >= PERF_EVENT_STATE_INACTIVE) { + update_context_time(ctx); + update_group_times(event); + if (event == event->group_leader) + group_sched_out(event, cpuctx, ctx); + else + event_sched_out(event, cpuctx, ctx); + event->state = PERF_EVENT_STATE_OFF; + } + + spin_unlock(&ctx->lock); +} + +/* + * Disable a event. + * + * If event->ctx is a cloned context, callers must make sure that + * every task struct that event->ctx->task could possibly point to + * remains valid. This condition is satisifed when called through + * perf_event_for_each_child or perf_event_for_each because they + * hold the top-level event's child_mutex, so any descendant that + * goes to exit will block in sync_child_event. 
+ * When called from perf_pending_event it's OK because event->ctx + * is the current context on this CPU and preemption is disabled, + * hence we can't get into perf_event_task_sched_out for this context. + */ +static void perf_event_disable(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Disable the event on the cpu that it's on + */ + smp_call_function_single(event->cpu, __perf_event_disable, + event, 1); + return; + } + + retry: + task_oncpu_function_call(task, __perf_event_disable, event); + + spin_lock_irq(&ctx->lock); + /* + * If the event is still active, we need to retry the cross-call. + */ + if (event->state == PERF_EVENT_STATE_ACTIVE) { + spin_unlock_irq(&ctx->lock); + goto retry; + } + + /* + * Since we have the lock this context can't be scheduled + * in, so we can change the state safely. + */ + if (event->state == PERF_EVENT_STATE_INACTIVE) { + update_group_times(event); + event->state = PERF_EVENT_STATE_OFF; + } + + spin_unlock_irq(&ctx->lock); +} + +static int +event_sched_in(struct perf_event *event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx, + int cpu) +{ + if (event->state <= PERF_EVENT_STATE_OFF) + return 0; + + event->state = PERF_EVENT_STATE_ACTIVE; + event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ + /* + * The new state must be visible before we turn it on in the hardware: + */ + smp_wmb(); + + if (event->pmu->enable(event)) { + event->state = PERF_EVENT_STATE_INACTIVE; + event->oncpu = -1; + return -EAGAIN; + } + + event->tstamp_running += ctx->time - event->tstamp_stopped; + + if (!is_software_event(event)) + cpuctx->active_oncpu++; + ctx->nr_active++; + + if (event->attr.exclusive) + cpuctx->exclusive = 1; + + return 0; +} + +static int +group_sched_in(struct perf_event *group_event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx, + int cpu) +{ + struct perf_event *event, *partial_group; + int ret; + + if (group_event->state == PERF_EVENT_STATE_OFF) + return 0; + + ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu); + if (ret) + return ret < 0 ? ret : 0; + + if (event_sched_in(group_event, cpuctx, ctx, cpu)) + return -EAGAIN; + + /* + * Schedule in siblings as one group (if any): + */ + list_for_each_entry(event, &group_event->sibling_list, group_entry) { + if (event_sched_in(event, cpuctx, ctx, cpu)) { + partial_group = event; + goto group_error; + } + } + + return 0; + +group_error: + /* + * Groups can be scheduled in as one unit only, so undo any + * partial group before returning: + */ + list_for_each_entry(event, &group_event->sibling_list, group_entry) { + if (event == partial_group) + break; + event_sched_out(event, cpuctx, ctx); + } + event_sched_out(group_event, cpuctx, ctx); + + return -EAGAIN; +} + +/* + * Return 1 for a group consisting entirely of software events, + * 0 if the group contains any hardware events. + */ +static int is_software_only_group(struct perf_event *leader) +{ + struct perf_event *event; + + if (!is_software_event(leader)) + return 0; + + list_for_each_entry(event, &leader->sibling_list, group_entry) + if (!is_software_event(event)) + return 0; + + return 1; +} + +/* + * Work out whether we can put this event group on the CPU now. + */ +static int group_can_go_on(struct perf_event *event, + struct perf_cpu_context *cpuctx, + int can_add_hw) +{ + /* + * Groups consisting entirely of software events can always go on. 
+ */ + if (is_software_only_group(event)) + return 1; + /* + * If an exclusive group is already on, no other hardware + * events can go on. + */ + if (cpuctx->exclusive) + return 0; + /* + * If this group is exclusive and there are already + * events on the CPU, it can't go on. + */ + if (event->attr.exclusive && cpuctx->active_oncpu) + return 0; + /* + * Otherwise, try to add it if all previous groups were able + * to go on. + */ + return can_add_hw; +} + +static void add_event_to_ctx(struct perf_event *event, + struct perf_event_context *ctx) +{ + list_add_event(event, ctx); + event->tstamp_enabled = ctx->time; + event->tstamp_running = ctx->time; + event->tstamp_stopped = ctx->time; +} + +/* + * Cross CPU call to install and enable a performance event + * + * Must be called with ctx->mutex held + */ +static void __perf_install_in_context(void *info) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + struct perf_event *leader = event->group_leader; + int cpu = smp_processor_id(); + int err; + + /* + * If this is a task context, we need to check whether it is + * the current task context of this cpu. If not it has been + * scheduled out before the smp call arrived. + * Or possibly this is the right context but it isn't + * on this cpu because it had no events. + */ + if (ctx->task && cpuctx->task_ctx != ctx) { + if (cpuctx->task_ctx || ctx->task != current) + return; + cpuctx->task_ctx = ctx; + } + + spin_lock(&ctx->lock); + ctx->is_active = 1; + update_context_time(ctx); + + /* + * Protect the list operation against NMI by disabling the + * events on a global level. NOP for non NMI based events. + */ + perf_disable(); + + add_event_to_ctx(event, ctx); + + /* + * Don't put the event on if it is disabled or if + * it is in a group and the group isn't on. + */ + if (event->state != PERF_EVENT_STATE_INACTIVE || + (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) + goto unlock; + + /* + * An exclusive event can't go on if there are already active + * hardware events, and no hardware event can go on if there + * is already an exclusive event on. + */ + if (!group_can_go_on(event, cpuctx, 1)) + err = -EEXIST; + else + err = event_sched_in(event, cpuctx, ctx, cpu); + + if (err) { + /* + * This event couldn't go on. If it is in a group + * then we have to pull the whole group off. + * If the event group is pinned then put it in error state. + */ + if (leader != event) + group_sched_out(leader, cpuctx, ctx); + if (leader->attr.pinned) { + update_group_times(leader); + leader->state = PERF_EVENT_STATE_ERROR; + } + } + + if (!err && !ctx->task && cpuctx->max_pertask) + cpuctx->max_pertask--; + + unlock: + perf_enable(); + + spin_unlock(&ctx->lock); +} + +/* + * Attach a performance event to a context + * + * First we add the event to the list with the hardware enable bit + * in event->hw_config cleared. + * + * If the event is attached to a task which is on a CPU we use a smp + * call to enable it in the task context. The task might have been + * scheduled away, but we check this in the smp call again. + * + * Must be called with ctx->mutex held. + */ +static void +perf_install_in_context(struct perf_event_context *ctx, + struct perf_event *event, + int cpu) +{ + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Per cpu events are installed via an smp call and + * the install is always sucessful. 
+ */ + smp_call_function_single(cpu, __perf_install_in_context, + event, 1); + return; + } + +retry: + task_oncpu_function_call(task, __perf_install_in_context, + event); + + spin_lock_irq(&ctx->lock); + /* + * we need to retry the smp call. + */ + if (ctx->is_active && list_empty(&event->group_entry)) { + spin_unlock_irq(&ctx->lock); + goto retry; + } + + /* + * The lock prevents that this context is scheduled in so we + * can add the event safely, if it the call above did not + * succeed. + */ + if (list_empty(&event->group_entry)) + add_event_to_ctx(event, ctx); + spin_unlock_irq(&ctx->lock); +} + +/* + * Put a event into inactive state and update time fields. + * Enabling the leader of a group effectively enables all + * the group members that aren't explicitly disabled, so we + * have to update their ->tstamp_enabled also. + * Note: this works for group members as well as group leaders + * since the non-leader members' sibling_lists will be empty. + */ +static void __perf_event_mark_enabled(struct perf_event *event, + struct perf_event_context *ctx) +{ + struct perf_event *sub; + + event->state = PERF_EVENT_STATE_INACTIVE; + event->tstamp_enabled = ctx->time - event->total_time_enabled; + list_for_each_entry(sub, &event->sibling_list, group_entry) + if (sub->state >= PERF_EVENT_STATE_INACTIVE) + sub->tstamp_enabled = + ctx->time - sub->total_time_enabled; +} + +/* + * Cross CPU call to enable a performance event + */ +static void __perf_event_enable(void *info) +{ + struct perf_event *event = info; + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event_context *ctx = event->ctx; + struct perf_event *leader = event->group_leader; + int err; + + /* + * If this is a per-task event, need to check whether this + * event's task is the current task on this cpu. + */ + if (ctx->task && cpuctx->task_ctx != ctx) { + if (cpuctx->task_ctx || ctx->task != current) + return; + cpuctx->task_ctx = ctx; + } + + spin_lock(&ctx->lock); + ctx->is_active = 1; + update_context_time(ctx); + + if (event->state >= PERF_EVENT_STATE_INACTIVE) + goto unlock; + __perf_event_mark_enabled(event, ctx); + + /* + * If the event is in a group and isn't the group leader, + * then don't put it on unless the group is on. + */ + if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) + goto unlock; + + if (!group_can_go_on(event, cpuctx, 1)) { + err = -EEXIST; + } else { + perf_disable(); + if (event == leader) + err = group_sched_in(event, cpuctx, ctx, + smp_processor_id()); + else + err = event_sched_in(event, cpuctx, ctx, + smp_processor_id()); + perf_enable(); + } + + if (err) { + /* + * If this event can't go on and it's part of a + * group, then the whole group has to come off. + */ + if (leader != event) + group_sched_out(leader, cpuctx, ctx); + if (leader->attr.pinned) { + update_group_times(leader); + leader->state = PERF_EVENT_STATE_ERROR; + } + } + + unlock: + spin_unlock(&ctx->lock); +} + +/* + * Enable a event. + * + * If event->ctx is a cloned context, callers must make sure that + * every task struct that event->ctx->task could possibly point to + * remains valid. This condition is satisfied when called through + * perf_event_for_each_child or perf_event_for_each as described + * for perf_event_disable. 
+ */ +static void perf_event_enable(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Enable the event on the cpu that it's on + */ + smp_call_function_single(event->cpu, __perf_event_enable, + event, 1); + return; + } + + spin_lock_irq(&ctx->lock); + if (event->state >= PERF_EVENT_STATE_INACTIVE) + goto out; + + /* + * If the event is in error state, clear that first. + * That way, if we see the event in error state below, we + * know that it has gone back into error state, as distinct + * from the task having been scheduled away before the + * cross-call arrived. + */ + if (event->state == PERF_EVENT_STATE_ERROR) + event->state = PERF_EVENT_STATE_OFF; + + retry: + spin_unlock_irq(&ctx->lock); + task_oncpu_function_call(task, __perf_event_enable, event); + + spin_lock_irq(&ctx->lock); + + /* + * If the context is active and the event is still off, + * we need to retry the cross-call. + */ + if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) + goto retry; + + /* + * Since we have the lock this context can't be scheduled + * in, so we can change the state safely. + */ + if (event->state == PERF_EVENT_STATE_OFF) + __perf_event_mark_enabled(event, ctx); + + out: + spin_unlock_irq(&ctx->lock); +} + +static int perf_event_refresh(struct perf_event *event, int refresh) +{ + /* + * not supported on inherited events + */ + if (event->attr.inherit) + return -EINVAL; + + atomic_add(refresh, &event->event_limit); + perf_event_enable(event); + + return 0; +} + +void __perf_event_sched_out(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx) +{ + struct perf_event *event; + + spin_lock(&ctx->lock); + ctx->is_active = 0; + if (likely(!ctx->nr_events)) + goto out; + update_context_time(ctx); + + perf_disable(); + if (ctx->nr_active) { + list_for_each_entry(event, &ctx->group_list, group_entry) { + if (event != event->group_leader) + event_sched_out(event, cpuctx, ctx); + else + group_sched_out(event, cpuctx, ctx); + } + } + perf_enable(); + out: + spin_unlock(&ctx->lock); +} + +/* + * Test whether two contexts are equivalent, i.e. whether they + * have both been cloned from the same version of the same context + * and they both have the same number of enabled events. + * If the number of enabled events is the same, then the set + * of enabled events should be the same, because these are both + * inherited contexts, therefore we can't access individual events + * in them directly with an fd; we can only enable/disable all + * events via prctl, or enable/disable all events in a family + * via ioctl, which will have the same effect on both contexts. + */ +static int context_equiv(struct perf_event_context *ctx1, + struct perf_event_context *ctx2) +{ + return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx + && ctx1->parent_gen == ctx2->parent_gen + && !ctx1->pin_count && !ctx2->pin_count; +} + +static void __perf_event_read(void *event); + +static void __perf_event_sync_stat(struct perf_event *event, + struct perf_event *next_event) +{ + u64 value; + + if (!event->attr.inherit_stat) + return; + + /* + * Update the event value, we cannot use perf_event_read() + * because we're in the middle of a context switch and have IRQs + * disabled, which upsets smp_call_function_single(), however + * we know the event must be on the current CPU, therefore we + * don't need to use it. 
+ */ + switch (event->state) { + case PERF_EVENT_STATE_ACTIVE: + __perf_event_read(event); + break; + + case PERF_EVENT_STATE_INACTIVE: + update_event_times(event); + break; + + default: + break; + } + + /* + * In order to keep per-task stats reliable we need to flip the event + * values when we flip the contexts. + */ + value = atomic64_read(&next_event->count); + value = atomic64_xchg(&event->count, value); + atomic64_set(&next_event->count, value); + + swap(event->total_time_enabled, next_event->total_time_enabled); + swap(event->total_time_running, next_event->total_time_running); + + /* + * Since we swizzled the values, update the user visible data too. + */ + perf_event_update_userpage(event); + perf_event_update_userpage(next_event); +} + +#define list_next_entry(pos, member) \ + list_entry(pos->member.next, typeof(*pos), member) + +static void perf_event_sync_stat(struct perf_event_context *ctx, + struct perf_event_context *next_ctx) +{ + struct perf_event *event, *next_event; + + if (!ctx->nr_stat) + return; + + event = list_first_entry(&ctx->event_list, + struct perf_event, event_entry); + + next_event = list_first_entry(&next_ctx->event_list, + struct perf_event, event_entry); + + while (&event->event_entry != &ctx->event_list && + &next_event->event_entry != &next_ctx->event_list) { + + __perf_event_sync_stat(event, next_event); + + event = list_next_entry(event, event_entry); + next_event = list_next_entry(next_event, event_entry); + } +} + +/* + * Called from scheduler to remove the events of the current task, + * with interrupts disabled. + * + * We stop each event and update the event value in event->count. + * + * This does not protect us against NMI, but disable() + * sets the disabled bit in the control field of event _before_ + * accessing the event control register. If a NMI hits, then it will + * not restart the event. + */ +void perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu) +{ + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_event_context *ctx = task->perf_event_ctxp; + struct perf_event_context *next_ctx; + struct perf_event_context *parent; + struct pt_regs *regs; + int do_switch = 1; + + regs = task_pt_regs(task); + perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); + + if (likely(!ctx || !cpuctx->task_ctx)) + return; + + update_context_time(ctx); + + rcu_read_lock(); + parent = rcu_dereference(ctx->parent_ctx); + next_ctx = next->perf_event_ctxp; + if (parent && next_ctx && + rcu_dereference(next_ctx->parent_ctx) == parent) { + /* + * Looks like the two contexts are clones, so we might be + * able to optimize the context switch. We lock both + * contexts and check that they are clones under the + * lock (including re-checking that neither has been + * uncloned in the meantime). It doesn't matter which + * order we take the locks because no other cpu could + * be trying to lock both of these tasks. 
+ */ + spin_lock(&ctx->lock); + spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); + if (context_equiv(ctx, next_ctx)) { + /* + * XXX do we need a memory barrier of sorts + * wrt to rcu_dereference() of perf_event_ctxp + */ + task->perf_event_ctxp = next_ctx; + next->perf_event_ctxp = ctx; + ctx->task = next; + next_ctx->task = task; + do_switch = 0; + + perf_event_sync_stat(ctx, next_ctx); + } + spin_unlock(&next_ctx->lock); + spin_unlock(&ctx->lock); + } + rcu_read_unlock(); + + if (do_switch) { + __perf_event_sched_out(ctx, cpuctx); + cpuctx->task_ctx = NULL; + } +} + +/* + * Called with IRQs disabled + */ +static void __perf_event_task_sched_out(struct perf_event_context *ctx) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + + if (!cpuctx->task_ctx) + return; + + if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) + return; + + __perf_event_sched_out(ctx, cpuctx); + cpuctx->task_ctx = NULL; +} + +/* + * Called with IRQs disabled + */ +static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx) +{ + __perf_event_sched_out(&cpuctx->ctx, cpuctx); +} + +static void +__perf_event_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx, int cpu) +{ + struct perf_event *event; + int can_add_hw = 1; + + spin_lock(&ctx->lock); + ctx->is_active = 1; + if (likely(!ctx->nr_events)) + goto out; + + ctx->timestamp = perf_clock(); + + perf_disable(); + + /* + * First go through the list and put on any pinned groups + * in order to give them the best chance of going on. + */ + list_for_each_entry(event, &ctx->group_list, group_entry) { + if (event->state <= PERF_EVENT_STATE_OFF || + !event->attr.pinned) + continue; + if (event->cpu != -1 && event->cpu != cpu) + continue; + + if (event != event->group_leader) + event_sched_in(event, cpuctx, ctx, cpu); + else { + if (group_can_go_on(event, cpuctx, 1)) + group_sched_in(event, cpuctx, ctx, cpu); + } + + /* + * If this pinned group hasn't been scheduled, + * put it in error state. + */ + if (event->state == PERF_EVENT_STATE_INACTIVE) { + update_group_times(event); + event->state = PERF_EVENT_STATE_ERROR; + } + } + + list_for_each_entry(event, &ctx->group_list, group_entry) { + /* + * Ignore events in OFF or ERROR state, and + * ignore pinned events since we did them already. + */ + if (event->state <= PERF_EVENT_STATE_OFF || + event->attr.pinned) + continue; + + /* + * Listen to the 'cpu' scheduling filter constraint + * of events: + */ + if (event->cpu != -1 && event->cpu != cpu) + continue; + + if (event != event->group_leader) { + if (event_sched_in(event, cpuctx, ctx, cpu)) + can_add_hw = 0; + } else { + if (group_can_go_on(event, cpuctx, can_add_hw)) { + if (group_sched_in(event, cpuctx, ctx, cpu)) + can_add_hw = 0; + } + } + } + perf_enable(); + out: + spin_unlock(&ctx->lock); +} + +/* + * Called from scheduler to add the events of the current task + * with interrupts disabled. + * + * We restore the event value and then enable it. + * + * This does not protect us against NMI, but enable() + * sets the enabled bit in the control field of event _before_ + * accessing the event control register. If a NMI hits, then it will + * keep the event running. 
+ */ +void perf_event_task_sched_in(struct task_struct *task, int cpu) +{ + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_event_context *ctx = task->perf_event_ctxp; + + if (likely(!ctx)) + return; + if (cpuctx->task_ctx == ctx) + return; + __perf_event_sched_in(ctx, cpuctx, cpu); + cpuctx->task_ctx = ctx; +} + +static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) +{ + struct perf_event_context *ctx = &cpuctx->ctx; + + __perf_event_sched_in(ctx, cpuctx, cpu); +} + +#define MAX_INTERRUPTS (~0ULL) + +static void perf_log_throttle(struct perf_event *event, int enable); + +static void perf_adjust_period(struct perf_event *event, u64 events) +{ + struct hw_perf_event *hwc = &event->hw; + u64 period, sample_period; + s64 delta; + + events *= hwc->sample_period; + period = div64_u64(events, event->attr.sample_freq); + + delta = (s64)(period - hwc->sample_period); + delta = (delta + 7) / 8; /* low pass filter */ + + sample_period = hwc->sample_period + delta; + + if (!sample_period) + sample_period = 1; + + hwc->sample_period = sample_period; +} + +static void perf_ctx_adjust_freq(struct perf_event_context *ctx) +{ + struct perf_event *event; + struct hw_perf_event *hwc; + u64 interrupts, freq; + + spin_lock(&ctx->lock); + list_for_each_entry(event, &ctx->group_list, group_entry) { + if (event->state != PERF_EVENT_STATE_ACTIVE) + continue; + + hwc = &event->hw; + + interrupts = hwc->interrupts; + hwc->interrupts = 0; + + /* + * unthrottle events on the tick + */ + if (interrupts == MAX_INTERRUPTS) { + perf_log_throttle(event, 1); + event->pmu->unthrottle(event); + interrupts = 2*sysctl_perf_event_sample_rate/HZ; + } + + if (!event->attr.freq || !event->attr.sample_freq) + continue; + + /* + * if the specified freq < HZ then we need to skip ticks + */ + if (event->attr.sample_freq < HZ) { + freq = event->attr.sample_freq; + + hwc->freq_count += freq; + hwc->freq_interrupts += interrupts; + + if (hwc->freq_count < HZ) + continue; + + interrupts = hwc->freq_interrupts; + hwc->freq_interrupts = 0; + hwc->freq_count -= HZ; + } else + freq = HZ; + + perf_adjust_period(event, freq * interrupts); + + /* + * In order to avoid being stalled by an (accidental) huge + * sample period, force reset the sample period if we didn't + * get any events in this freq period. 
+ */ + if (!interrupts) { + perf_disable(); + event->pmu->disable(event); + atomic64_set(&hwc->period_left, 0); + event->pmu->enable(event); + perf_enable(); + } + } + spin_unlock(&ctx->lock); +} + +/* + * Round-robin a context's events: + */ +static void rotate_ctx(struct perf_event_context *ctx) +{ + struct perf_event *event; + + if (!ctx->nr_events) + return; + + spin_lock(&ctx->lock); + /* + * Rotate the first entry last (works just fine for group events too): + */ + perf_disable(); + list_for_each_entry(event, &ctx->group_list, group_entry) { + list_move_tail(&event->group_entry, &ctx->group_list); + break; + } + perf_enable(); + + spin_unlock(&ctx->lock); +} + +void perf_event_task_tick(struct task_struct *curr, int cpu) +{ + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; + + if (!atomic_read(&nr_events)) + return; + + cpuctx = &per_cpu(perf_cpu_context, cpu); + ctx = curr->perf_event_ctxp; + + perf_ctx_adjust_freq(&cpuctx->ctx); + if (ctx) + perf_ctx_adjust_freq(ctx); + + perf_event_cpu_sched_out(cpuctx); + if (ctx) + __perf_event_task_sched_out(ctx); + + rotate_ctx(&cpuctx->ctx); + if (ctx) + rotate_ctx(ctx); + + perf_event_cpu_sched_in(cpuctx, cpu); + if (ctx) + perf_event_task_sched_in(curr, cpu); +} + +/* + * Enable all of a task's events that have been marked enable-on-exec. + * This expects task == current. + */ +static void perf_event_enable_on_exec(struct task_struct *task) +{ + struct perf_event_context *ctx; + struct perf_event *event; + unsigned long flags; + int enabled = 0; + + local_irq_save(flags); + ctx = task->perf_event_ctxp; + if (!ctx || !ctx->nr_events) + goto out; + + __perf_event_task_sched_out(ctx); + + spin_lock(&ctx->lock); + + list_for_each_entry(event, &ctx->group_list, group_entry) { + if (!event->attr.enable_on_exec) + continue; + event->attr.enable_on_exec = 0; + if (event->state >= PERF_EVENT_STATE_INACTIVE) + continue; + __perf_event_mark_enabled(event, ctx); + enabled = 1; + } + + /* + * Unclone this context if we enabled any event. + */ + if (enabled) + unclone_ctx(ctx); + + spin_unlock(&ctx->lock); + + perf_event_task_sched_in(task, smp_processor_id()); + out: + local_irq_restore(flags); +} + +/* + * Cross CPU call to read the hardware event + */ +static void __perf_event_read(void *info) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + unsigned long flags; + + /* + * If this is a task context, we need to check whether it is + * the current task context of this cpu. If not it has been + * scheduled out before the smp call arrived. In that case + * event->count would have been updated to a recent sample + * when the event was scheduled out. 
+ */ + if (ctx->task && cpuctx->task_ctx != ctx) + return; + + local_irq_save(flags); + if (ctx->is_active) + update_context_time(ctx); + event->pmu->read(event); + update_event_times(event); + local_irq_restore(flags); +} + +static u64 perf_event_read(struct perf_event *event) +{ + /* + * If event is enabled and currently active on a CPU, update the + * value in the event structure: + */ + if (event->state == PERF_EVENT_STATE_ACTIVE) { + smp_call_function_single(event->oncpu, + __perf_event_read, event, 1); + } else if (event->state == PERF_EVENT_STATE_INACTIVE) { + update_event_times(event); + } + + return atomic64_read(&event->count); +} + +/* + * Initialize the perf_event context in a task_struct: + */ +static void +__perf_event_init_context(struct perf_event_context *ctx, + struct task_struct *task) +{ + memset(ctx, 0, sizeof(*ctx)); + spin_lock_init(&ctx->lock); + mutex_init(&ctx->mutex); + INIT_LIST_HEAD(&ctx->group_list); + INIT_LIST_HEAD(&ctx->event_list); + atomic_set(&ctx->refcount, 1); + ctx->task = task; +} + +static struct perf_event_context *find_get_context(pid_t pid, int cpu) +{ + struct perf_event_context *ctx; + struct perf_cpu_context *cpuctx; + struct task_struct *task; + unsigned long flags; + int err; + + /* + * If cpu is not a wildcard then this is a percpu event: + */ + if (cpu != -1) { + /* Must be root to operate on a CPU event: */ + if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EACCES); + + if (cpu < 0 || cpu > num_possible_cpus()) + return ERR_PTR(-EINVAL); + + /* + * We could be clever and allow to attach a event to an + * offline CPU and activate it when the CPU comes up, but + * that's for later. + */ + if (!cpu_isset(cpu, cpu_online_map)) + return ERR_PTR(-ENODEV); + + cpuctx = &per_cpu(perf_cpu_context, cpu); + ctx = &cpuctx->ctx; + get_ctx(ctx); + + return ctx; + } + + rcu_read_lock(); + if (!pid) + task = current; + else + task = find_task_by_vpid(pid); + if (task) + get_task_struct(task); + rcu_read_unlock(); + + if (!task) + return ERR_PTR(-ESRCH); + + /* + * Can't attach events to a dying task. + */ + err = -ESRCH; + if (task->flags & PF_EXITING) + goto errout; + + /* Reuse ptrace permission checks for now. */ + err = -EACCES; + if (!ptrace_may_access(task, PTRACE_MODE_READ)) + goto errout; + + retry: + ctx = perf_lock_task_context(task, &flags); + if (ctx) { + unclone_ctx(ctx); + spin_unlock_irqrestore(&ctx->lock, flags); + } + + if (!ctx) { + ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); + err = -ENOMEM; + if (!ctx) + goto errout; + __perf_event_init_context(ctx, task); + get_ctx(ctx); + if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) { + /* + * We raced with some other task; use + * the context they set. 
+ */ + kfree(ctx); + goto retry; + } + get_task_struct(task); + } + + put_task_struct(task); + return ctx; + + errout: + put_task_struct(task); + return ERR_PTR(err); +} + +static void free_event_rcu(struct rcu_head *head) +{ + struct perf_event *event; + + event = container_of(head, struct perf_event, rcu_head); + if (event->ns) + put_pid_ns(event->ns); + kfree(event); +} + +static void perf_pending_sync(struct perf_event *event); + +static void free_event(struct perf_event *event) +{ + perf_pending_sync(event); + + if (!event->parent) { + atomic_dec(&nr_events); + if (event->attr.mmap) + atomic_dec(&nr_mmap_events); + if (event->attr.comm) + atomic_dec(&nr_comm_events); + if (event->attr.task) + atomic_dec(&nr_task_events); + } + + if (event->output) { + fput(event->output->filp); + event->output = NULL; + } + + if (event->destroy) + event->destroy(event); + + put_ctx(event->ctx); + call_rcu(&event->rcu_head, free_event_rcu); +} + +/* + * Called when the last reference to the file is gone. + */ +static int perf_release(struct inode *inode, struct file *file) +{ + struct perf_event *event = file->private_data; + struct perf_event_context *ctx = event->ctx; + + file->private_data = NULL; + + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + perf_event_remove_from_context(event); + mutex_unlock(&ctx->mutex); + + mutex_lock(&event->owner->perf_event_mutex); + list_del_init(&event->owner_entry); + mutex_unlock(&event->owner->perf_event_mutex); + put_task_struct(event->owner); + + free_event(event); + + return 0; +} + +static int perf_event_read_size(struct perf_event *event) +{ + int entry = sizeof(u64); /* value */ + int size = 0; + int nr = 1; + + if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + size += sizeof(u64); + + if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + size += sizeof(u64); + + if (event->attr.read_format & PERF_FORMAT_ID) + entry += sizeof(u64); + + if (event->attr.read_format & PERF_FORMAT_GROUP) { + nr += event->group_leader->nr_siblings; + size += sizeof(u64); + } + + size += entry * nr; + + return size; +} + +static u64 perf_event_read_value(struct perf_event *event) +{ + struct perf_event *child; + u64 total = 0; + + total += perf_event_read(event); + list_for_each_entry(child, &event->child_list, child_list) + total += perf_event_read(child); + + return total; +} + +static int perf_event_read_entry(struct perf_event *event, + u64 read_format, char __user *buf) +{ + int n = 0, count = 0; + u64 values[2]; + + values[n++] = perf_event_read_value(event); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); + + count = n * sizeof(u64); + + if (copy_to_user(buf, values, count)) + return -EFAULT; + + return count; +} + +static int perf_event_read_group(struct perf_event *event, + u64 read_format, char __user *buf) +{ + struct perf_event *leader = event->group_leader, *sub; + int n = 0, size = 0, err = -EFAULT; + u64 values[3]; + + values[n++] = 1 + leader->nr_siblings; + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = leader->total_time_enabled + + atomic64_read(&leader->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = leader->total_time_running + + atomic64_read(&leader->child_total_time_running); + } + + size = n * sizeof(u64); + + if (copy_to_user(buf, values, size)) + return -EFAULT; + + err = perf_event_read_entry(leader, read_format, buf + size); + if (err < 0) + return err; + + size += err; + + list_for_each_entry(sub, 
&leader->sibling_list, group_entry) { + err = perf_event_read_entry(sub, read_format, + buf + size); + if (err < 0) + return err; + + size += err; + } + + return size; +} + +static int perf_event_read_one(struct perf_event *event, + u64 read_format, char __user *buf) +{ + u64 values[4]; + int n = 0; + + values[n++] = perf_event_read_value(event); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = event->total_time_enabled + + atomic64_read(&event->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = event->total_time_running + + atomic64_read(&event->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); + + if (copy_to_user(buf, values, n * sizeof(u64))) + return -EFAULT; + + return n * sizeof(u64); +} + +/* + * Read the performance event - simple non blocking version for now + */ +static ssize_t +perf_read_hw(struct perf_event *event, char __user *buf, size_t count) +{ + u64 read_format = event->attr.read_format; + int ret; + + /* + * Return end-of-file for a read on a event that is in + * error state (i.e. because it was pinned but it couldn't be + * scheduled on to the CPU at some point). + */ + if (event->state == PERF_EVENT_STATE_ERROR) + return 0; + + if (count < perf_event_read_size(event)) + return -ENOSPC; + + WARN_ON_ONCE(event->ctx->parent_ctx); + mutex_lock(&event->child_mutex); + if (read_format & PERF_FORMAT_GROUP) + ret = perf_event_read_group(event, read_format, buf); + else + ret = perf_event_read_one(event, read_format, buf); + mutex_unlock(&event->child_mutex); + + return ret; +} + +static ssize_t +perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct perf_event *event = file->private_data; + + return perf_read_hw(event, buf, count); +} + +static unsigned int perf_poll(struct file *file, poll_table *wait) +{ + struct perf_event *event = file->private_data; + struct perf_mmap_data *data; + unsigned int events = POLL_HUP; + + rcu_read_lock(); + data = rcu_dereference(event->data); + if (data) + events = atomic_xchg(&data->poll, 0); + rcu_read_unlock(); + + poll_wait(file, &event->waitq, wait); + + return events; +} + +static void perf_event_reset(struct perf_event *event) +{ + (void)perf_event_read(event); + atomic64_set(&event->count, 0); + perf_event_update_userpage(event); +} + +/* + * Holding the top-level event's child_mutex means that any + * descendant process that has inherited this event will block + * in sync_child_event if it goes to exit, thus satisfying the + * task existence requirements of perf_event_enable/disable. 
+ */ +static void perf_event_for_each_child(struct perf_event *event, + void (*func)(struct perf_event *)) +{ + struct perf_event *child; + + WARN_ON_ONCE(event->ctx->parent_ctx); + mutex_lock(&event->child_mutex); + func(event); + list_for_each_entry(child, &event->child_list, child_list) + func(child); + mutex_unlock(&event->child_mutex); +} + +static void perf_event_for_each(struct perf_event *event, + void (*func)(struct perf_event *)) +{ + struct perf_event_context *ctx = event->ctx; + struct perf_event *sibling; + + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + event = event->group_leader; + + perf_event_for_each_child(event, func); + func(event); + list_for_each_entry(sibling, &event->sibling_list, group_entry) + perf_event_for_each_child(event, func); + mutex_unlock(&ctx->mutex); +} + +static int perf_event_period(struct perf_event *event, u64 __user *arg) +{ + struct perf_event_context *ctx = event->ctx; + unsigned long size; + int ret = 0; + u64 value; + + if (!event->attr.sample_period) + return -EINVAL; + + size = copy_from_user(&value, arg, sizeof(value)); + if (size != sizeof(value)) + return -EFAULT; + + if (!value) + return -EINVAL; + + spin_lock_irq(&ctx->lock); + if (event->attr.freq) { + if (value > sysctl_perf_event_sample_rate) { + ret = -EINVAL; + goto unlock; + } + + event->attr.sample_freq = value; + } else { + event->attr.sample_period = value; + event->hw.sample_period = value; + } +unlock: + spin_unlock_irq(&ctx->lock); + + return ret; +} + +int perf_event_set_output(struct perf_event *event, int output_fd); + +static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct perf_event *event = file->private_data; + void (*func)(struct perf_event *); + u32 flags = arg; + + switch (cmd) { + case PERF_EVENT_IOC_ENABLE: + func = perf_event_enable; + break; + case PERF_EVENT_IOC_DISABLE: + func = perf_event_disable; + break; + case PERF_EVENT_IOC_RESET: + func = perf_event_reset; + break; + + case PERF_EVENT_IOC_REFRESH: + return perf_event_refresh(event, arg); + + case PERF_EVENT_IOC_PERIOD: + return perf_event_period(event, (u64 __user *)arg); + + case PERF_EVENT_IOC_SET_OUTPUT: + return perf_event_set_output(event, arg); + + default: + return -ENOTTY; + } + + if (flags & PERF_IOC_FLAG_GROUP) + perf_event_for_each(event, func); + else + perf_event_for_each_child(event, func); + + return 0; +} + +int perf_event_task_enable(void) +{ + struct perf_event *event; + + mutex_lock(&current->perf_event_mutex); + list_for_each_entry(event, &current->perf_event_list, owner_entry) + perf_event_for_each_child(event, perf_event_enable); + mutex_unlock(&current->perf_event_mutex); + + return 0; +} + +int perf_event_task_disable(void) +{ + struct perf_event *event; + + mutex_lock(&current->perf_event_mutex); + list_for_each_entry(event, &current->perf_event_list, owner_entry) + perf_event_for_each_child(event, perf_event_disable); + mutex_unlock(&current->perf_event_mutex); + + return 0; +} + +#ifndef PERF_EVENT_INDEX_OFFSET +# define PERF_EVENT_INDEX_OFFSET 0 +#endif + +static int perf_event_index(struct perf_event *event) +{ + if (event->state != PERF_EVENT_STATE_ACTIVE) + return 0; + + return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; +} + +/* + * Callers need to ensure there can be no nesting of this function, otherwise + * the seqlock logic goes bad. We can not serialize this because the arch + * code calls this from NMI context.
+ */ +void perf_event_update_userpage(struct perf_event *event) +{ + struct perf_event_mmap_page *userpg; + struct perf_mmap_data *data; + + rcu_read_lock(); + data = rcu_dereference(event->data); + if (!data) + goto unlock; + + userpg = data->user_page; + + /* + * Disable preemption so as to not let the corresponding user-space + * spin too long if we get preempted. + */ + preempt_disable(); + ++userpg->lock; + barrier(); + userpg->index = perf_event_index(event); + userpg->offset = atomic64_read(&event->count); + if (event->state == PERF_EVENT_STATE_ACTIVE) + userpg->offset -= atomic64_read(&event->hw.prev_count); + + userpg->time_enabled = event->total_time_enabled + + atomic64_read(&event->child_total_time_enabled); + + userpg->time_running = event->total_time_running + + atomic64_read(&event->child_total_time_running); + + barrier(); + ++userpg->lock; + preempt_enable(); +unlock: + rcu_read_unlock(); +} + +static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct perf_event *event = vma->vm_file->private_data; + struct perf_mmap_data *data; + int ret = VM_FAULT_SIGBUS; + + if (vmf->flags & FAULT_FLAG_MKWRITE) { + if (vmf->pgoff == 0) + ret = 0; + return ret; + } + + rcu_read_lock(); + data = rcu_dereference(event->data); + if (!data) + goto unlock; + + if (vmf->pgoff == 0) { + vmf->page = virt_to_page(data->user_page); + } else { + int nr = vmf->pgoff - 1; + + if ((unsigned)nr > data->nr_pages) + goto unlock; + + if (vmf->flags & FAULT_FLAG_WRITE) + goto unlock; + + vmf->page = virt_to_page(data->data_pages[nr]); + } + + get_page(vmf->page); + vmf->page->mapping = vma->vm_file->f_mapping; + vmf->page->index = vmf->pgoff; + + ret = 0; +unlock: + rcu_read_unlock(); + + return ret; +} + +static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages) +{ + struct perf_mmap_data *data; + unsigned long size; + int i; + + WARN_ON(atomic_read(&event->mmap_count)); + + size = sizeof(struct perf_mmap_data); + size += nr_pages * sizeof(void *); + + data = kzalloc(size, GFP_KERNEL); + if (!data) + goto fail; + + data->user_page = (void *)get_zeroed_page(GFP_KERNEL); + if (!data->user_page) + goto fail_user_page; + + for (i = 0; i < nr_pages; i++) { + data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); + if (!data->data_pages[i]) + goto fail_data_pages; + } + + data->nr_pages = nr_pages; + atomic_set(&data->lock, -1); + + if (event->attr.watermark) { + data->watermark = min_t(long, PAGE_SIZE * nr_pages, + event->attr.wakeup_watermark); + } + if (!data->watermark) + data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); + + rcu_assign_pointer(event->data, data); + + return 0; + +fail_data_pages: + for (i--; i >= 0; i--) + free_page((unsigned long)data->data_pages[i]); + + free_page((unsigned long)data->user_page); + +fail_user_page: + kfree(data); + +fail: + return -ENOMEM; +} + +static void perf_mmap_free_page(unsigned long addr) +{ + struct page *page = virt_to_page((void *)addr); + + page->mapping = NULL; + __free_page(page); +} + +static void __perf_mmap_data_free(struct rcu_head *rcu_head) +{ + struct perf_mmap_data *data; + int i; + + data = container_of(rcu_head, struct perf_mmap_data, rcu_head); + + perf_mmap_free_page((unsigned long)data->user_page); + for (i = 0; i < data->nr_pages; i++) + perf_mmap_free_page((unsigned long)data->data_pages[i]); + + kfree(data); +} + +static void perf_mmap_data_free(struct perf_event *event) +{ + struct perf_mmap_data *data = event->data; + + WARN_ON(atomic_read(&event->mmap_count)); + + 
rcu_assign_pointer(event->data, NULL); + call_rcu(&data->rcu_head, __perf_mmap_data_free); +} + +static void perf_mmap_open(struct vm_area_struct *vma) +{ + struct perf_event *event = vma->vm_file->private_data; + + atomic_inc(&event->mmap_count); +} + +static void perf_mmap_close(struct vm_area_struct *vma) +{ + struct perf_event *event = vma->vm_file->private_data; + + WARN_ON_ONCE(event->ctx->parent_ctx); + if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { + struct user_struct *user = current_user(); + + atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm); + vma->vm_mm->locked_vm -= event->data->nr_locked; + perf_mmap_data_free(event); + mutex_unlock(&event->mmap_mutex); + } +} + +static struct vm_operations_struct perf_mmap_vmops = { + .open = perf_mmap_open, + .close = perf_mmap_close, + .fault = perf_mmap_fault, + .page_mkwrite = perf_mmap_fault, +}; + +static int perf_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct perf_event *event = file->private_data; + unsigned long user_locked, user_lock_limit; + struct user_struct *user = current_user(); + unsigned long locked, lock_limit; + unsigned long vma_size; + unsigned long nr_pages; + long user_extra, extra; + int ret = 0; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vma_size = vma->vm_end - vma->vm_start; + nr_pages = (vma_size / PAGE_SIZE) - 1; + + /* + * If we have data pages ensure they're a power-of-two number, so we + * can do bitmasks instead of modulo. + */ + if (nr_pages != 0 && !is_power_of_2(nr_pages)) + return -EINVAL; + + if (vma_size != PAGE_SIZE * (1 + nr_pages)) + return -EINVAL; + + if (vma->vm_pgoff != 0) + return -EINVAL; + + WARN_ON_ONCE(event->ctx->parent_ctx); + mutex_lock(&event->mmap_mutex); + if (event->output) { + ret = -EINVAL; + goto unlock; + } + + if (atomic_inc_not_zero(&event->mmap_count)) { + if (nr_pages != event->data->nr_pages) + ret = -EINVAL; + goto unlock; + } + + user_extra = nr_pages + 1; + user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); + + /* + * Increase the limit linearly with more CPUs: + */ + user_lock_limit *= num_online_cpus(); + + user_locked = atomic_long_read(&user->locked_vm) + user_extra; + + extra = 0; + if (user_locked > user_lock_limit) + extra = user_locked - user_lock_limit; + + lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; + lock_limit >>= PAGE_SHIFT; + locked = vma->vm_mm->locked_vm + extra; + + if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && + !capable(CAP_IPC_LOCK)) { + ret = -EPERM; + goto unlock; + } + + WARN_ON(event->data); + ret = perf_mmap_data_alloc(event, nr_pages); + if (ret) + goto unlock; + + atomic_set(&event->mmap_count, 1); + atomic_long_add(user_extra, &user->locked_vm); + vma->vm_mm->locked_vm += extra; + event->data->nr_locked = extra; + if (vma->vm_flags & VM_WRITE) + event->data->writable = 1; + +unlock: + mutex_unlock(&event->mmap_mutex); + + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &perf_mmap_vmops; + + return ret; +} + +static int perf_fasync(int fd, struct file *filp, int on) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + struct perf_event *event = filp->private_data; + int retval; + + mutex_lock(&inode->i_mutex); + retval = fasync_helper(fd, filp, on, &event->fasync); + mutex_unlock(&inode->i_mutex); + + if (retval < 0) + return retval; + + return 0; +} + +static const struct file_operations perf_fops = { + .release = perf_release, + .read = perf_read, + .poll = perf_poll, + .unlocked_ioctl = perf_ioctl, + .compat_ioctl = 
perf_ioctl, + .mmap = perf_mmap, + .fasync = perf_fasync, +}; + +/* + * Perf event wakeup + * + * If there's data, ensure we set the poll() state and publish everything + * to user-space before waking everybody up. + */ + +void perf_event_wakeup(struct perf_event *event) +{ + wake_up_all(&event->waitq); + + if (event->pending_kill) { + kill_fasync(&event->fasync, SIGIO, event->pending_kill); + event->pending_kill = 0; + } +} + +/* + * Pending wakeups + * + * Handle the case where we need to wakeup up from NMI (or rq->lock) context. + * + * The NMI bit means we cannot possibly take locks. Therefore, maintain a + * single linked list and use cmpxchg() to add entries lockless. + */ + +static void perf_pending_event(struct perf_pending_entry *entry) +{ + struct perf_event *event = container_of(entry, + struct perf_event, pending); + + if (event->pending_disable) { + event->pending_disable = 0; + __perf_event_disable(event); + } + + if (event->pending_wakeup) { + event->pending_wakeup = 0; + perf_event_wakeup(event); + } +} + +#define PENDING_TAIL ((struct perf_pending_entry *)-1UL) + +static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { + PENDING_TAIL, +}; + +static void perf_pending_queue(struct perf_pending_entry *entry, + void (*func)(struct perf_pending_entry *)) +{ + struct perf_pending_entry **head; + + if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) + return; + + entry->func = func; + + head = &get_cpu_var(perf_pending_head); + + do { + entry->next = *head; + } while (cmpxchg(head, entry->next, entry) != entry->next); + + set_perf_event_pending(); + + put_cpu_var(perf_pending_head); +} + +static int __perf_pending_run(void) +{ + struct perf_pending_entry *list; + int nr = 0; + + list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); + while (list != PENDING_TAIL) { + void (*func)(struct perf_pending_entry *); + struct perf_pending_entry *entry = list; + + list = list->next; + + func = entry->func; + entry->next = NULL; + /* + * Ensure we observe the unqueue before we issue the wakeup, + * so that we won't be waiting forever. + * -- see perf_not_pending(). + */ + smp_wmb(); + + func(entry); + nr++; + } + + return nr; +} + +static inline int perf_not_pending(struct perf_event *event) +{ + /* + * If we flush on whatever cpu we run, there is a chance we don't + * need to wait. + */ + get_cpu(); + __perf_pending_run(); + put_cpu(); + + /* + * Ensure we see the proper queue state before going to sleep + * so that we do not miss the wakeup. 
-- see perf_pending_handle() + */ + smp_rmb(); + return event->pending.next == NULL; +} + +static void perf_pending_sync(struct perf_event *event) +{ + wait_event(event->waitq, perf_not_pending(event)); +} + +void perf_event_do_pending(void) +{ + __perf_pending_run(); +} + +/* + * Callchain support -- arch specific + */ + +__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +{ + return NULL; +} + +/* + * Output + */ +static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, + unsigned long offset, unsigned long head) +{ + unsigned long mask; + + if (!data->writable) + return true; + + mask = (data->nr_pages << PAGE_SHIFT) - 1; + + offset = (offset - tail) & mask; + head = (head - tail) & mask; + + if ((int)(head - offset) < 0) + return false; + + return true; +} + +static void perf_output_wakeup(struct perf_output_handle *handle) +{ + atomic_set(&handle->data->poll, POLL_IN); + + if (handle->nmi) { + handle->event->pending_wakeup = 1; + perf_pending_queue(&handle->event->pending, + perf_pending_event); + } else + perf_event_wakeup(handle->event); +} + +/* + * Curious locking construct. + * + * We need to ensure a later event_id doesn't publish a head when a former + * event_id isn't done writing. However since we need to deal with NMIs we + * cannot fully serialize things. + * + * What we do is serialize between CPUs so we only have to deal with NMI + * nesting on a single CPU. + * + * We only publish the head (and generate a wakeup) when the outer-most + * event_id completes. + */ +static void perf_output_lock(struct perf_output_handle *handle) +{ + struct perf_mmap_data *data = handle->data; + int cpu; + + handle->locked = 0; + + local_irq_save(handle->flags); + cpu = smp_processor_id(); + + if (in_nmi() && atomic_read(&data->lock) == cpu) + return; + + while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) + cpu_relax(); + + handle->locked = 1; +} + +static void perf_output_unlock(struct perf_output_handle *handle) +{ + struct perf_mmap_data *data = handle->data; + unsigned long head; + int cpu; + + data->done_head = data->head; + + if (!handle->locked) + goto out; + +again: + /* + * The xchg implies a full barrier that ensures all writes are done + * before we publish the new head, matched by a rmb() in userspace when + * reading this position. + */ + while ((head = atomic_long_xchg(&data->done_head, 0))) + data->user_page->data_head = head; + + /* + * NMI can happen here, which means we can miss a done_head update. + */ + + cpu = atomic_xchg(&data->lock, -1); + WARN_ON_ONCE(cpu != smp_processor_id()); + + /* + * Therefore we have to validate we did not indeed do so. + */ + if (unlikely(atomic_long_read(&data->done_head))) { + /* + * Since we had it locked, we can lock it again. 
+ */ + while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) + cpu_relax(); + + goto again; + } + + if (atomic_xchg(&data->wakeup, 0)) + perf_output_wakeup(handle); +out: + local_irq_restore(handle->flags); +} + +void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len) +{ + unsigned int pages_mask; + unsigned int offset; + unsigned int size; + void **pages; + + offset = handle->offset; + pages_mask = handle->data->nr_pages - 1; + pages = handle->data->data_pages; + + do { + unsigned int page_offset; + int nr; + + nr = (offset >> PAGE_SHIFT) & pages_mask; + page_offset = offset & (PAGE_SIZE - 1); + size = min_t(unsigned int, PAGE_SIZE - page_offset, len); + + memcpy(pages[nr] + page_offset, buf, size); + + len -= size; + buf += size; + offset += size; + } while (len); + + handle->offset = offset; + + /* + * Check we didn't copy past our reservation window, taking the + * possible unsigned int wrap into account. + */ + WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); +} + +int perf_output_begin(struct perf_output_handle *handle, + struct perf_event *event, unsigned int size, + int nmi, int sample) +{ + struct perf_event *output_event; + struct perf_mmap_data *data; + unsigned long tail, offset, head; + int have_lost; + struct { + struct perf_event_header header; + u64 id; + u64 lost; + } lost_event; + + rcu_read_lock(); + /* + * For inherited events we send all the output towards the parent. + */ + if (event->parent) + event = event->parent; + + output_event = rcu_dereference(event->output); + if (output_event) + event = output_event; + + data = rcu_dereference(event->data); + if (!data) + goto out; + + handle->data = data; + handle->event = event; + handle->nmi = nmi; + handle->sample = sample; + + if (!data->nr_pages) + goto fail; + + have_lost = atomic_read(&data->lost); + if (have_lost) + size += sizeof(lost_event); + + perf_output_lock(handle); + + do { + /* + * Userspace could choose to issue a mb() before updating the + * tail pointer. So that all reads will be completed before the + * write is issued. 
+ */ + tail = ACCESS_ONCE(data->user_page->data_tail); + smp_rmb(); + offset = head = atomic_long_read(&data->head); + head += size; + if (unlikely(!perf_output_space(data, tail, offset, head))) + goto fail; + } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); + + handle->offset = offset; + handle->head = head; + + if (head - tail > data->watermark) + atomic_set(&data->wakeup, 1); + + if (have_lost) { + lost_event.header.type = PERF_RECORD_LOST; + lost_event.header.misc = 0; + lost_event.header.size = sizeof(lost_event); + lost_event.id = event->id; + lost_event.lost = atomic_xchg(&data->lost, 0); + + perf_output_put(handle, lost_event); + } + + return 0; + +fail: + atomic_inc(&data->lost); + perf_output_unlock(handle); +out: + rcu_read_unlock(); + + return -ENOSPC; +} + +void perf_output_end(struct perf_output_handle *handle) +{ + struct perf_event *event = handle->event; + struct perf_mmap_data *data = handle->data; + + int wakeup_events = event->attr.wakeup_events; + + if (handle->sample && wakeup_events) { + int events = atomic_inc_return(&data->events); + if (events >= wakeup_events) { + atomic_sub(wakeup_events, &data->events); + atomic_set(&data->wakeup, 1); + } + } + + perf_output_unlock(handle); + rcu_read_unlock(); +} + +static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) +{ + /* + * only top level events have the pid namespace they were created in + */ + if (event->parent) + event = event->parent; + + return task_tgid_nr_ns(p, event->ns); +} + +static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) +{ + /* + * only top level events have the pid namespace they were created in + */ + if (event->parent) + event = event->parent; + + return task_pid_nr_ns(p, event->ns); +} + +static void perf_output_read_one(struct perf_output_handle *handle, + struct perf_event *event) +{ + u64 read_format = event->attr.read_format; + u64 values[4]; + int n = 0; + + values[n++] = atomic64_read(&event->count); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = event->total_time_enabled + + atomic64_read(&event->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = event->total_time_running + + atomic64_read(&event->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); + + perf_output_copy(handle, values, n * sizeof(u64)); +} + +/* + * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
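The reservation loop above reads user_page->data_tail with ACCESS_ONCE() followed by smp_rmb(), and its comment notes that user space is expected to order its reads before publishing a new tail; perf_output_unlock() earlier publishes data_head behind a matching full barrier. A sketch of the consumer side of that handshake, assuming the renamed <linux/perf_event.h> header; the rmb()/mb() macros, drain() and handle() are placeholders rather than part of the patch, and records that wrap at the end of the buffer are not handled:

#include <linux/perf_event.h>
#include <stdint.h>
#include <unistd.h>

#define rmb()   __sync_synchronize()    /* stand-in for a real read barrier */
#define mb()    __sync_synchronize()    /* stand-in for a real full barrier */

/* 'base' is the mmap()ed area: one metadata page, then 2^n data pages */
static void drain(void *base, size_t data_size,
                  void (*handle)(struct perf_event_header *))
{
        struct perf_event_mmap_page *pc = base;
        char *data = (char *)base + sysconf(_SC_PAGESIZE);
        uint64_t tail = pc->data_tail;
        uint64_t head = pc->data_head;  /* a real reader uses a volatile load */

        rmb();                          /* see the data behind data_head */

        while (tail < head) {
                struct perf_event_header *hdr =
                        (void *)(data + (tail & (data_size - 1)));

                handle(hdr);            /* wrap-around records need an extra
                                           bounce copy; not shown here */
                tail += hdr->size;
        }

        mb();                           /* finish reading before freeing space */
        pc->data_tail = tail;
}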
+ */ +static void perf_output_read_group(struct perf_output_handle *handle, + struct perf_event *event) +{ + struct perf_event *leader = event->group_leader, *sub; + u64 read_format = event->attr.read_format; + u64 values[5]; + int n = 0; + + values[n++] = 1 + leader->nr_siblings; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = leader->total_time_enabled; + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = leader->total_time_running; + + if (leader != event) + leader->pmu->read(leader); + + values[n++] = atomic64_read(&leader->count); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(leader); + + perf_output_copy(handle, values, n * sizeof(u64)); + + list_for_each_entry(sub, &leader->sibling_list, group_entry) { + n = 0; + + if (sub != event) + sub->pmu->read(sub); + + values[n++] = atomic64_read(&sub->count); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(sub); + + perf_output_copy(handle, values, n * sizeof(u64)); + } +} + +static void perf_output_read(struct perf_output_handle *handle, + struct perf_event *event) +{ + if (event->attr.read_format & PERF_FORMAT_GROUP) + perf_output_read_group(handle, event); + else + perf_output_read_one(handle, event); +} + +void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event) +{ + u64 sample_type = data->type; + + perf_output_put(handle, *header); + + if (sample_type & PERF_SAMPLE_IP) + perf_output_put(handle, data->ip); + + if (sample_type & PERF_SAMPLE_TID) + perf_output_put(handle, data->tid_entry); + + if (sample_type & PERF_SAMPLE_TIME) + perf_output_put(handle, data->time); + + if (sample_type & PERF_SAMPLE_ADDR) + perf_output_put(handle, data->addr); + + if (sample_type & PERF_SAMPLE_ID) + perf_output_put(handle, data->id); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + perf_output_put(handle, data->stream_id); + + if (sample_type & PERF_SAMPLE_CPU) + perf_output_put(handle, data->cpu_entry); + + if (sample_type & PERF_SAMPLE_PERIOD) + perf_output_put(handle, data->period); + + if (sample_type & PERF_SAMPLE_READ) + perf_output_read(handle, event); + + if (sample_type & PERF_SAMPLE_CALLCHAIN) { + if (data->callchain) { + int size = 1; + + if (data->callchain) + size += data->callchain->nr; + + size *= sizeof(u64); + + perf_output_copy(handle, data->callchain, size); + } else { + u64 nr = 0; + perf_output_put(handle, nr); + } + } + + if (sample_type & PERF_SAMPLE_RAW) { + if (data->raw) { + perf_output_put(handle, data->raw->size); + perf_output_copy(handle, data->raw->data, + data->raw->size); + } else { + struct { + u32 size; + u32 data; + } raw = { + .size = sizeof(u32), + .data = 0, + }; + perf_output_put(handle, raw); + } + } +} + +void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event, + struct pt_regs *regs) +{ + u64 sample_type = event->attr.sample_type; + + data->type = sample_type; + + header->type = PERF_RECORD_SAMPLE; + header->size = sizeof(*header); + + header->misc = 0; + header->misc |= perf_misc_flags(regs); + + if (sample_type & PERF_SAMPLE_IP) { + data->ip = perf_instruction_pointer(regs); + + header->size += sizeof(data->ip); + } + + if (sample_type & PERF_SAMPLE_TID) { + /* namespace issues */ + data->tid_entry.pid = perf_event_pid(event, current); + data->tid_entry.tid = perf_event_tid(event, current); + + header->size += sizeof(data->tid_entry); + } + + if (sample_type & 
PERF_SAMPLE_TIME) { + data->time = perf_clock(); + + header->size += sizeof(data->time); + } + + if (sample_type & PERF_SAMPLE_ADDR) + header->size += sizeof(data->addr); + + if (sample_type & PERF_SAMPLE_ID) { + data->id = primary_event_id(event); + + header->size += sizeof(data->id); + } + + if (sample_type & PERF_SAMPLE_STREAM_ID) { + data->stream_id = event->id; + + header->size += sizeof(data->stream_id); + } + + if (sample_type & PERF_SAMPLE_CPU) { + data->cpu_entry.cpu = raw_smp_processor_id(); + data->cpu_entry.reserved = 0; + + header->size += sizeof(data->cpu_entry); + } + + if (sample_type & PERF_SAMPLE_PERIOD) + header->size += sizeof(data->period); + + if (sample_type & PERF_SAMPLE_READ) + header->size += perf_event_read_size(event); + + if (sample_type & PERF_SAMPLE_CALLCHAIN) { + int size = 1; + + data->callchain = perf_callchain(regs); + + if (data->callchain) + size += data->callchain->nr; + + header->size += size * sizeof(u64); + } + + if (sample_type & PERF_SAMPLE_RAW) { + int size = sizeof(u32); + + if (data->raw) + size += data->raw->size; + else + size += sizeof(u32); + + WARN_ON_ONCE(size & (sizeof(u64)-1)); + header->size += size; + } +} + +static void perf_event_output(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct perf_output_handle handle; + struct perf_event_header header; + + perf_prepare_sample(&header, data, event, regs); + + if (perf_output_begin(&handle, event, header.size, nmi, 1)) + return; + + perf_output_sample(&handle, &header, data, event); + + perf_output_end(&handle); +} + +/* + * read event_id + */ + +struct perf_read_event { + struct perf_event_header header; + + u32 pid; + u32 tid; +}; + +static void +perf_event_read_event(struct perf_event *event, + struct task_struct *task) +{ + struct perf_output_handle handle; + struct perf_read_event read_event = { + .header = { + .type = PERF_RECORD_READ, + .misc = 0, + .size = sizeof(read_event) + perf_event_read_size(event), + }, + .pid = perf_event_pid(event, task), + .tid = perf_event_tid(event, task), + }; + int ret; + + ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); + if (ret) + return; + + perf_output_put(&handle, read_event); + perf_output_read(&handle, event); + + perf_output_end(&handle); +} + +/* + * task tracking -- fork/exit + * + * enabled by: attr.comm | attr.mmap | attr.task + */ + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + + struct { + struct perf_event_header header; + + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +static void perf_event_task_output(struct perf_event *event, + struct perf_task_event *task_event) +{ + struct perf_output_handle handle; + int size; + struct task_struct *task = task_event->task; + int ret; + + size = task_event->event_id.header.size; + ret = perf_output_begin(&handle, event, size, 0, 0); + + if (ret) + return; + + task_event->event_id.pid = perf_event_pid(event, task); + task_event->event_id.ppid = perf_event_pid(event, current); + + task_event->event_id.tid = perf_event_tid(event, task); + task_event->event_id.ptid = perf_event_tid(event, current); + + task_event->event_id.time = perf_clock(); + + perf_output_put(&handle, task_event->event_id); + + perf_output_end(&handle); +} + +static int perf_event_task_match(struct perf_event *event) +{ + if (event->attr.comm || event->attr.mmap || event->attr.task) + return 1; + + return 0; +} + +static void perf_event_task_ctx(struct perf_event_context *ctx, 
+ struct perf_task_event *task_event) +{ + struct perf_event *event; + + if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_task_match(event)) + perf_event_task_output(event, task_event); + } + rcu_read_unlock(); +} + +static void perf_event_task_event(struct perf_task_event *task_event) +{ + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx = task_event->task_ctx; + + cpuctx = &get_cpu_var(perf_cpu_context); + perf_event_task_ctx(&cpuctx->ctx, task_event); + put_cpu_var(perf_cpu_context); + + rcu_read_lock(); + if (!ctx) + ctx = rcu_dereference(task_event->task->perf_event_ctxp); + if (ctx) + perf_event_task_ctx(ctx, task_event); + rcu_read_unlock(); +} + +static void perf_event_task(struct task_struct *task, + struct perf_event_context *task_ctx, + int new) +{ + struct perf_task_event task_event; + + if (!atomic_read(&nr_comm_events) && + !atomic_read(&nr_mmap_events) && + !atomic_read(&nr_task_events)) + return; + + task_event = (struct perf_task_event){ + .task = task, + .task_ctx = task_ctx, + .event_id = { + .header = { + .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, + .misc = 0, + .size = sizeof(task_event.event_id), + }, + /* .pid */ + /* .ppid */ + /* .tid */ + /* .ptid */ + }, + }; + + perf_event_task_event(&task_event); +} + +void perf_event_fork(struct task_struct *task) +{ + perf_event_task(task, NULL, 1); +} + +/* + * comm tracking + */ + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + + struct { + struct perf_event_header header; + + u32 pid; + u32 tid; + } event_id; +}; + +static void perf_event_comm_output(struct perf_event *event, + struct perf_comm_event *comm_event) +{ + struct perf_output_handle handle; + int size = comm_event->event_id.header.size; + int ret = perf_output_begin(&handle, event, size, 0, 0); + + if (ret) + return; + + comm_event->event_id.pid = perf_event_pid(event, comm_event->task); + comm_event->event_id.tid = perf_event_tid(event, comm_event->task); + + perf_output_put(&handle, comm_event->event_id); + perf_output_copy(&handle, comm_event->comm, + comm_event->comm_size); + perf_output_end(&handle); +} + +static int perf_event_comm_match(struct perf_event *event) +{ + if (event->attr.comm) + return 1; + + return 0; +} + +static void perf_event_comm_ctx(struct perf_event_context *ctx, + struct perf_comm_event *comm_event) +{ + struct perf_event *event; + + if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_comm_match(event)) + perf_event_comm_output(event, comm_event); + } + rcu_read_unlock(); +} + +static void perf_event_comm_event(struct perf_comm_event *comm_event) +{ + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; + unsigned int size; + char comm[TASK_COMM_LEN]; + + memset(comm, 0, sizeof(comm)); + strncpy(comm, comm_event->task->comm, sizeof(comm)); + size = ALIGN(strlen(comm)+1, sizeof(u64)); + + comm_event->comm = comm; + comm_event->comm_size = size; + + comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; + + cpuctx = &get_cpu_var(perf_cpu_context); + perf_event_comm_ctx(&cpuctx->ctx, comm_event); + put_cpu_var(perf_cpu_context); + + rcu_read_lock(); + /* + * doesn't really matter which of the child contexts the + * events ends up in. 
+ */ + ctx = rcu_dereference(current->perf_event_ctxp); + if (ctx) + perf_event_comm_ctx(ctx, comm_event); + rcu_read_unlock(); +} + +void perf_event_comm(struct task_struct *task) +{ + struct perf_comm_event comm_event; + + if (task->perf_event_ctxp) + perf_event_enable_on_exec(task); + + if (!atomic_read(&nr_comm_events)) + return; + + comm_event = (struct perf_comm_event){ + .task = task, + /* .comm */ + /* .comm_size */ + .event_id = { + .header = { + .type = PERF_RECORD_COMM, + .misc = 0, + /* .size */ + }, + /* .pid */ + /* .tid */ + }, + }; + + perf_event_comm_event(&comm_event); +} + +/* + * mmap tracking + */ + +struct perf_mmap_event { + struct vm_area_struct *vma; + + const char *file_name; + int file_size; + + struct { + struct perf_event_header header; + + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +static void perf_event_mmap_output(struct perf_event *event, + struct perf_mmap_event *mmap_event) +{ + struct perf_output_handle handle; + int size = mmap_event->event_id.header.size; + int ret = perf_output_begin(&handle, event, size, 0, 0); + + if (ret) + return; + + mmap_event->event_id.pid = perf_event_pid(event, current); + mmap_event->event_id.tid = perf_event_tid(event, current); + + perf_output_put(&handle, mmap_event->event_id); + perf_output_copy(&handle, mmap_event->file_name, + mmap_event->file_size); + perf_output_end(&handle); +} + +static int perf_event_mmap_match(struct perf_event *event, + struct perf_mmap_event *mmap_event) +{ + if (event->attr.mmap) + return 1; + + return 0; +} + +static void perf_event_mmap_ctx(struct perf_event_context *ctx, + struct perf_mmap_event *mmap_event) +{ + struct perf_event *event; + + if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_mmap_match(event, mmap_event)) + perf_event_mmap_output(event, mmap_event); + } + rcu_read_unlock(); +} + +static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) +{ + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; + struct vm_area_struct *vma = mmap_event->vma; + struct file *file = vma->vm_file; + unsigned int size; + char tmp[16]; + char *buf = NULL; + const char *name; + + memset(tmp, 0, sizeof(tmp)); + + if (file) { + /* + * d_path works from the end of the buffer backwards, so we + * need to add enough zero bytes after the string to handle + * the 64bit alignment we do later. + */ + buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); + if (!buf) { + name = strncpy(tmp, "//enomem", sizeof(tmp)); + goto got_name; + } + name = d_path(&file->f_path, buf, PATH_MAX); + if (IS_ERR(name)) { + name = strncpy(tmp, "//toolong", sizeof(tmp)); + goto got_name; + } + } else { + if (arch_vma_name(mmap_event->vma)) { + name = strncpy(tmp, arch_vma_name(mmap_event->vma), + sizeof(tmp)); + goto got_name; + } + + if (!vma->vm_mm) { + name = strncpy(tmp, "[vdso]", sizeof(tmp)); + goto got_name; + } + + name = strncpy(tmp, "//anon", sizeof(tmp)); + goto got_name; + } + +got_name: + size = ALIGN(strlen(name)+1, sizeof(u64)); + + mmap_event->file_name = name; + mmap_event->file_size = size; + + mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; + + cpuctx = &get_cpu_var(perf_cpu_context); + perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); + put_cpu_var(perf_cpu_context); + + rcu_read_lock(); + /* + * doesn't really matter which of the child contexts the + * events ends up in. 
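perf_event_mmap_output() above writes the fixed event_id block (header, pid, tid, start, len, pgoff) followed by the file name, NUL-padded to a u64 boundary. A consumer-side sketch of how such a record could be overlaid and printed; the struct name, the PATH_MAX bound and print_mmap() are assumptions for illustration, only the field order comes from the code above:

#include <linux/perf_event.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct mmap_record {
        struct perf_event_header header;        /* header.type == PERF_RECORD_MMAP */
        uint32_t pid, tid;
        uint64_t start, len, pgoff;
        char filename[PATH_MAX];                /* NUL-padded to a u64 boundary */
};

static void print_mmap(const struct perf_event_header *hdr)
{
        const struct mmap_record *m = (const void *)hdr;

        if (hdr->type != PERF_RECORD_MMAP)
                return;
        printf("%u/%u: %#llx(%#llx) @ %#llx %s\n", m->pid, m->tid,
               (unsigned long long)m->start, (unsigned long long)m->len,
               (unsigned long long)m->pgoff, m->filename);
}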
+ */ + ctx = rcu_dereference(current->perf_event_ctxp); + if (ctx) + perf_event_mmap_ctx(ctx, mmap_event); + rcu_read_unlock(); + + kfree(buf); +} + +void __perf_event_mmap(struct vm_area_struct *vma) +{ + struct perf_mmap_event mmap_event; + + if (!atomic_read(&nr_mmap_events)) + return; + + mmap_event = (struct perf_mmap_event){ + .vma = vma, + /* .file_name */ + /* .file_size */ + .event_id = { + .header = { + .type = PERF_RECORD_MMAP, + .misc = 0, + /* .size */ + }, + /* .pid */ + /* .tid */ + .start = vma->vm_start, + .len = vma->vm_end - vma->vm_start, + .pgoff = vma->vm_pgoff, + }, + }; + + perf_event_mmap_event(&mmap_event); +} + +/* + * IRQ throttle logging + */ + +static void perf_log_throttle(struct perf_event *event, int enable) +{ + struct perf_output_handle handle; + int ret; + + struct { + struct perf_event_header header; + u64 time; + u64 id; + u64 stream_id; + } throttle_event = { + .header = { + .type = PERF_RECORD_THROTTLE, + .misc = 0, + .size = sizeof(throttle_event), + }, + .time = perf_clock(), + .id = primary_event_id(event), + .stream_id = event->id, + }; + + if (enable) + throttle_event.header.type = PERF_RECORD_UNTHROTTLE; + + ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0); + if (ret) + return; + + perf_output_put(&handle, throttle_event); + perf_output_end(&handle); +} + +/* + * Generic event overflow handling, sampling. + */ + +static int __perf_event_overflow(struct perf_event *event, int nmi, + int throttle, struct perf_sample_data *data, + struct pt_regs *regs) +{ + int events = atomic_read(&event->event_limit); + struct hw_perf_event *hwc = &event->hw; + int ret = 0; + + throttle = (throttle && event->pmu->unthrottle != NULL); + + if (!throttle) { + hwc->interrupts++; + } else { + if (hwc->interrupts != MAX_INTERRUPTS) { + hwc->interrupts++; + if (HZ * hwc->interrupts > + (u64)sysctl_perf_event_sample_rate) { + hwc->interrupts = MAX_INTERRUPTS; + perf_log_throttle(event, 0); + ret = 1; + } + } else { + /* + * Keep re-disabling events even though on the previous + * pass we disabled it - just in case we raced with a + * sched-in and the event got enabled again: + */ + ret = 1; + } + } + + if (event->attr.freq) { + u64 now = perf_clock(); + s64 delta = now - hwc->freq_stamp; + + hwc->freq_stamp = now; + + if (delta > 0 && delta < TICK_NSEC) + perf_adjust_period(event, NSEC_PER_SEC / (int)delta); + } + + /* + * XXX event_limit might not quite work as expected on inherited + * events + */ + + event->pending_kill = POLL_IN; + if (events && atomic_dec_and_test(&event->event_limit)) { + ret = 1; + event->pending_kill = POLL_HUP; + if (nmi) { + event->pending_disable = 1; + perf_pending_queue(&event->pending, + perf_pending_event); + } else + perf_event_disable(event); + } + + perf_event_output(event, nmi, data, regs); + return ret; +} + +int perf_event_overflow(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + return __perf_event_overflow(event, nmi, 1, data, regs); +} + +/* + * Generic software event infrastructure + */ + +/* + * We directly increment event->count and keep a second value in + * event->hw.period_left to count intervals. This period event + * is kept in the range [-sample_period, 0] so that we can use the + * sign as trigger. 
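The throttle test in __perf_event_overflow() fires once HZ * hwc->interrupts exceeds sysctl_perf_event_sample_rate, i.e. once the interrupts seen within the current tick extrapolate to more than the allowed samples per second. A small illustrative helper with made-up numbers (HZ and the sysctl value vary by configuration):

#include <stdint.h>

/*
 * Nonzero once this tick's interrupt count extrapolates past the limit,
 * mirroring the "HZ * hwc->interrupts > sysctl_perf_event_sample_rate"
 * check above.
 */
static int should_throttle(unsigned int interrupts_this_tick,
                           unsigned int hz, unsigned int max_sample_rate)
{
        return (uint64_t)hz * interrupts_this_tick > max_sample_rate;
}
/* e.g. hz = 1000, max_sample_rate = 100000: throttling starts at the 101st
 * interrupt inside a single tick, and one PERF_RECORD_THROTTLE is logged. */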
+ */ + +static u64 perf_swevent_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 period = hwc->last_period; + u64 nr, offset; + s64 old, val; + + hwc->last_period = hwc->sample_period; + +again: + old = val = atomic64_read(&hwc->period_left); + if (val < 0) + return 0; + + nr = div64_u64(period + val, period); + offset = nr * period; + val -= offset; + if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) + goto again; + + return nr; +} + +static void perf_swevent_overflow(struct perf_event *event, + int nmi, struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct hw_perf_event *hwc = &event->hw; + int throttle = 0; + u64 overflow; + + data->period = event->hw.last_period; + overflow = perf_swevent_set_period(event); + + if (hwc->interrupts == MAX_INTERRUPTS) + return; + + for (; overflow; overflow--) { + if (__perf_event_overflow(event, nmi, throttle, + data, regs)) { + /* + * We inhibit the overflow from happening when + * hwc->interrupts == MAX_INTERRUPTS. + */ + break; + } + throttle = 1; + } +} + +static void perf_swevent_unthrottle(struct perf_event *event) +{ + /* + * Nothing to do, we already reset hwc->interrupts. + */ +} + +static void perf_swevent_add(struct perf_event *event, u64 nr, + int nmi, struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct hw_perf_event *hwc = &event->hw; + + atomic64_add(nr, &event->count); + + if (!hwc->sample_period) + return; + + if (!regs) + return; + + if (!atomic64_add_negative(nr, &hwc->period_left)) + perf_swevent_overflow(event, nmi, data, regs); +} + +static int perf_swevent_is_counting(struct perf_event *event) +{ + /* + * The event is active, we're good! + */ + if (event->state == PERF_EVENT_STATE_ACTIVE) + return 1; + + /* + * The event is off/error, not counting. + */ + if (event->state != PERF_EVENT_STATE_INACTIVE) + return 0; + + /* + * The event is inactive, if the context is active + * we're part of a group that didn't make it on the 'pmu', + * not counting. + */ + if (event->ctx->is_active) + return 0; + + /* + * We're inactive and the context is too, this means the + * task is scheduled out, we're counting events that happen + * to us, like migration events. 
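perf_swevent_set_period() above keeps period_left in [-sample_period, 0] and turns a non-negative value into a count of elapsed periods. A stand-alone sketch of the same arithmetic, single-threaded and without the cmpxchg retry loop (function and variable names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* period_left crosses zero -> report how many periods elapsed and re-arm */
static uint64_t set_period(int64_t *period_left, uint64_t period)
{
        int64_t val = *period_left;
        uint64_t nr;

        if (val < 0)
                return 0;                       /* not due yet */

        nr = (period + val) / period;           /* elapsed periods */
        *period_left = val - (int64_t)(nr * period);
        return nr;
}

int main(void)
{
        int64_t left = -30;
        uint64_t period = 100;

        left += 45;     /* as in perf_swevent_add(): count 45 pushes us past 0 */
        printf("overflows=%llu new period_left=%lld\n",
               (unsigned long long)set_period(&left, period), (long long)left);
        /* prints: overflows=1 new period_left=-85 */
        return 0;
}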
+ */ + return 1; +} + +static int perf_swevent_match(struct perf_event *event, + enum perf_type_id type, + u32 event_id, struct pt_regs *regs) +{ + if (!perf_swevent_is_counting(event)) + return 0; + + if (event->attr.type != type) + return 0; + if (event->attr.config != event_id) + return 0; + + if (regs) { + if (event->attr.exclude_user && user_mode(regs)) + return 0; + + if (event->attr.exclude_kernel && !user_mode(regs)) + return 0; + } + + return 1; +} + +static void perf_swevent_ctx_event(struct perf_event_context *ctx, + enum perf_type_id type, + u32 event_id, u64 nr, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct perf_event *event; + + if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_swevent_match(event, type, event_id, regs)) + perf_swevent_add(event, nr, nmi, data, regs); + } + rcu_read_unlock(); +} + +static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) +{ + if (in_nmi()) + return &cpuctx->recursion[3]; + + if (in_irq()) + return &cpuctx->recursion[2]; + + if (in_softirq()) + return &cpuctx->recursion[1]; + + return &cpuctx->recursion[0]; +} + +static void do_perf_sw_event(enum perf_type_id type, u32 event_id, + u64 nr, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); + int *recursion = perf_swevent_recursion_context(cpuctx); + struct perf_event_context *ctx; + + if (*recursion) + goto out; + + (*recursion)++; + barrier(); + + perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, + nr, nmi, data, regs); + rcu_read_lock(); + /* + * doesn't really matter which of the child contexts the + * events ends up in. + */ + ctx = rcu_dereference(current->perf_event_ctxp); + if (ctx) + perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); + rcu_read_unlock(); + + barrier(); + (*recursion)--; + +out: + put_cpu_var(perf_cpu_context); +} + +void __perf_sw_event(u32 event_id, u64 nr, int nmi, + struct pt_regs *regs, u64 addr) +{ + struct perf_sample_data data = { + .addr = addr, + }; + + do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, + &data, regs); +} + +static void perf_swevent_read(struct perf_event *event) +{ +} + +static int perf_swevent_enable(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->sample_period) { + hwc->last_period = hwc->sample_period; + perf_swevent_set_period(event); + } + return 0; +} + +static void perf_swevent_disable(struct perf_event *event) +{ +} + +static const struct pmu perf_ops_generic = { + .enable = perf_swevent_enable, + .disable = perf_swevent_disable, + .read = perf_swevent_read, + .unthrottle = perf_swevent_unthrottle, +}; + +/* + * hrtimer based swevent callback + */ + +static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) +{ + enum hrtimer_restart ret = HRTIMER_RESTART; + struct perf_sample_data data; + struct pt_regs *regs; + struct perf_event *event; + u64 period; + + event = container_of(hrtimer, struct perf_event, hw.hrtimer); + event->pmu->read(event); + + data.addr = 0; + regs = get_irq_regs(); + /* + * In case we exclude kernel IPs or are somehow not in interrupt + * context, provide the next best thing, the user IP. 
+ */ + if ((event->attr.exclude_kernel || !regs) && + !event->attr.exclude_user) + regs = task_pt_regs(current); + + if (regs) { + if (perf_event_overflow(event, 0, &data, regs)) + ret = HRTIMER_NORESTART; + } + + period = max_t(u64, 10000, event->hw.sample_period); + hrtimer_forward_now(hrtimer, ns_to_ktime(period)); + + return ret; +} + +/* + * Software event: cpu wall time clock + */ + +static void cpu_clock_perf_event_update(struct perf_event *event) +{ + int cpu = raw_smp_processor_id(); + s64 prev; + u64 now; + + now = cpu_clock(cpu); + prev = atomic64_read(&event->hw.prev_count); + atomic64_set(&event->hw.prev_count, now); + atomic64_add(now - prev, &event->count); +} + +static int cpu_clock_perf_event_enable(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int cpu = raw_smp_processor_id(); + + atomic64_set(&hwc->prev_count, cpu_clock(cpu)); + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; + if (hwc->sample_period) { + u64 period = max_t(u64, 10000, hwc->sample_period); + __hrtimer_start_range_ns(&hwc->hrtimer, + ns_to_ktime(period), 0, + HRTIMER_MODE_REL, 0); + } + + return 0; +} + +static void cpu_clock_perf_event_disable(struct perf_event *event) +{ + if (event->hw.sample_period) + hrtimer_cancel(&event->hw.hrtimer); + cpu_clock_perf_event_update(event); +} + +static void cpu_clock_perf_event_read(struct perf_event *event) +{ + cpu_clock_perf_event_update(event); +} + +static const struct pmu perf_ops_cpu_clock = { + .enable = cpu_clock_perf_event_enable, + .disable = cpu_clock_perf_event_disable, + .read = cpu_clock_perf_event_read, +}; + +/* + * Software event: task time clock + */ + +static void task_clock_perf_event_update(struct perf_event *event, u64 now) +{ + u64 prev; + s64 delta; + + prev = atomic64_xchg(&event->hw.prev_count, now); + delta = now - prev; + atomic64_add(delta, &event->count); +} + +static int task_clock_perf_event_enable(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 now; + + now = event->ctx->time; + + atomic64_set(&hwc->prev_count, now); + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; + if (hwc->sample_period) { + u64 period = max_t(u64, 10000, hwc->sample_period); + __hrtimer_start_range_ns(&hwc->hrtimer, + ns_to_ktime(period), 0, + HRTIMER_MODE_REL, 0); + } + + return 0; +} + +static void task_clock_perf_event_disable(struct perf_event *event) +{ + if (event->hw.sample_period) + hrtimer_cancel(&event->hw.hrtimer); + task_clock_perf_event_update(event, event->ctx->time); + +} + +static void task_clock_perf_event_read(struct perf_event *event) +{ + u64 time; + + if (!in_nmi()) { + update_context_time(event->ctx); + time = event->ctx->time; + } else { + u64 now = perf_clock(); + u64 delta = now - event->ctx->timestamp; + time = event->ctx->time + delta; + } + + task_clock_perf_event_update(event, time); +} + +static const struct pmu perf_ops_task_clock = { + .enable = task_clock_perf_event_enable, + .disable = task_clock_perf_event_disable, + .read = task_clock_perf_event_read, +}; + +#ifdef CONFIG_EVENT_PROFILE +void perf_tp_event(int event_id, u64 addr, u64 count, void *record, + int entry_size) +{ + struct perf_raw_record raw = { + .size = entry_size, + .data = record, + }; + + struct perf_sample_data data = { + .addr = addr, + .raw = &raw, + }; + + struct pt_regs *regs = get_irq_regs(); + + if (!regs) + regs = task_pt_regs(current); + + 
do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, + &data, regs); +} +EXPORT_SYMBOL_GPL(perf_tp_event); + +extern int ftrace_profile_enable(int); +extern void ftrace_profile_disable(int); + +static void tp_perf_event_destroy(struct perf_event *event) +{ + ftrace_profile_disable(event->attr.config); +} + +static const struct pmu *tp_perf_event_init(struct perf_event *event) +{ + /* + * Raw tracepoint data is a severe data leak, only allow root to + * have these. + */ + if ((event->attr.sample_type & PERF_SAMPLE_RAW) && + perf_paranoid_tracepoint_raw() && + !capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + if (ftrace_profile_enable(event->attr.config)) + return NULL; + + event->destroy = tp_perf_event_destroy; + + return &perf_ops_generic; +} +#else +static const struct pmu *tp_perf_event_init(struct perf_event *event) +{ + return NULL; +} +#endif + +atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; + +static void sw_perf_event_destroy(struct perf_event *event) +{ + u64 event_id = event->attr.config; + + WARN_ON(event->parent); + + atomic_dec(&perf_swevent_enabled[event_id]); +} + +static const struct pmu *sw_perf_event_init(struct perf_event *event) +{ + const struct pmu *pmu = NULL; + u64 event_id = event->attr.config; + + /* + * Software events (currently) can't in general distinguish + * between user, kernel and hypervisor events. + * However, context switches and cpu migrations are considered + * to be kernel events, and page faults are never hypervisor + * events. + */ + switch (event_id) { + case PERF_COUNT_SW_CPU_CLOCK: + pmu = &perf_ops_cpu_clock; + + break; + case PERF_COUNT_SW_TASK_CLOCK: + /* + * If the user instantiates this as a per-cpu event, + * use the cpu_clock event instead. + */ + if (event->ctx->task) + pmu = &perf_ops_task_clock; + else + pmu = &perf_ops_cpu_clock; + + break; + case PERF_COUNT_SW_PAGE_FAULTS: + case PERF_COUNT_SW_PAGE_FAULTS_MIN: + case PERF_COUNT_SW_PAGE_FAULTS_MAJ: + case PERF_COUNT_SW_CONTEXT_SWITCHES: + case PERF_COUNT_SW_CPU_MIGRATIONS: + if (!event->parent) { + atomic_inc(&perf_swevent_enabled[event_id]); + event->destroy = sw_perf_event_destroy; + } + pmu = &perf_ops_generic; + break; + } + + return pmu; +} + +/* + * Allocate and initialize a event structure + */ +static struct perf_event * +perf_event_alloc(struct perf_event_attr *attr, + int cpu, + struct perf_event_context *ctx, + struct perf_event *group_leader, + struct perf_event *parent_event, + gfp_t gfpflags) +{ + const struct pmu *pmu; + struct perf_event *event; + struct hw_perf_event *hwc; + long err; + + event = kzalloc(sizeof(*event), gfpflags); + if (!event) + return ERR_PTR(-ENOMEM); + + /* + * Single events are their own group leaders, with an + * empty sibling list: + */ + if (!group_leader) + group_leader = event; + + mutex_init(&event->child_mutex); + INIT_LIST_HEAD(&event->child_list); + + INIT_LIST_HEAD(&event->group_entry); + INIT_LIST_HEAD(&event->event_entry); + INIT_LIST_HEAD(&event->sibling_list); + init_waitqueue_head(&event->waitq); + + mutex_init(&event->mmap_mutex); + + event->cpu = cpu; + event->attr = *attr; + event->group_leader = group_leader; + event->pmu = NULL; + event->ctx = ctx; + event->oncpu = -1; + + event->parent = parent_event; + + event->ns = get_pid_ns(current->nsproxy->pid_ns); + event->id = atomic64_inc_return(&perf_event_id); + + event->state = PERF_EVENT_STATE_INACTIVE; + + if (attr->disabled) + event->state = PERF_EVENT_STATE_OFF; + + pmu = NULL; + + hwc = &event->hw; + hwc->sample_period = attr->sample_period; + if (attr->freq && 
attr->sample_freq) + hwc->sample_period = 1; + hwc->last_period = hwc->sample_period; + + atomic64_set(&hwc->period_left, hwc->sample_period); + + /* + * we currently do not support PERF_FORMAT_GROUP on inherited events + */ + if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) + goto done; + + switch (attr->type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + pmu = hw_perf_event_init(event); + break; + + case PERF_TYPE_SOFTWARE: + pmu = sw_perf_event_init(event); + break; + + case PERF_TYPE_TRACEPOINT: + pmu = tp_perf_event_init(event); + break; + + default: + break; + } +done: + err = 0; + if (!pmu) + err = -EINVAL; + else if (IS_ERR(pmu)) + err = PTR_ERR(pmu); + + if (err) { + if (event->ns) + put_pid_ns(event->ns); + kfree(event); + return ERR_PTR(err); + } + + event->pmu = pmu; + + if (!event->parent) { + atomic_inc(&nr_events); + if (event->attr.mmap) + atomic_inc(&nr_mmap_events); + if (event->attr.comm) + atomic_inc(&nr_comm_events); + if (event->attr.task) + atomic_inc(&nr_task_events); + } + + return event; +} + +static int perf_copy_attr(struct perf_event_attr __user *uattr, + struct perf_event_attr *attr) +{ + u32 size; + int ret; + + if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) + return -EFAULT; + + /* + * zero the full structure, so that a short copy will be nice. + */ + memset(attr, 0, sizeof(*attr)); + + ret = get_user(size, &uattr->size); + if (ret) + return ret; + + if (size > PAGE_SIZE) /* silly large */ + goto err_size; + + if (!size) /* abi compat */ + size = PERF_ATTR_SIZE_VER0; + + if (size < PERF_ATTR_SIZE_VER0) + goto err_size; + + /* + * If we're handed a bigger struct than we know of, + * ensure all the unknown bits are 0 - i.e. new + * user-space does not rely on any kernel feature + * extensions we dont know about yet. + */ + if (size > sizeof(*attr)) { + unsigned char __user *addr; + unsigned char __user *end; + unsigned char val; + + addr = (void __user *)uattr + sizeof(*attr); + end = (void __user *)uattr + size; + + for (; addr < end; addr++) { + ret = get_user(val, addr); + if (ret) + return ret; + if (val) + goto err_size; + } + size = sizeof(*attr); + } + + ret = copy_from_user(attr, uattr, size); + if (ret) + return -EFAULT; + + /* + * If the type exists, the corresponding creation will verify + * the attr->config. 
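perf_copy_attr() above accepts a larger-than-known perf_event_attr only if every byte past the kernel's sizeof(*attr) is zero, zero-fills the remainder when user space passes a smaller struct, and on a size mismatch writes the supported size back before returning -E2BIG so the caller can retry. A stand-alone sketch of just the trailing-bytes rule (tail_is_zero() is an illustrative name, not part of the patch):

#include <stddef.h>

/*
 * A struct larger than the kernel's is only accepted when every byte
 * beyond the kernel's sizeof(*attr) is zero.
 */
static int tail_is_zero(const void *uattr, size_t user_size, size_t kernel_size)
{
        const unsigned char *p   = (const unsigned char *)uattr + kernel_size;
        const unsigned char *end = (const unsigned char *)uattr + user_size;

        for (; p < end; p++)
                if (*p)
                        return 0;       /* the kernel answers -E2BIG here */
        return 1;
}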
+ */ + if (attr->type >= PERF_TYPE_MAX) + return -EINVAL; + + if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) + return -EINVAL; + + if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) + return -EINVAL; + + if (attr->read_format & ~(PERF_FORMAT_MAX-1)) + return -EINVAL; + +out: + return ret; + +err_size: + put_user(sizeof(*attr), &uattr->size); + ret = -E2BIG; + goto out; +} + +int perf_event_set_output(struct perf_event *event, int output_fd) +{ + struct perf_event *output_event = NULL; + struct file *output_file = NULL; + struct perf_event *old_output; + int fput_needed = 0; + int ret = -EINVAL; + + if (!output_fd) + goto set; + + output_file = fget_light(output_fd, &fput_needed); + if (!output_file) + return -EBADF; + + if (output_file->f_op != &perf_fops) + goto out; + + output_event = output_file->private_data; + + /* Don't chain output fds */ + if (output_event->output) + goto out; + + /* Don't set an output fd when we already have an output channel */ + if (event->data) + goto out; + + atomic_long_inc(&output_file->f_count); + +set: + mutex_lock(&event->mmap_mutex); + old_output = event->output; + rcu_assign_pointer(event->output, output_event); + mutex_unlock(&event->mmap_mutex); + + if (old_output) { + /* + * we need to make sure no existing perf_output_*() + * is still referencing this event. + */ + synchronize_rcu(); + fput(old_output->filp); + } + + ret = 0; +out: + fput_light(output_file, fput_needed); + return ret; +} + +/** + * sys_perf_event_open - open a performance event, associate it to a task/cpu + * + * @attr_uptr: event_id type attributes for monitoring/sampling + * @pid: target pid + * @cpu: target cpu + * @group_fd: group leader event fd + */ +SYSCALL_DEFINE5(perf_event_open, + struct perf_event_attr __user *, attr_uptr, + pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) +{ + struct perf_event *event, *group_leader; + struct perf_event_attr attr; + struct perf_event_context *ctx; + struct file *event_file = NULL; + struct file *group_file = NULL; + int fput_needed = 0; + int fput_needed2 = 0; + int err; + + /* for future expandability... 
*/ + if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT)) + return -EINVAL; + + err = perf_copy_attr(attr_uptr, &attr); + if (err) + return err; + + if (!attr.exclude_kernel) { + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) + return -EACCES; + } + + if (attr.freq) { + if (attr.sample_freq > sysctl_perf_event_sample_rate) + return -EINVAL; + } + + /* + * Get the target context (task or percpu): + */ + ctx = find_get_context(pid, cpu); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + /* + * Look up the group leader (we will attach this event to it): + */ + group_leader = NULL; + if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { + err = -EINVAL; + group_file = fget_light(group_fd, &fput_needed); + if (!group_file) + goto err_put_context; + if (group_file->f_op != &perf_fops) + goto err_put_context; + + group_leader = group_file->private_data; + /* + * Do not allow a recursive hierarchy (this new sibling + * becoming part of another group-sibling): + */ + if (group_leader->group_leader != group_leader) + goto err_put_context; + /* + * Do not allow to attach to a group in a different + * task or CPU context: + */ + if (group_leader->ctx != ctx) + goto err_put_context; + /* + * Only a group leader can be exclusive or pinned + */ + if (attr.exclusive || attr.pinned) + goto err_put_context; + } + + event = perf_event_alloc(&attr, cpu, ctx, group_leader, + NULL, GFP_KERNEL); + err = PTR_ERR(event); + if (IS_ERR(event)) + goto err_put_context; + + err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0); + if (err < 0) + goto err_free_put_context; + + event_file = fget_light(err, &fput_needed2); + if (!event_file) + goto err_free_put_context; + + if (flags & PERF_FLAG_FD_OUTPUT) { + err = perf_event_set_output(event, group_fd); + if (err) + goto err_fput_free_put_context; + } + + event->filp = event_file; + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + perf_install_in_context(ctx, event, cpu); + ++ctx->generation; + mutex_unlock(&ctx->mutex); + + event->owner = current; + get_task_struct(current); + mutex_lock(&current->perf_event_mutex); + list_add_tail(&event->owner_entry, &current->perf_event_list); + mutex_unlock(&current->perf_event_mutex); + +err_fput_free_put_context: + fput_light(event_file, fput_needed2); + +err_free_put_context: + if (err < 0) + kfree(event); + +err_put_context: + if (err < 0) + put_ctx(ctx); + + fput_light(group_file, fput_needed); + + return err; +} + +/* + * inherit a event from parent task to child task: + */ +static struct perf_event * +inherit_event(struct perf_event *parent_event, + struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, + struct perf_event *group_leader, + struct perf_event_context *child_ctx) +{ + struct perf_event *child_event; + + /* + * Instead of creating recursive hierarchies of events, + * we link inherited events back to the original parent, + * which has a filp for sure, which we use as the reference + * count: + */ + if (parent_event->parent) + parent_event = parent_event->parent; + + child_event = perf_event_alloc(&parent_event->attr, + parent_event->cpu, child_ctx, + group_leader, parent_event, + GFP_KERNEL); + if (IS_ERR(child_event)) + return child_event; + get_ctx(child_ctx); + + /* + * Make the child state follow the state of the parent event, + * not its attr.disabled bit. We hold the parent's mutex, + * so we won't race with perf_event_{en, dis}able_family.
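sys_perf_event_open() defined above takes (attr, pid, cpu, group_fd, flags) and returns a file descriptor; there is no libc wrapper at this point, so callers go through syscall(2). A minimal user-space sketch that counts task clock for the calling thread, assuming the renamed <linux/perf_event.h> and an architecture that defines __NR_perf_event_open; the workload placeholder and error handling are illustrative:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* thin wrapper; __NR_perf_event_open is per-architecture */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.disabled = 1;

        /* count this task on any cpu, no group leader, no flags */
        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("task clock: %llu ns\n", (unsigned long long)count);
        close(fd);
        return 0;
}

With the default read_format the read() above returns a single u64; enabling PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING or PERF_FORMAT_ID grows the record as laid out by perf_output_read_one() earlier in the patch.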
+ */ + if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) + child_event->state = PERF_EVENT_STATE_INACTIVE; + else + child_event->state = PERF_EVENT_STATE_OFF; + + if (parent_event->attr.freq) + child_event->hw.sample_period = parent_event->hw.sample_period; + + /* + * Link it up in the child's context: + */ + add_event_to_ctx(child_event, child_ctx); + + /* + * Get a reference to the parent filp - we will fput it + * when the child event exits. This is safe to do because + * we are in the parent and we know that the filp still + * exists and has a nonzero count: + */ + atomic_long_inc(&parent_event->filp->f_count); + + /* + * Link this into the parent event's child list + */ + WARN_ON_ONCE(parent_event->ctx->parent_ctx); + mutex_lock(&parent_event->child_mutex); + list_add_tail(&child_event->child_list, &parent_event->child_list); + mutex_unlock(&parent_event->child_mutex); + + return child_event; +} + +static int inherit_group(struct perf_event *parent_event, + struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, + struct perf_event_context *child_ctx) +{ + struct perf_event *leader; + struct perf_event *sub; + struct perf_event *child_ctr; + + leader = inherit_event(parent_event, parent, parent_ctx, + child, NULL, child_ctx); + if (IS_ERR(leader)) + return PTR_ERR(leader); + list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { + child_ctr = inherit_event(sub, parent, parent_ctx, + child, leader, child_ctx); + if (IS_ERR(child_ctr)) + return PTR_ERR(child_ctr); + } + return 0; +} + +static void sync_child_event(struct perf_event *child_event, + struct task_struct *child) +{ + struct perf_event *parent_event = child_event->parent; + u64 child_val; + + if (child_event->attr.inherit_stat) + perf_event_read_event(child_event, child); + + child_val = atomic64_read(&child_event->count); + + /* + * Add back the child's count to the parent's count: + */ + atomic64_add(child_val, &parent_event->count); + atomic64_add(child_event->total_time_enabled, + &parent_event->child_total_time_enabled); + atomic64_add(child_event->total_time_running, + &parent_event->child_total_time_running); + + /* + * Remove this event from the parent's list + */ + WARN_ON_ONCE(parent_event->ctx->parent_ctx); + mutex_lock(&parent_event->child_mutex); + list_del_init(&child_event->child_list); + mutex_unlock(&parent_event->child_mutex); + + /* + * Release the parent event, if this was the last + * reference to it. + */ + fput(parent_event->filp); +} + +static void +__perf_event_exit_task(struct perf_event *child_event, + struct perf_event_context *child_ctx, + struct task_struct *child) +{ + struct perf_event *parent_event; + + update_event_times(child_event); + perf_event_remove_from_context(child_event); + + parent_event = child_event->parent; + /* + * It can happen that parent exits first, and has events + * that are still around due to the child reference. These + * events need to be zapped - but otherwise linger. + */ + if (parent_event) { + sync_child_event(child_event, child); + free_event(child_event); + } +} + +/* + * When a child task exits, feed back event values to parent events. 
+ */ +void perf_event_exit_task(struct task_struct *child) +{ + struct perf_event *child_event, *tmp; + struct perf_event_context *child_ctx; + unsigned long flags; + + if (likely(!child->perf_event_ctxp)) { + perf_event_task(child, NULL, 0); + return; + } + + local_irq_save(flags); + /* + * We can't reschedule here because interrupts are disabled, + * and either child is current or it is a task that can't be + * scheduled, so we are now safe from rescheduling changing + * our context. + */ + child_ctx = child->perf_event_ctxp; + __perf_event_task_sched_out(child_ctx); + + /* + * Take the context lock here so that if find_get_context is + * reading child->perf_event_ctxp, we wait until it has + * incremented the context's refcount before we do put_ctx below. + */ + spin_lock(&child_ctx->lock); + child->perf_event_ctxp = NULL; + /* + * If this context is a clone; unclone it so it can't get + * swapped to another process while we're removing all + * the events from it. + */ + unclone_ctx(child_ctx); + spin_unlock_irqrestore(&child_ctx->lock, flags); + + /* + * Report the task dead after unscheduling the events so that we + * won't get any samples after PERF_RECORD_EXIT. We can however still + * get a few PERF_RECORD_READ events. + */ + perf_event_task(child, child_ctx, 0); + + /* + * We can recurse on the same lock type through: + * + * __perf_event_exit_task() + * sync_child_event() + * fput(parent_event->filp) + * perf_release() + * mutex_lock(&ctx->mutex) + * + * But since its the parent context it won't be the same instance. + */ + mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); + +again: + list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list, + group_entry) + __perf_event_exit_task(child_event, child_ctx, child); + + /* + * If the last event was a group event, it will have appended all + * its siblings to the list, but we obtained 'tmp' before that which + * will still point to the list head terminating the iteration. + */ + if (!list_empty(&child_ctx->group_list)) + goto again; + + mutex_unlock(&child_ctx->mutex); + + put_ctx(child_ctx); +} + +/* + * free an unexposed, unused context as created by inheritance by + * init_task below, used by fork() in case of fail. + */ +void perf_event_free_task(struct task_struct *task) +{ + struct perf_event_context *ctx = task->perf_event_ctxp; + struct perf_event *event, *tmp; + + if (!ctx) + return; + + mutex_lock(&ctx->mutex); +again: + list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) { + struct perf_event *parent = event->parent; + + if (WARN_ON_ONCE(!parent)) + continue; + + mutex_lock(&parent->child_mutex); + list_del_init(&event->child_list); + mutex_unlock(&parent->child_mutex); + + fput(parent->filp); + + list_del_event(event, ctx); + free_event(event); + } + + if (!list_empty(&ctx->group_list)) + goto again; + + mutex_unlock(&ctx->mutex); + + put_ctx(ctx); +} + +/* + * Initialize the perf_event context in task_struct + */ +int perf_event_init_task(struct task_struct *child) +{ + struct perf_event_context *child_ctx, *parent_ctx; + struct perf_event_context *cloned_ctx; + struct perf_event *event; + struct task_struct *parent = current; + int inherited_all = 1; + int ret = 0; + + child->perf_event_ctxp = NULL; + + mutex_init(&child->perf_event_mutex); + INIT_LIST_HEAD(&child->perf_event_list); + + if (likely(!parent->perf_event_ctxp)) + return 0; + + /* + * This is executed from the parent task context, so inherit + * events that have been marked for cloning. 
+ * First allocate and initialize a context for the child. + */ + + child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); + if (!child_ctx) + return -ENOMEM; + + __perf_event_init_context(child_ctx, child); + child->perf_event_ctxp = child_ctx; + get_task_struct(child); + + /* + * If the parent's context is a clone, pin it so it won't get + * swapped under us. + */ + parent_ctx = perf_pin_task_context(parent); + + /* + * No need to check if parent_ctx != NULL here; since we saw + * it non-NULL earlier, the only reason for it to become NULL + * is if we exit, and since we're currently in the middle of + * a fork we can't be exiting at the same time. + */ + + /* + * Lock the parent list. No need to lock the child - not PID + * hashed yet and not running, so nobody can access it. + */ + mutex_lock(&parent_ctx->mutex); + + /* + * We dont have to disable NMIs - we are only looking at + * the list, not manipulating it: + */ + list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { + if (event != event->group_leader) + continue; + + if (!event->attr.inherit) { + inherited_all = 0; + continue; + } + + ret = inherit_group(event, parent, parent_ctx, + child, child_ctx); + if (ret) { + inherited_all = 0; + break; + } + } + + if (inherited_all) { + /* + * Mark the child context as a clone of the parent + * context, or of whatever the parent is a clone of. + * Note that if the parent is a clone, it could get + * uncloned at any point, but that doesn't matter + * because the list of events and the generation + * count can't have changed since we took the mutex. + */ + cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); + if (cloned_ctx) { + child_ctx->parent_ctx = cloned_ctx; + child_ctx->parent_gen = parent_ctx->parent_gen; + } else { + child_ctx->parent_ctx = parent_ctx; + child_ctx->parent_gen = parent_ctx->generation; + } + get_ctx(child_ctx->parent_ctx); + } + + mutex_unlock(&parent_ctx->mutex); + + perf_unpin_context(parent_ctx); + + return ret; +} + +static void __cpuinit perf_event_init_cpu(int cpu) +{ + struct perf_cpu_context *cpuctx; + + cpuctx = &per_cpu(perf_cpu_context, cpu); + __perf_event_init_context(&cpuctx->ctx, NULL); + + spin_lock(&perf_resource_lock); + cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; + spin_unlock(&perf_resource_lock); + + hw_perf_event_setup(cpu); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void __perf_event_exit_cpu(void *info) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event_context *ctx = &cpuctx->ctx; + struct perf_event *event, *tmp; + + list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) + __perf_event_remove_from_context(event); +} +static void perf_event_exit_cpu(int cpu) +{ + struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_event_context *ctx = &cpuctx->ctx; + + mutex_lock(&ctx->mutex); + smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1); + mutex_unlock(&ctx->mutex); +} +#else +static inline void perf_event_exit_cpu(int cpu) { } +#endif + +static int __cpuinit +perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action) { + + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + perf_event_init_cpu(cpu); + break; + + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + hw_perf_event_setup_online(cpu); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + perf_event_exit_cpu(cpu); + break; + + default: + break; + } + + return 
NOTIFY_OK; +} + +/* + * This has to have a higher priority than migration_notifier in sched.c. + */ +static struct notifier_block __cpuinitdata perf_cpu_nb = { + .notifier_call = perf_cpu_notify, + .priority = 20, +}; + +void __init perf_event_init(void) +{ + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, + (void *)(long)smp_processor_id()); + register_cpu_notifier(&perf_cpu_nb); +} + +static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) +{ + return sprintf(buf, "%d\n", perf_reserved_percpu); +} + +static ssize_t +perf_set_reserve_percpu(struct sysdev_class *class, + const char *buf, + size_t count) +{ + struct perf_cpu_context *cpuctx; + unsigned long val; + int err, cpu, mpt; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; + if (val > perf_max_events) + return -EINVAL; + + spin_lock(&perf_resource_lock); + perf_reserved_percpu = val; + for_each_online_cpu(cpu) { + cpuctx = &per_cpu(perf_cpu_context, cpu); + spin_lock_irq(&cpuctx->ctx.lock); + mpt = min(perf_max_events - cpuctx->ctx.nr_events, + perf_max_events - perf_reserved_percpu); + cpuctx->max_pertask = mpt; + spin_unlock_irq(&cpuctx->ctx.lock); + } + spin_unlock(&perf_resource_lock); + + return count; +} + +static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) +{ + return sprintf(buf, "%d\n", perf_overcommit); +} + +static ssize_t +perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) +{ + unsigned long val; + int err; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; + if (val > 1) + return -EINVAL; + + spin_lock(&perf_resource_lock); + perf_overcommit = val; + spin_unlock(&perf_resource_lock); + + return count; +} + +static SYSDEV_CLASS_ATTR( + reserve_percpu, + 0644, + perf_show_reserve_percpu, + perf_set_reserve_percpu + ); + +static SYSDEV_CLASS_ATTR( + overcommit, + 0644, + perf_show_overcommit, + perf_set_overcommit + ); + +static struct attribute *perfclass_attrs[] = { + &attr_reserve_percpu.attr, + &attr_overcommit.attr, + NULL +}; + +static struct attribute_group perfclass_attr_group = { + .attrs = perfclass_attrs, + .name = "perf_events", +}; + +static int __init perf_event_sysfs_init(void) +{ + return sysfs_create_group(&cpu_sysdev_class.kset.kobj, + &perfclass_attr_group); +} +device_initcall(perf_event_sysfs_init); diff --git a/kernel/sched.c b/kernel/sched.c index faf4d463bbf..291c8d213d1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include #include #include @@ -2059,7 +2059,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) if (task_hot(p, old_rq->clock, NULL)) schedstat_inc(p, se.nr_forced2_migrations); #endif - perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, + perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); } p->se.vruntime -= old_cfsrq->min_vruntime - @@ -2724,7 +2724,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) */ prev_state = prev->state; finish_arch_switch(prev); - perf_counter_task_sched_in(current, cpu_of(rq)); + perf_event_task_sched_in(current, cpu_of(rq)); finish_lock_switch(rq, prev); fire_sched_in_preempt_notifiers(current); @@ -5199,7 +5199,7 @@ void scheduler_tick(void) curr->sched_class->task_tick(rq, curr, 0); spin_unlock(&rq->lock); - perf_counter_task_tick(curr, cpu); + perf_event_task_tick(curr, cpu); #ifdef CONFIG_SMP rq->idle_at_tick = idle_cpu(cpu); 
@@ -5415,7 +5415,7 @@ need_resched_nonpreemptible: if (likely(prev != next)) { sched_info_switch(prev, next); - perf_counter_task_sched_out(prev, next, cpu); + perf_event_task_sched_out(prev, next, cpu); rq->nr_switches++; rq->curr = next; @@ -7692,7 +7692,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) /* * Register at high priority so that task migration (migrate_all_tasks) * happens before everything else. This has to be lower priority than - * the notifier in the perf_counter subsystem, though. + * the notifier in the perf_event subsystem, though. */ static struct notifier_block __cpuinitdata migration_notifier = { .notifier_call = migration_call, @@ -9549,7 +9549,7 @@ void __init sched_init(void) alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); #endif /* SMP */ - perf_counter_init(); + perf_event_init(); scheduler_running = 1; } diff --git a/kernel/sys.c b/kernel/sys.c index b3f1097c76f..ea5c3bcac88 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -1511,11 +1511,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, case PR_SET_TSC: error = SET_TSC_CTL(arg2); break; - case PR_TASK_PERF_COUNTERS_DISABLE: - error = perf_counter_task_disable(); + case PR_TASK_PERF_EVENTS_DISABLE: + error = perf_event_task_disable(); break; - case PR_TASK_PERF_COUNTERS_ENABLE: - error = perf_counter_task_enable(); + case PR_TASK_PERF_EVENTS_ENABLE: + error = perf_event_task_enable(); break; case PR_GET_TIMERSLACK: error = current->timer_slack_ns; diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 68320f6b07b..515bc230ac2 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -177,4 +177,4 @@ cond_syscall(sys_eventfd); cond_syscall(sys_eventfd2); /* performance counters: */ -cond_syscall(sys_perf_counter_open); +cond_syscall(sys_perf_event_open); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1a631ba684a..6ba49c7cb12 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -50,7 +50,7 @@ #include #include #include -#include +#include #include #include @@ -964,28 +964,28 @@ static struct ctl_table kern_table[] = { .child = slow_work_sysctls, }, #endif -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS { .ctl_name = CTL_UNNUMBERED, - .procname = "perf_counter_paranoid", - .data = &sysctl_perf_counter_paranoid, - .maxlen = sizeof(sysctl_perf_counter_paranoid), + .procname = "perf_event_paranoid", + .data = &sysctl_perf_event_paranoid, + .maxlen = sizeof(sysctl_perf_event_paranoid), .mode = 0644, .proc_handler = &proc_dointvec, }, { .ctl_name = CTL_UNNUMBERED, - .procname = "perf_counter_mlock_kb", - .data = &sysctl_perf_counter_mlock, - .maxlen = sizeof(sysctl_perf_counter_mlock), + .procname = "perf_event_mlock_kb", + .data = &sysctl_perf_event_mlock, + .maxlen = sizeof(sysctl_perf_event_mlock), .mode = 0644, .proc_handler = &proc_dointvec, }, { .ctl_name = CTL_UNNUMBERED, - .procname = "perf_counter_max_sample_rate", - .data = &sysctl_perf_counter_sample_rate, - .maxlen = sizeof(sysctl_perf_counter_sample_rate), + .procname = "perf_event_max_sample_rate", + .data = &sysctl_perf_event_sample_rate, + .maxlen = sizeof(sysctl_perf_event_sample_rate), .mode = 0644, .proc_handler = &proc_dointvec, }, diff --git a/kernel/timer.c b/kernel/timer.c index bbb51074680..811e5c39145 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include @@ -1187,7 +1187,7 @@ static void 
run_timer_softirq(struct softirq_action *h) { struct tvec_base *base = __get_cpu_var(tvec_bases); - perf_counter_do_pending(); + perf_event_do_pending(); hrtimer_run_pending(); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 8712ce3c6a0..233f3483ac8 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include "trace_output.h" @@ -414,7 +414,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); + perf_tp_event(sys_data->enter_id, 0, 1, rec, size); } while(0); } @@ -476,7 +476,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec.nr = syscall_nr; rec.ret = syscall_get_return_value(current, regs); - perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec)); + perf_tp_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec)); } int reg_prof_syscall_exit(char *name) diff --git a/mm/mmap.c b/mm/mmap.c index 26892e346d8..376492ed08f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include @@ -1220,7 +1220,7 @@ munmap_back: if (correct_wcount) atomic_inc(&inode->i_writecount); out: - perf_counter_mmap(vma); + perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); @@ -2308,7 +2308,7 @@ int install_special_mapping(struct mm_struct *mm, mm->total_vm += len >> PAGE_SHIFT; - perf_counter_mmap(vma); + perf_event_mmap(vma); return 0; } diff --git a/mm/mprotect.c b/mm/mprotect.c index d80311baeb2..8bc969d8112 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -300,7 +300,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); if (error) goto out; - perf_counter_mmap(vma); + perf_event_mmap(vma); nstart = tmp; if (nstart < prev->vm_end) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 0aba8b6e9c5..b5f1953b614 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -318,7 +318,7 @@ export PERL_PATH LIB_FILE=libperf.a -LIB_H += ../../include/linux/perf_counter.h +LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h LIB_H += util/include/linux/list.h diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 043d85b7e25..1ec74161581 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -505,7 +505,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - if (event->header.misc & PERF_EVENT_MISC_KERNEL) { + if (event->header.misc & PERF_RECORD_MISC_KERNEL) { show = SHOW_KERNEL; level = 'k'; @@ -513,7 +513,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) dump_printf(" ...... 
dso: %s\n", dso->name); - } else if (event->header.misc & PERF_EVENT_MISC_USER) { + } else if (event->header.misc & PERF_RECORD_MISC_USER) { show = SHOW_USER; level = '.'; @@ -565,7 +565,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) thread = threads__findnew(event->mmap.pid, &threads, &last_match); - dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", + dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), (void *)(long)(event->header.size), event->mmap.pid, @@ -575,7 +575,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) event->mmap.filename); if (thread == NULL || map == NULL) { - dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); return 0; } @@ -591,14 +591,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) struct thread *thread; thread = threads__findnew(event->comm.pid, &threads, &last_match); - dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), event->comm.comm, event->comm.pid); if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { - dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); return -1; } total_comm++; @@ -614,7 +614,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head) thread = threads__findnew(event->fork.pid, &threads, &last_match); parent = threads__findnew(event->fork.ppid, &threads, &last_match); - dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", + dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), event->fork.pid, event->fork.ppid); @@ -627,7 +627,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head) return 0; if (!thread || !parent || thread__fork(thread, parent)) { - dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); return -1; } total_fork++; @@ -639,23 +639,23 @@ static int process_event(event_t *event, unsigned long offset, unsigned long head) { switch (event->header.type) { - case PERF_EVENT_SAMPLE: + case PERF_RECORD_SAMPLE: return process_sample_event(event, offset, head); - case PERF_EVENT_MMAP: + case PERF_RECORD_MMAP: return process_mmap_event(event, offset, head); - case PERF_EVENT_COMM: + case PERF_RECORD_COMM: return process_comm_event(event, offset, head); - case PERF_EVENT_FORK: + case PERF_RECORD_FORK: return process_fork_event(event, offset, head); /* * We dont process them right now but they are fine: */ - case PERF_EVENT_THROTTLE: - case PERF_EVENT_UNTHROTTLE: + case PERF_RECORD_THROTTLE: + case PERF_RECORD_UNTHROTTLE: return 0; default: diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 2459e5a22ed..a5a050af8e7 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -77,7 +77,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; static unsigned long mmap_read_head(struct mmap_data *md) { - struct perf_counter_mmap_page *pc = md->base; + struct perf_event_mmap_page *pc = md->base; long head; head = pc->data_head; @@ -88,7 +88,7 @@ static unsigned long mmap_read_head(struct mmap_data *md) static void mmap_write_tail(struct mmap_data 
*md, unsigned long tail) { - struct perf_counter_mmap_page *pc = md->base; + struct perf_event_mmap_page *pc = md->base; /* * ensure all reads are done before we write the tail out. @@ -233,7 +233,7 @@ static pid_t pid_synthesize_comm_event(pid_t pid, int full) } } - comm_ev.header.type = PERF_EVENT_COMM; + comm_ev.header.type = PERF_RECORD_COMM; size = ALIGN(size, sizeof(u64)); comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); @@ -288,7 +288,7 @@ static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid) while (1) { char bf[BUFSIZ], *pbf = bf; struct mmap_event mmap_ev = { - .header = { .type = PERF_EVENT_MMAP }, + .header = { .type = PERF_RECORD_MMAP }, }; int n; size_t size; @@ -355,7 +355,7 @@ static void synthesize_all(void) static int group_fd; -static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr) +static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr) { struct perf_header_attr *h_attr; @@ -371,7 +371,7 @@ static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int static void create_counter(int counter, int cpu, pid_t pid) { - struct perf_counter_attr *attr = attrs + counter; + struct perf_event_attr *attr = attrs + counter; struct perf_header_attr *h_attr; int track = !counter; /* only the first counter needs these */ struct { @@ -417,7 +417,7 @@ static void create_counter(int counter, int cpu, pid_t pid) attr->disabled = 1; try_again: - fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); + fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0); if (fd[nr_cpu][counter] < 0) { int err = errno; @@ -444,7 +444,7 @@ try_again: printf("\n"); error("perfcounter syscall returned with %d (%s)\n", fd[nr_cpu][counter], strerror(err)); - die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); + die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); exit(-1); } @@ -478,7 +478,7 @@ try_again: if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { int ret; - ret = ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_SET_OUTPUT, multiplex_fd); + ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); assert(ret != -1); } else { event_array[nr_poll].fd = fd[nr_cpu][counter]; @@ -496,7 +496,7 @@ try_again: } } - ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE); + ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); } static void open_counters(int cpu, pid_t pid) @@ -642,7 +642,7 @@ static int __cmd_record(int argc, const char **argv) if (done) { for (i = 0; i < nr_cpu; i++) { for (counter = 0; counter < nr_counters; counter++) - ioctl(fd[i][counter], PERF_COUNTER_IOC_DISABLE); + ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE); } } } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cdf9a8d27bb..19669c20088 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1121,7 +1121,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) more_data += sizeof(u64); } - dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", + dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", (void *)(offset + head), (void *)(long)(event->header.size), event->header.misc, @@ -1158,9 +1158,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (comm_list && !strlist__has_entry(comm_list, thread->comm)) return 0; - cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; + cpumode = event->header.misc & 
PERF_RECORD_MISC_CPUMODE_MASK; - if (cpumode == PERF_EVENT_MISC_KERNEL) { + if (cpumode == PERF_RECORD_MISC_KERNEL) { show = SHOW_KERNEL; level = 'k'; @@ -1168,7 +1168,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) dump_printf(" ...... dso: %s\n", dso->name); - } else if (cpumode == PERF_EVENT_MISC_USER) { + } else if (cpumode == PERF_RECORD_MISC_USER) { show = SHOW_USER; level = '.'; @@ -1210,7 +1210,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) thread = threads__findnew(event->mmap.pid, &threads, &last_match); - dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n", + dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), (void *)(long)(event->header.size), event->mmap.pid, @@ -1221,7 +1221,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) event->mmap.filename); if (thread == NULL || map == NULL) { - dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); return 0; } @@ -1238,14 +1238,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) thread = threads__findnew(event->comm.pid, &threads, &last_match); - dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), event->comm.comm, event->comm.pid); if (thread == NULL || thread__set_comm_adjust(thread, event->comm.comm)) { - dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); return -1; } total_comm++; @@ -1262,10 +1262,10 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head) thread = threads__findnew(event->fork.pid, &threads, &last_match); parent = threads__findnew(event->fork.ppid, &threads, &last_match); - dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n", + dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", (void *)(offset + head), (void *)(long)(event->header.size), - event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT", + event->header.type == PERF_RECORD_FORK ? 
"FORK" : "EXIT", event->fork.pid, event->fork.tid, event->fork.ppid, event->fork.ptid); @@ -1276,11 +1276,11 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head) if (thread == parent) return 0; - if (event->header.type == PERF_EVENT_EXIT) + if (event->header.type == PERF_RECORD_EXIT) return 0; if (!thread || !parent || thread__fork(thread, parent)) { - dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); return -1; } total_fork++; @@ -1291,7 +1291,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head) static int process_lost_event(event_t *event, unsigned long offset, unsigned long head) { - dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n", + dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n", (void *)(offset + head), (void *)(long)(event->header.size), event->lost.id, @@ -1305,7 +1305,7 @@ process_lost_event(event_t *event, unsigned long offset, unsigned long head) static int process_read_event(event_t *event, unsigned long offset, unsigned long head) { - struct perf_counter_attr *attr; + struct perf_event_attr *attr; attr = perf_header__find_attr(event->read.id, header); @@ -1319,7 +1319,7 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head) event->read.value); } - dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n", + dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n", (void *)(offset + head), (void *)(long)(event->header.size), event->read.pid, @@ -1337,31 +1337,31 @@ process_event(event_t *event, unsigned long offset, unsigned long head) trace_event(event); switch (event->header.type) { - case PERF_EVENT_SAMPLE: + case PERF_RECORD_SAMPLE: return process_sample_event(event, offset, head); - case PERF_EVENT_MMAP: + case PERF_RECORD_MMAP: return process_mmap_event(event, offset, head); - case PERF_EVENT_COMM: + case PERF_RECORD_COMM: return process_comm_event(event, offset, head); - case PERF_EVENT_FORK: - case PERF_EVENT_EXIT: + case PERF_RECORD_FORK: + case PERF_RECORD_EXIT: return process_task_event(event, offset, head); - case PERF_EVENT_LOST: + case PERF_RECORD_LOST: return process_lost_event(event, offset, head); - case PERF_EVENT_READ: + case PERF_RECORD_READ: return process_read_event(event, offset, head); /* * We dont process them right now but they are fine: */ - case PERF_EVENT_THROTTLE: - case PERF_EVENT_UNTHROTTLE: + case PERF_RECORD_THROTTLE: + case PERF_RECORD_UNTHROTTLE: return 0; default: diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 275d79c6627..ea9c15c0cdf 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1573,7 +1573,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) more_data += sizeof(u64); } - dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", + dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", (void *)(offset + head), (void *)(long)(event->header.size), event->header.misc, @@ -1589,9 +1589,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; + cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - if (cpumode == PERF_EVENT_MISC_KERNEL) { + if (cpumode == PERF_RECORD_MISC_KERNEL) { show = SHOW_KERNEL; level = 'k'; @@ -1599,7 +1599,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) 
dump_printf(" ...... dso: %s\n", dso->name); - } else if (cpumode == PERF_EVENT_MISC_USER) { + } else if (cpumode == PERF_RECORD_MISC_USER) { show = SHOW_USER; level = '.'; @@ -1626,23 +1626,23 @@ process_event(event_t *event, unsigned long offset, unsigned long head) nr_events++; switch (event->header.type) { - case PERF_EVENT_MMAP: + case PERF_RECORD_MMAP: return 0; - case PERF_EVENT_LOST: + case PERF_RECORD_LOST: nr_lost_chunks++; nr_lost_events += event->lost.lost; return 0; - case PERF_EVENT_COMM: + case PERF_RECORD_COMM: return process_comm_event(event, offset, head); - case PERF_EVENT_EXIT ... PERF_EVENT_READ: + case PERF_RECORD_EXIT ... PERF_RECORD_READ: return 0; - case PERF_EVENT_SAMPLE: + case PERF_RECORD_SAMPLE: return process_sample_event(event, offset, head); - case PERF_EVENT_MAX: + case PERF_RECORD_MAX: default: return -1; } diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 61b828236c1..16af2d82e85 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -48,7 +48,7 @@ #include #include -static struct perf_counter_attr default_attrs[] = { +static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, @@ -130,11 +130,11 @@ struct stats runtime_cycles_stats; attrs[counter].config == PERF_COUNT_##c) #define ERR_PERF_OPEN \ -"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n" +"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n" static void create_perf_stat_counter(int counter, int pid) { - struct perf_counter_attr *attr = attrs + counter; + struct perf_event_attr *attr = attrs + counter; if (scale) attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | @@ -144,7 +144,7 @@ static void create_perf_stat_counter(int counter, int pid) unsigned int cpu; for (cpu = 0; cpu < nr_cpus; cpu++) { - fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); + fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0); if (fd[cpu][counter] < 0 && verbose) fprintf(stderr, ERR_PERF_OPEN, counter, fd[cpu][counter], strerror(errno)); @@ -154,7 +154,7 @@ static void create_perf_stat_counter(int counter, int pid) attr->disabled = 1; attr->enable_on_exec = 1; - fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0); + fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0); if (fd[0][counter] < 0 && verbose) fprintf(stderr, ERR_PERF_OPEN, counter, fd[0][counter], strerror(errno)); diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 60040639627..4405681b313 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -937,21 +937,21 @@ process_event(event_t *event) switch (event->header.type) { - case PERF_EVENT_COMM: + case PERF_RECORD_COMM: return process_comm_event(event); - case PERF_EVENT_FORK: + case PERF_RECORD_FORK: return process_fork_event(event); - case PERF_EVENT_EXIT: + case PERF_RECORD_EXIT: return process_exit_event(event); - case PERF_EVENT_SAMPLE: + case PERF_RECORD_SAMPLE: return queue_sample_event(event); /* * We dont process them right now but they are fine: */ - case PERF_EVENT_MMAP: - case PERF_EVENT_THROTTLE: - case PERF_EVENT_UNTHROTTLE: + case PERF_RECORD_MMAP: + case PERF_RECORD_THROTTLE: + case PERF_RECORD_UNTHROTTLE: return 0; default: diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 4002ccb3675..1ca88896eee 100644 --- a/tools/perf/builtin-top.c +++ 
b/tools/perf/builtin-top.c @@ -901,7 +901,7 @@ struct mmap_data { static unsigned int mmap_read_head(struct mmap_data *md) { - struct perf_counter_mmap_page *pc = md->base; + struct perf_event_mmap_page *pc = md->base; int head; head = pc->data_head; @@ -977,9 +977,9 @@ static void mmap_read_counter(struct mmap_data *md) old += size; - if (event->header.type == PERF_EVENT_SAMPLE) { + if (event->header.type == PERF_RECORD_SAMPLE) { int user = - (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER; + (event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER; process_event(event->ip.ip, md->counter, user); } } @@ -1005,7 +1005,7 @@ int group_fd; static void start_counter(int i, int counter) { - struct perf_counter_attr *attr; + struct perf_event_attr *attr; int cpu; cpu = profile_cpu; @@ -1019,7 +1019,7 @@ static void start_counter(int i, int counter) attr->inherit = (cpu < 0) && inherit; try_again: - fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); + fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0); if (fd[i][counter] < 0) { int err = errno; @@ -1044,7 +1044,7 @@ try_again: printf("\n"); error("perfcounter syscall returned with %d (%s)\n", fd[i][counter], strerror(err)); - die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); + die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); exit(-1); } assert(fd[i][counter] >= 0); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 914ab366e36..e9d256e2f47 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -35,14 +35,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) thread = threads__findnew(event->comm.pid, &threads, &last_match); - dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), event->comm.comm, event->comm.pid); if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { - dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); return -1; } total_comm++; @@ -82,7 +82,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) more_data += sizeof(u64); } - dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", + dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", (void *)(offset + head), (void *)(long)(event->header.size), event->header.misc, @@ -98,9 +98,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; + cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - if (cpumode == PERF_EVENT_MISC_KERNEL) { + if (cpumode == PERF_RECORD_MISC_KERNEL) { show = SHOW_KERNEL; level = 'k'; @@ -108,7 +108,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) dump_printf(" ...... dso: %s\n", dso->name); - } else if (cpumode == PERF_EVENT_MISC_USER) { + } else if (cpumode == PERF_RECORD_MISC_USER) { show = SHOW_USER; level = '.'; @@ -146,19 +146,19 @@ process_event(event_t *event, unsigned long offset, unsigned long head) trace_event(event); switch (event->header.type) { - case PERF_EVENT_MMAP ... PERF_EVENT_LOST: + case PERF_RECORD_MMAP ... 
PERF_RECORD_LOST: return 0; - case PERF_EVENT_COMM: + case PERF_RECORD_COMM: return process_comm_event(event, offset, head); - case PERF_EVENT_EXIT ... PERF_EVENT_READ: + case PERF_RECORD_EXIT ... PERF_RECORD_READ: return 0; - case PERF_EVENT_SAMPLE: + case PERF_RECORD_SAMPLE: return process_sample_event(event, offset, head); - case PERF_EVENT_MAX: + case PERF_RECORD_MAX: default: return -1; } diff --git a/tools/perf/design.txt b/tools/perf/design.txt index f71e0d245cb..f1946d107b1 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt @@ -18,10 +18,10 @@ underlying hardware counters. Performance counters are accessed via special file descriptors. There's one file descriptor per virtual counter used. -The special file descriptor is opened via the perf_counter_open() +The special file descriptor is opened via the perf_event_open() system call: - int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, + int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags); @@ -32,9 +32,9 @@ can be used to set the blocking mode, etc. Multiple counters can be kept open at a time, and the counters can be poll()ed. -When creating a new counter fd, 'perf_counter_hw_event' is: +When creating a new counter fd, 'perf_event_hw_event' is: -struct perf_counter_hw_event { +struct perf_event_hw_event { /* * The MSB of the config word signifies if the rest contains cpu * specific (raw) counter configuration data, if unset, the next @@ -93,7 +93,7 @@ specified by 'event_id': /* * Generalized performance counter event types, used by the hw_event.event_id - * parameter of the sys_perf_counter_open() syscall: + * parameter of the sys_perf_event_open() syscall: */ enum hw_event_ids { /* @@ -159,7 +159,7 @@ in size. * reads on the counter should return the indicated quantities, * in increasing order of bit value, after the counter value. */ -enum perf_counter_read_format { +enum perf_event_read_format { PERF_FORMAT_TOTAL_TIME_ENABLED = 1, PERF_FORMAT_TOTAL_TIME_RUNNING = 2, }; @@ -178,7 +178,7 @@ interrupt: * Bits that can be set in hw_event.record_type to request information * in the overflow packets. */ -enum perf_counter_record_format { +enum perf_event_record_format { PERF_RECORD_IP = 1U << 0, PERF_RECORD_TID = 1U << 1, PERF_RECORD_TIME = 1U << 2, @@ -228,7 +228,7 @@ these events are recorded in the ring-buffer (see below). The 'comm' bit allows tracking of process comm data on process creation. This too is recorded in the ring-buffer (see below). -The 'pid' parameter to the perf_counter_open() system call allows the +The 'pid' parameter to the perf_event_open() system call allows the counter to be specific to a task: pid == 0: if the pid parameter is zero, the counter is attached to the @@ -258,7 +258,7 @@ The 'flags' parameter is currently unused and must be zero. The 'group_fd' parameter allows counter "groups" to be set up. A counter group has one counter which is the group "leader". The leader -is created first, with group_fd = -1 in the perf_counter_open call +is created first, with group_fd = -1 in the perf_event_open call that creates it. The rest of the group members are created subsequently, with group_fd giving the fd of the group leader. (A single counter on its own is created with group_fd = -1 and is @@ -277,13 +277,13 @@ tracking are logged into a ring-buffer. This ring-buffer is created and accessed through mmap(). 
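[Editor's note: to make the renamed user-space interface that design.txt describes above concrete, here is a minimal sketch of opening and reading a single counter under the new names. This is an illustration only, not part of the patch: it assumes post-rename kernel headers that provide __NR_perf_event_open and struct perf_event_attr (the attribute structure's current name, even though the design.txt text above still says 'perf_event_hw_event'); the sys_perf_event_open() wrapper mirrors the one in tools/perf/perf.h shown later in this series, the function and variable names are example placeholders, and error handling is omitted.]

    /*
     * Illustrative sketch only: count CPU cycles for the calling task
     * using the renamed perf_event interface.  Assumes post-rename
     * kernel headers; no error handling.
     */
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* Wrapper in the style of tools/perf/perf.h */
    static int sys_perf_event_open(struct perf_event_attr *attr,
                                   pid_t pid, int cpu, int group_fd,
                                   unsigned long flags)
    {
            attr->size = sizeof(*attr);
            return syscall(__NR_perf_event_open, attr, pid, cpu,
                           group_fd, flags);
    }

    int count_own_cycles(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type     = PERF_TYPE_HARDWARE;
            attr.config   = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;              /* enable explicitly below */

            /* pid == 0, cpu == -1: this task, on any CPU; no group leader */
            fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

            ioctl(fd, PERF_EVENT_IOC_ENABLE);
            /* ... workload to be measured ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE);

            /* with no read_format flags set, read() returns a single u64 */
            read(fd, &count, sizeof(count));
            close(fd);

            return 0;
    }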
The mmap size should be 1+2^n pages, where the first page is a meta-data page -(struct perf_counter_mmap_page) that contains various bits of information such +(struct perf_event_mmap_page) that contains various bits of information such as where the ring-buffer head is. /* * Structure of the page that can be mapped via mmap */ -struct perf_counter_mmap_page { +struct perf_event_mmap_page { __u32 version; /* version number of this structure */ __u32 compat_version; /* lowest version this is compat with */ @@ -317,7 +317,7 @@ struct perf_counter_mmap_page { * Control data for the mmap() data buffer. * * User-space reading this value should issue an rmb(), on SMP capable - * platforms, after reading this value -- see perf_counter_wakeup(). + * platforms, after reading this value -- see perf_event_wakeup(). */ __u32 data_head; /* head in the data section */ }; @@ -327,9 +327,9 @@ NOTE: the hw-counter userspace bits are arch specific and are currently only The following 2^n pages are the ring-buffer which contains events of the form: -#define PERF_EVENT_MISC_KERNEL (1 << 0) -#define PERF_EVENT_MISC_USER (1 << 1) -#define PERF_EVENT_MISC_OVERFLOW (1 << 2) +#define PERF_RECORD_MISC_KERNEL (1 << 0) +#define PERF_RECORD_MISC_USER (1 << 1) +#define PERF_RECORD_MISC_OVERFLOW (1 << 2) struct perf_event_header { __u32 type; @@ -353,8 +353,8 @@ enum perf_event_type { * char filename[]; * }; */ - PERF_EVENT_MMAP = 1, - PERF_EVENT_MUNMAP = 2, + PERF_RECORD_MMAP = 1, + PERF_RECORD_MUNMAP = 2, /* * struct { @@ -364,10 +364,10 @@ enum perf_event_type { * char comm[]; * }; */ - PERF_EVENT_COMM = 3, + PERF_RECORD_COMM = 3, /* - * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field + * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field * will be PERF_RECORD_* * * struct { @@ -397,7 +397,7 @@ Notification of new events is possible through poll()/select()/epoll() and fcntl() managing signals. Normally a notification is generated for every page filled, however one can -additionally set perf_counter_hw_event.wakeup_events to generate one every +additionally set perf_event_hw_event.wakeup_events to generate one every so many counter overflow events. Future work will include a splice() interface to the ring-buffer. @@ -409,11 +409,11 @@ events but does continue to exist and maintain its count value. An individual counter or counter group can be enabled with - ioctl(fd, PERF_COUNTER_IOC_ENABLE); + ioctl(fd, PERF_EVENT_IOC_ENABLE); or disabled with - ioctl(fd, PERF_COUNTER_IOC_DISABLE); + ioctl(fd, PERF_EVENT_IOC_DISABLE); Enabling or disabling the leader of a group enables or disables the whole group; that is, while the group leader is disabled, none of the @@ -424,16 +424,16 @@ other counter. Additionally, non-inherited overflow counters can use - ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr); + ioctl(fd, PERF_EVENT_IOC_REFRESH, nr); to enable a counter for 'nr' events, after which it gets disabled again. A process can enable or disable all the counter groups that are attached to it, using prctl: - prctl(PR_TASK_PERF_COUNTERS_ENABLE); + prctl(PR_TASK_PERF_EVENTS_ENABLE); - prctl(PR_TASK_PERF_COUNTERS_DISABLE); + prctl(PR_TASK_PERF_EVENTS_DISABLE); This applies to all counters on the current process, whether created by this process or by another, and doesn't affect any counters that @@ -447,11 +447,11 @@ Arch requirements If your architecture does not have hardware performance metrics, you can still use the generic software counters based on hrtimers for sampling. 
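[Editor's note: the design.txt text above describes the mmap()ed layout as one meta-data page (struct perf_event_mmap_page) followed by 2^n data pages, with data_head to be read before an rmb(). Below is a simplified reader sketch modeled on mmap_read_head() in tools/perf/builtin-record.c earlier in this patch. It is an illustration, not the tool's actual consumer: the parameter names are placeholders, __sync_synchronize() stands in for the architecture-appropriate read barrier, and a real reader must additionally handle records that wrap past the end of the buffer, as tools/perf does.]

    /*
     * Simplified ring-buffer reader sketch.  'base' is the start of the
     * mmap()ed region of 1+2^n pages; 'data_size' is 2^n * page_size.
     * Illustrative only; wrap-around of individual records not handled.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <linux/perf_event.h>

    static void drain_ring_buffer(void *base, size_t page_size,
                                  size_t data_size, uint64_t *tail)
    {
            struct perf_event_mmap_page *pc = base;
            unsigned char *data = (unsigned char *)base + page_size;
            uint64_t head = pc->data_head;

            /* read barrier after reading data_head, as required above */
            __sync_synchronize();

            while (*tail < head) {
                    struct perf_event_header *hdr =
                            (void *)&data[*tail & (data_size - 1)];

                    /*
                     * Dispatch on hdr->type here: PERF_RECORD_SAMPLE,
                     * PERF_RECORD_MMAP, PERF_RECORD_COMM, ...
                     */

                    *tail += hdr->size;
            }
    }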
-So to start with, in order to add HAVE_PERF_COUNTERS to your Kconfig, you +So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you will need at least this: - - asm/perf_counter.h - a basic stub will suffice at first + - asm/perf_event.h - a basic stub will suffice at first - support for atomic64 types (and associated helper functions) - - set_perf_counter_pending() implemented + - set_perf_event_pending() implemented If your architecture does have hardware capabilities, you can override the -weak stub hw_perf_counter_init() to register hardware counters. +weak stub hw_perf_event_init() to register hardware counters. diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 2abeb20d0bf..8cc4623afd6 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -52,15 +52,15 @@ #include #include -#include "../../include/linux/perf_counter.h" +#include "../../include/linux/perf_event.h" #include "util/types.h" /* - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all + * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all * counters in the current task. */ -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 +#define PR_TASK_PERF_EVENTS_DISABLE 31 +#define PR_TASK_PERF_EVENTS_ENABLE 32 #ifndef NSEC_PER_SEC # define NSEC_PER_SEC 1000000000ULL @@ -90,12 +90,12 @@ static inline unsigned long long rdclock(void) _min1 < _min2 ? _min1 : _min2; }) static inline int -sys_perf_counter_open(struct perf_counter_attr *attr, +sys_perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu, int group_fd, unsigned long flags) { attr->size = sizeof(*attr); - return syscall(__NR_perf_counter_open, attr, pid, cpu, + return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 018d414a09d..2c9c26d6ded 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -1,5 +1,5 @@ -#ifndef __PERF_EVENT_H -#define __PERF_EVENT_H +#ifndef __PERF_RECORD_H +#define __PERF_RECORD_H #include "../perf.h" #include "util.h" #include diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index bb4fca3efcc..e306857b2c2 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -9,7 +9,7 @@ /* * Create new perf.data header attribute: */ -struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr) +struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr) { struct perf_header_attr *self = malloc(sizeof(*self)); @@ -134,7 +134,7 @@ struct perf_file_section { }; struct perf_file_attr { - struct perf_counter_attr attr; + struct perf_event_attr attr; struct perf_file_section ids; }; @@ -320,7 +320,7 @@ u64 perf_header__sample_type(struct perf_header *header) return type; } -struct perf_counter_attr * +struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header) { int i; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 7b0e84a8717..a0761bc7863 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -1,12 +1,12 @@ #ifndef _PERF_HEADER_H #define _PERF_HEADER_H -#include "../../../include/linux/perf_counter.h" +#include "../../../include/linux/perf_event.h" #include #include "types.h" struct perf_header_attr { - struct perf_counter_attr attr; + struct perf_event_attr attr; int ids, size; u64 *id; off_t id_offset; @@ -34,11 +34,11 @@ char *perf_header__find_event(u64 id); struct perf_header_attr * -perf_header_attr__new(struct perf_counter_attr *attr); 
+perf_header_attr__new(struct perf_event_attr *attr); void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); -struct perf_counter_attr * +struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header); diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 89172fd0038..13ab4b842d4 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -10,7 +10,7 @@ int nr_counters; -struct perf_counter_attr attrs[MAX_COUNTERS]; +struct perf_event_attr attrs[MAX_COUNTERS]; struct event_symbol { u8 type; @@ -48,13 +48,13 @@ static struct event_symbol event_symbols[] = { { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, }; -#define __PERF_COUNTER_FIELD(config, name) \ - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) +#define __PERF_EVENT_FIELD(config, name) \ + ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) -#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) -#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) -#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) -#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) +#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) +#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) +#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) +#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) static const char *hw_event_names[] = { "cycles", @@ -352,7 +352,7 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int } static enum event_result -parse_generic_hw_event(const char **str, struct perf_counter_attr *attr) +parse_generic_hw_event(const char **str, struct perf_event_attr *attr) { const char *s = *str; int cache_type = -1, cache_op = -1, cache_result = -1; @@ -417,7 +417,7 @@ parse_single_tracepoint_event(char *sys_name, const char *evt_name, unsigned int evt_length, char *flags, - struct perf_counter_attr *attr, + struct perf_event_attr *attr, const char **strp) { char evt_path[MAXPATHLEN]; @@ -505,7 +505,7 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags) static enum event_result parse_tracepoint_event(const char **strp, - struct perf_counter_attr *attr) + struct perf_event_attr *attr) { const char *evt_name; char *flags; @@ -563,7 +563,7 @@ static int check_events(const char *str, unsigned int i) } static enum event_result -parse_symbolic_event(const char **strp, struct perf_counter_attr *attr) +parse_symbolic_event(const char **strp, struct perf_event_attr *attr) { const char *str = *strp; unsigned int i; @@ -582,7 +582,7 @@ parse_symbolic_event(const char **strp, struct perf_counter_attr *attr) } static enum event_result -parse_raw_event(const char **strp, struct perf_counter_attr *attr) +parse_raw_event(const char **strp, struct perf_event_attr *attr) { const char *str = *strp; u64 config; @@ -601,7 +601,7 @@ parse_raw_event(const char **strp, struct perf_counter_attr *attr) } static enum event_result -parse_numeric_event(const char **strp, struct perf_counter_attr *attr) +parse_numeric_event(const char **strp, struct perf_event_attr *attr) { const char *str = *strp; char *endp; @@ -623,7 +623,7 @@ parse_numeric_event(const char **strp, struct perf_counter_attr *attr) } static enum event_result -parse_event_modifier(const char **strp, struct perf_counter_attr *attr) +parse_event_modifier(const char **strp, 
struct perf_event_attr *attr) { const char *str = *strp; int eu = 1, ek = 1, eh = 1; @@ -656,7 +656,7 @@ parse_event_modifier(const char **strp, struct perf_counter_attr *attr) * Symbolic names are (almost) exactly matched. */ static enum event_result -parse_event_symbols(const char **str, struct perf_counter_attr *attr) +parse_event_symbols(const char **str, struct perf_event_attr *attr) { enum event_result ret; @@ -711,7 +711,7 @@ static void store_event_type(const char *orgname) int parse_events(const struct option *opt __used, const char *str, int unset __used) { - struct perf_counter_attr attr; + struct perf_event_attr attr; enum event_result ret; if (strchr(str, ':')) diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 60704c15961..30c60811284 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -16,7 +16,7 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config); extern int nr_counters; -extern struct perf_counter_attr attrs[MAX_COUNTERS]; +extern struct perf_event_attr attrs[MAX_COUNTERS]; extern const char *event_name(int ctr); extern const char *__event_name(int type, u64 config); diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index 1fd824c1f1c..af4b0573b37 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -480,12 +480,12 @@ out: } static struct tracepoint_path * -get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters) +get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) { struct tracepoint_path path, *ppath = &path; int i; - for (i = 0; i < nb_counters; i++) { + for (i = 0; i < nb_events; i++) { if (pattrs[i].type != PERF_TYPE_TRACEPOINT) continue; ppath->next = tracepoint_id_to_path(pattrs[i].config); @@ -496,7 +496,7 @@ get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters) return path.next; } -void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters) +void read_tracing_data(struct perf_event_attr *pattrs, int nb_events) { char buf[BUFSIZ]; struct tracepoint_path *tps; @@ -530,7 +530,7 @@ void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters) page_size = getpagesize(); write_or_die(&page_size, 4); - tps = get_tracepoints_path(pattrs, nb_counters); + tps = get_tracepoints_path(pattrs, nb_events); read_header_files(); read_ftrace_files(tps); diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index d35ebf1e29f..693f815c942 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -240,6 +240,6 @@ unsigned long long raw_field_value(struct event *event, const char *name, void *data); void *raw_field_ptr(struct event *event, const char *name, void *data); -void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters); +void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); #endif /* _TRACE_EVENTS_H */ -- cgit v1.2.3-70-g09d2 From 57c0c15b5244320065374ad2c54f4fbec77a6428 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 21 Sep 2009 12:20:38 +0200 Subject: perf: Tidy up after the big rename - provide compatibility Kconfig entry for existing PERF_COUNTERS .config's - provide courtesy copy of old perf_counter.h, for user-space projects - small indentation fixups - fix up MAINTAINERS - fix small x86 printout fallout - fix up small PowerPC comment fallout (use 'counter' as in register) Reviewed-by: Arjan van de Ven Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras 
Cc: Benjamin Herrenschmidt Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- MAINTAINERS | 2 +- arch/powerpc/include/asm/paca.h | 2 +- arch/powerpc/kernel/perf_event.c | 12 +- arch/x86/kernel/cpu/perf_event.c | 14 +- include/linux/perf_counter.h | 441 +++++++++++++++++++++++++++++++++++++++ include/linux/perf_event.h | 98 ++++----- init/Kconfig | 37 +++- kernel/perf_event.c | 4 +- 8 files changed, 534 insertions(+), 76 deletions(-) create mode 100644 include/linux/perf_counter.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index 43761a00e3f..751a307dc44 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4000,7 +4000,7 @@ S: Maintained F: include/linux/delayacct.h F: kernel/delayacct.c -PERFORMANCE COUNTER SUBSYSTEM +PERFORMANCE EVENTS SUBSYSTEM M: Peter Zijlstra M: Paul Mackerras M: Ingo Molnar diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 154f405b642..7d8514cecea 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -122,7 +122,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ - u8 perf_event_pending; /* PM interrupt while soft-disabled */ + u8 perf_event_pending; /* PM interrupt while soft-disabled */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index c98321fcb45..197b7d95879 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -41,7 +41,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); struct power_pmu *ppmu; /* - * Normally, to ignore kernel events we set the FCS (freeze events + * Normally, to ignore kernel events we set the FCS (freeze counters * in supervisor mode) bit in MMCR0, but if the kernel runs with the * hypervisor bit set in the MSR, or if we are running on a processor * where the hypervisor bit is forced to 1 (as on Apple G5 processors), @@ -159,7 +159,7 @@ void perf_event_print_debug(void) } /* - * Read one performance monitor event (PMC). + * Read one performance monitor counter (PMC). */ static unsigned long read_pmc(int idx) { @@ -409,7 +409,7 @@ static void power_pmu_read(struct perf_event *event) val = read_pmc(event->hw.idx); } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); - /* The events are only 32 bits wide */ + /* The counters are only 32 bits wide */ delta = (val - prev) & 0xfffffffful; atomic64_add(delta, &event->count); atomic64_sub(delta, &event->hw.period_left); @@ -543,7 +543,7 @@ void hw_perf_disable(void) } /* - * Set the 'freeze events' bit. + * Set the 'freeze counters' bit. * The barrier is to make sure the mtspr has been * executed and the PMU has frozen the events * before we return. @@ -1124,7 +1124,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) } /* - * A event has overflowed; update its count and record + * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled * here so there is no possibility of being interrupted. */ @@ -1271,7 +1271,7 @@ static void perf_event_interrupt(struct pt_regs *regs) /* * Reset MMCR0 to its normal value. This will set PMXE and - * clear FC (freeze events) and PMAO (perf mon alert occurred) + * clear FC (freeze counters) and PMAO (perf mon alert occurred) * and thus allow interrupts to occur again. 
* XXX might want to use MSR.PM to keep the events frozen until * we get back out of this interrupt. diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 0d03629fb1a..a3c7adb06b7 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2081,13 +2081,13 @@ void __init init_hw_perf_events(void) perf_events_lapic_init(); register_die_notifier(&perf_event_nmi_notifier); - pr_info("... version: %d\n", x86_pmu.version); - pr_info("... bit width: %d\n", x86_pmu.event_bits); - pr_info("... generic events: %d\n", x86_pmu.num_events); - pr_info("... value mask: %016Lx\n", x86_pmu.event_mask); - pr_info("... max period: %016Lx\n", x86_pmu.max_period); - pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); - pr_info("... event mask: %016Lx\n", perf_event_mask); + pr_info("... version: %d\n", x86_pmu.version); + pr_info("... bit width: %d\n", x86_pmu.event_bits); + pr_info("... generic registers: %d\n", x86_pmu.num_events); + pr_info("... value mask: %016Lx\n", x86_pmu.event_mask); + pr_info("... max period: %016Lx\n", x86_pmu.max_period); + pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); + pr_info("... event mask: %016Lx\n", perf_event_mask); } static inline void x86_pmu_read(struct perf_event *event) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h new file mode 100644 index 00000000000..368bd70f1d2 --- /dev/null +++ b/include/linux/perf_counter.h @@ -0,0 +1,441 @@ +/* + * NOTE: this file will be removed in a future kernel release, it is + * provided as a courtesy copy of user-space code that relies on the + * old (pre-rename) symbols and constants. + * + * Performance events: + * + * Copyright (C) 2008-2009, Thomas Gleixner + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra + * + * Data type definitions, declarations, prototypes. 
+ * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_PERF_COUNTER_H +#define _LINUX_PERF_COUNTER_H + +#include +#include +#include + +/* + * User-space ABI bits: + */ + +/* + * attr.type + */ +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + + PERF_TYPE_MAX, /* non-ABI */ +}; + +/* + * Generalized performance counter event types, used by the + * attr.event_id parameter of the sys_perf_counter_open() + * syscall: + */ +enum perf_hw_id { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + + PERF_COUNT_HW_MAX, /* non-ABI */ +}; + +/* + * Generalized hardware cache counters: + * + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x + * { read, write, prefetch } x + * { accesses, misses } + */ +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + + PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + + PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ +}; + +/* + * Special "software" counters provided by the kernel, even if the hardware + * does not support performance counters. These counters measure various + * physical and sw events of the kernel (and allow the profiling of them as + * well): + */ +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + + PERF_COUNT_SW_MAX, /* non-ABI */ +}; + +/* + * Bits that can be set in attr.sample_type to request information + * in the overflow packets. 
+ */ +enum perf_counter_sample_format { + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_READ = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_ID = 1U << 6, + PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, + PERF_SAMPLE_STREAM_ID = 1U << 9, + PERF_SAMPLE_RAW = 1U << 10, + + PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ +}; + +/* + * The format of the data returned by read() on a perf counter fd, + * as specified by attr.read_format: + * + * struct read_format { + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 id; } && PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP + * + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP + * }; + */ +enum perf_counter_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, + PERF_FORMAT_ID = 1U << 2, + PERF_FORMAT_GROUP = 1U << 3, + + PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ +}; + +#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ + +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_attr { + + /* + * Major type: hardware/software/tracepoint/etc. + */ + __u32 type; + + /* + * Size of the attr structure, for fwd/bwd compat. + */ + __u32 size; + + /* + * Type specific configuration information. + */ + __u64 config; + + union { + __u64 sample_period; + __u64 sample_freq; + }; + + __u64 sample_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + mmap : 1, /* include mmap data */ + comm : 1, /* include comm data */ + freq : 1, /* use freq, not period */ + inherit_stat : 1, /* per task counts */ + enable_on_exec : 1, /* next exec enables */ + task : 1, /* trace fork/exit */ + watermark : 1, /* wakeup_watermark */ + + __reserved_1 : 49; + + union { + __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_watermark; /* bytes before wakeup */ + }; + __u32 __reserved_2; + + __u64 __reserved_3; +}; + +/* + * Ioctls that can be done on a perf counter fd: + */ +#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) +#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) +#define PERF_COUNTER_IOC_RESET _IO ('$', 3) +#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) +#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) + +enum perf_counter_ioc_flags { + PERF_IOC_FLAG_GROUP = 1U << 0, +}; + +/* + * Structure of the page that can be mapped via mmap + */ +struct perf_counter_mmap_page { + __u32 version; /* version number of this structure */ + __u32 compat_version; /* lowest version this is compat with */ + + /* + * Bits needed to read the hw counters in user-space. + * + * u32 seq; + * s64 count; + * + * do { + * seq = pc->lock; + * + * barrier() + * if (pc->index) { + * count = pmc_read(pc->index - 1); + * count += pc->offset; + * } else + * goto regular_read; + * + * barrier(); + * } while (pc->lock != seq); + * + * NOTE: for obvious reason this only works on self-monitoring + * processes. 
+ */ + __u32 lock; /* seqlock for synchronization */ + __u32 index; /* hardware counter identifier */ + __s64 offset; /* add to hardware counter value */ + __u64 time_enabled; /* time counter active */ + __u64 time_running; /* time counter on cpu */ + + /* + * Hole for extension of the self monitor capabilities + */ + + __u64 __reserved[123]; /* align to 1k */ + + /* + * Control data for the mmap() data buffer. + * + * User-space reading the @data_head value should issue an rmb(), on + * SMP capable platforms, after reading this value -- see + * perf_counter_wakeup(). + * + * When the mapping is PROT_WRITE the @data_tail value should be + * written by userspace to reflect the last read data. In this case + * the kernel will not over-write unread data. + */ + __u64 data_head; /* head in the data section */ + __u64 data_tail; /* user-space written tail */ +}; + +#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) +#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) +#define PERF_EVENT_MISC_KERNEL (1 << 0) +#define PERF_EVENT_MISC_USER (2 << 0) +#define PERF_EVENT_MISC_HYPERVISOR (3 << 0) + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + + /* + * The MMAP events record the PROT_EXEC mappings so that we can + * correlate userspace IPs to code. They have the following structure: + * + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; + * }; + */ + PERF_EVENT_MMAP = 1, + + /* + * struct { + * struct perf_event_header header; + * u64 id; + * u64 lost; + * }; + */ + PERF_EVENT_LOST = 2, + + /* + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * char comm[]; + * }; + */ + PERF_EVENT_COMM = 3, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * u64 time; + * }; + */ + PERF_EVENT_EXIT = 4, + + /* + * struct { + * struct perf_event_header header; + * u64 time; + * u64 id; + * u64 stream_id; + * }; + */ + PERF_EVENT_THROTTLE = 5, + PERF_EVENT_UNTHROTTLE = 6, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * { u64 time; } && PERF_SAMPLE_TIME + * }; + */ + PERF_EVENT_FORK = 7, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, tid; + * + * struct read_format values; + * }; + */ + PERF_EVENT_READ = 8, + + /* + * struct { + * struct perf_event_header header; + * + * { u64 ip; } && PERF_SAMPLE_IP + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 addr; } && PERF_SAMPLE_ADDR + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u64 period; } && PERF_SAMPLE_PERIOD + * + * { struct read_format values; } && PERF_SAMPLE_READ + * + * { u64 nr, + * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN + * + * # + * # The RAW record below is opaque data wrt the ABI + * # + * # That is, the ABI doesn't make any promises wrt to + * # the stability of its content, it may vary depending + * # on event, hardware, kernel version and phase of + * # the moon. + * # + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
+ * # + * + * { u32 size; + * char data[size];}&& PERF_SAMPLE_RAW + * }; + */ + PERF_EVENT_SAMPLE = 9, + + PERF_EVENT_MAX, /* non-ABI */ +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, + + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, + + PERF_CONTEXT_MAX = (__u64)-4095, +}; + +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) + +/* + * In case some app still references the old symbols: + */ + +#define __NR_perf_counter_open __NR_perf_event_open + +#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE +#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE + +#endif /* _LINUX_PERF_COUNTER_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ae9d9ed6df2..acefaf71e6d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1,15 +1,15 @@ /* - * Performance events: + * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra * - * Data type definitions, declarations, prototypes. + * Data type definitions, declarations, prototypes. * * Started by: Thomas Gleixner and Ingo Molnar * - * For licencing details see kernel-base/COPYING + * For licencing details see kernel-base/COPYING */ #ifndef _LINUX_PERF_EVENT_H #define _LINUX_PERF_EVENT_H @@ -131,19 +131,19 @@ enum perf_event_sample_format { * as specified by attr.read_format: * * struct read_format { - * { u64 value; - * { u64 time_enabled; } && PERF_FORMAT_ENABLED - * { u64 time_running; } && PERF_FORMAT_RUNNING - * { u64 id; } && PERF_FORMAT_ID - * } && !PERF_FORMAT_GROUP + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 id; } && PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP * - * { u64 nr; - * { u64 time_enabled; } && PERF_FORMAT_ENABLED - * { u64 time_running; } && PERF_FORMAT_RUNNING - * { u64 value; - * { u64 id; } && PERF_FORMAT_ID - * } cntr[nr]; - * } && PERF_FORMAT_GROUP + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP * }; */ enum perf_event_read_format { @@ -152,7 +152,7 @@ enum perf_event_read_format { PERF_FORMAT_ID = 1U << 2, PERF_FORMAT_GROUP = 1U << 3, - PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ + PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ }; #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ @@ -216,8 +216,8 @@ struct perf_event_attr { * Ioctls that can be done on a perf event fd: */ #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) -#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) -#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) #define PERF_EVENT_IOC_RESET _IO ('$', 3) #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) @@ -314,9 +314,9 @@ enum perf_event_type { /* * struct { - * struct perf_event_header header; - * u64 id; - * u64 lost; + * struct perf_event_header header; + * u64 id; + * u64 lost; * }; */ PERF_RECORD_LOST = 2, @@ -383,23 +383,23 @@ enum perf_event_type { * { u64 id; } && PERF_SAMPLE_ID * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID * { u32 cpu, res; } && PERF_SAMPLE_CPU - * { u64 
period; } && PERF_SAMPLE_PERIOD + * { u64 period; } && PERF_SAMPLE_PERIOD * * { struct read_format values; } && PERF_SAMPLE_READ * * { u64 nr, * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN * - * # - * # The RAW record below is opaque data wrt the ABI - * # - * # That is, the ABI doesn't make any promises wrt to - * # the stability of its content, it may vary depending - * # on event_id, hardware, kernel version and phase of - * # the moon. - * # - * # In other words, PERF_SAMPLE_RAW contents are not an ABI. - * # + * # + * # The RAW record below is opaque data wrt the ABI + * # + * # That is, the ABI doesn't make any promises wrt to + * # the stability of its content, it may vary depending + * # on event, hardware, kernel version and phase of + * # the moon. + * # + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. + * # * * { u32 size; * char data[size];}&& PERF_SAMPLE_RAW @@ -503,10 +503,10 @@ struct pmu { * enum perf_event_active_state - the states of a event */ enum perf_event_active_state { - PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, - PERF_EVENT_STATE_ACTIVE = 1, + PERF_EVENT_STATE_ACTIVE = 1, }; struct file; @@ -529,7 +529,7 @@ struct perf_mmap_data { long watermark; /* wakeup watermark */ - struct perf_event_mmap_page *user_page; + struct perf_event_mmap_page *user_page; void *data_pages[0]; }; @@ -694,14 +694,14 @@ struct perf_cpu_context { }; struct perf_output_handle { - struct perf_event *event; - struct perf_mmap_data *data; - unsigned long head; - unsigned long offset; - int nmi; - int sample; - int locked; - unsigned long flags; + struct perf_event *event; + struct perf_mmap_data *data; + unsigned long head; + unsigned long offset; + int nmi; + int sample; + int locked; + unsigned long flags; }; #ifdef CONFIG_PERF_EVENTS @@ -829,22 +829,22 @@ static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next, int cpu) { } static inline void -perf_event_task_tick(struct task_struct *task, int cpu) { } +perf_event_task_tick(struct task_struct *task, int cpu) { } static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } -static inline void perf_event_do_pending(void) { } -static inline void perf_event_print_debug(void) { } +static inline void perf_event_do_pending(void) { } +static inline void perf_event_print_debug(void) { } static inline void perf_disable(void) { } static inline void perf_enable(void) { } -static inline int perf_event_task_disable(void) { return -EINVAL; } -static inline int perf_event_task_enable(void) { return -EINVAL; } +static inline int perf_event_task_disable(void) { return -EINVAL; } +static inline int perf_event_task_enable(void) { return -EINVAL; } static inline void perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { } -static inline void perf_event_mmap(struct vm_area_struct *vma) { } +static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } diff --git a/init/Kconfig b/init/Kconfig index cfdf5c32280..706728be312 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -920,26 +920,31 @@ config HAVE_PERF_EVENTS help See tools/perf/design.txt for details. 
-menu "Performance Counters" +menu "Kernel Performance Events And Counters" config PERF_EVENTS - bool "Kernel Performance Counters" - default y if PROFILING + bool "Kernel performance events and counters" + default y if (PROFILING || PERF_COUNTERS) depends on HAVE_PERF_EVENTS select ANON_INODES help - Enable kernel support for performance counter hardware. + Enable kernel support for various performance events provided + by software and hardware. - Performance counters are special hardware registers available - on most modern CPUs. These registers count the number of certain + Software events are supported either build-in or via the + use of generic tracepoints. + + Most modern CPUs support performance events via performance + counter registers. These registers count the number of certain types of hw events: such as instructions executed, cachemisses suffered, or branches mis-predicted - without slowing down the kernel or applications. These registers can also trigger interrupts when a threshold number of events have passed - and can thus be used to profile the code that runs on that CPU. - The Linux Performance Counter subsystem provides an abstraction of - these hardware capabilities, available via a system call. It + The Linux Performance Event subsystem provides an abstraction of + these software and hardware cevent apabilities, available via a + system call and used by the "perf" utility in tools/perf/. It provides per task and per CPU counters, and it provides event capabilities on top of those. @@ -950,14 +955,26 @@ config EVENT_PROFILE depends on PERF_EVENTS && EVENT_TRACING default y help - Allow the use of tracepoints as software performance counters. + Allow the use of tracepoints as software performance events. - When this is enabled, you can create perf counters based on + When this is enabled, you can create perf events based on tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID found in debugfs://tracing/events/*/*/id. (The -e/--events option to the perf tool can parse and interpret symbolic tracepoints, in the subsystem:tracepoint_name format.) +config PERF_COUNTERS + bool "Kernel performance counters (old config option)" + depends on HAVE_PERF_EVENTS + help + This config has been obsoleted by the PERF_EVENTS + config option - please see that one for details. + + It has no effect on the kernel whether you enable + it or not, it is a compatibility placeholder. + + Say N if unsure. + endmenu config VM_EVENT_COUNTERS diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 6e8b99a04e1..76ac4db405e 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1,12 +1,12 @@ /* - * Performance event core code + * Performance events core code: * * Copyright (C) 2008 Thomas Gleixner * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. * - * For licensing details see kernel-base/COPYING + * For licensing details see kernel-base/COPYING */ #include -- cgit v1.2.3-70-g09d2 From bcf56442429a15bdd6e1d81a9d4c89f93a44fdf7 Mon Sep 17 00:00:00 2001 From: GeunSik Lim Date: Tue, 16 Jun 2009 10:26:25 +0200 Subject: trivial: change address of the libcap source. This is patch to change ftp site of the libcap source. "ftp://linux.kernel.org" address does not exist. 
Signed-off-by: GeunSik Lim Signed-off-by: Jiri Kosina --- include/linux/capability.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/capability.h b/include/linux/capability.h index c3021105edc..c8f2a5f70ed 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -7,7 +7,7 @@ * * See here for the libcap library ("POSIX draft" compliance): * - * ftp://linux.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ + * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ */ #ifndef _LINUX_CAPABILITY_H -- cgit v1.2.3-70-g09d2 From 47a0dfaad9eb82b16193477cf99c2462af03c329 Mon Sep 17 00:00:00 2001 From: Ori Avtalion Date: Tue, 16 Jun 2009 12:17:53 +0300 Subject: trivial: fix typo in namei.h comment Signed-off-by: Ori Avtalion Signed-off-by: Jiri Kosina --- include/linux/namei.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/namei.h b/include/linux/namei.h index d870ae2faed..ec0f607b364 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -40,7 +40,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; * - follow links at the end * - require a directory * - ending slashes ok even for nonexistent files - * - internal "there are more path compnents" flag + * - internal "there are more path components" flag * - locked when lookup done with dcache_lock held * - dentry cache is untrusted; force a real lookup */ -- cgit v1.2.3-70-g09d2 From b9049df5a0e7f35456c06b949b08b898b9c2e7bc Mon Sep 17 00:00:00 2001 From: Dmitri Vorobiev Date: Tue, 23 Jun 2009 12:09:29 +0200 Subject: Change "useing" -> "using". Signed-off-by: Dmitri Vorobiev Signed-off-by: Jiri Kosina --- include/linux/workqueue.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 6273fa97b52..7ef0c7b94f3 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -94,7 +94,7 @@ struct execute_work { /* * initialize all of a work item in one go * - * NOTE! No point in using "atomic_long_set()": useing a direct + * NOTE! No point in using "atomic_long_set()": using a direct * assignment of the work data initializer allows the compiler * to generate better code. 
*/ -- cgit v1.2.3-70-g09d2 From 411c94038594b2a3fd123d09bdec3fe2500e383d Mon Sep 17 00:00:00 2001 From: Anand Gadiyar Date: Tue, 7 Jul 2009 15:24:23 +0530 Subject: trivial: fix typo "for for" in multiple files trivial: fix typo "for for" in multiple files Signed-off-by: Anand Gadiyar Signed-off-by: Jiri Kosina --- Documentation/filesystems/nfsroot.txt | 2 +- Documentation/powerpc/dts-bindings/marvell.txt | 2 +- arch/blackfin/mach-bf538/include/mach/defBF539.h | 2 +- arch/powerpc/kernel/udbg_16550.c | 2 +- arch/powerpc/platforms/powermac/udbg_scc.c | 2 +- drivers/edac/edac_core.h | 2 +- drivers/infiniband/hw/ipath/ipath_iba6110.c | 2 +- drivers/isdn/i4l/isdn_common.c | 4 ++-- drivers/md/md.h | 2 +- drivers/net/bnx2x_reg.h | 2 +- drivers/net/rionet.c | 2 +- drivers/usb/host/ehci-pci.c | 2 +- drivers/usb/host/ehci.h | 2 +- fs/xfs/xfs_fs.h | 2 +- include/linux/usb.h | 2 +- 15 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/nfsroot.txt b/Documentation/filesystems/nfsroot.txt index 68baddf3c3e..3ba0b945aaf 100644 --- a/Documentation/filesystems/nfsroot.txt +++ b/Documentation/filesystems/nfsroot.txt @@ -105,7 +105,7 @@ ip=:::::: the client address and this parameter is NOT empty only replies from the specified server are accepted. - Only required for for NFS root. That is autoconfiguration + Only required for NFS root. That is autoconfiguration will not be triggered if it is missing and NFS root is not in operation. diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/powerpc/dts-bindings/marvell.txt index 3708a2fd474..f1533d91953 100644 --- a/Documentation/powerpc/dts-bindings/marvell.txt +++ b/Documentation/powerpc/dts-bindings/marvell.txt @@ -32,7 +32,7 @@ prefixed with the string "marvell,", for Marvell Technology Group Ltd. devices. This field represents the number of cells needed to represent the address of the memory-mapped registers of devices within the system controller chip. - - #size-cells : Size representation for for the memory-mapped + - #size-cells : Size representation for the memory-mapped registers within the system controller chip. - #interrupt-cells : Defines the width of cells used to represent interrupts. 
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h index bdc330cd0e1..1c58914a874 100644 --- a/arch/blackfin/mach-bf538/include/mach/defBF539.h +++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h @@ -2325,7 +2325,7 @@ #define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */ #define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */ #define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */ -#define CDPRIO 0x0100 /* DMA has priority over core for for external accesses */ +#define CDPRIO 0x0100 /* DMA has priority over core for external accesses */ /* EBIU_AMGCTL Bit Positions */ #define AMCKEN_P 0x0000 /* Enable CLKOUT */ diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index acb74a17bbb..b4b167b3364 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -1,5 +1,5 @@ /* - * udbg for for NS16550 compatable serial ports + * udbg for NS16550 compatable serial ports * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 572771fd846..9490157da62 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -1,5 +1,5 @@ /* - * udbg for for zilog scc ports as found on Apple PowerMacs + * udbg for zilog scc ports as found on Apple PowerMacs * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 871c13b4c14..12f355cafdb 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -286,7 +286,7 @@ enum scrub_type { * is irrespective of the memory devices being mounted * on both sides of the memory stick. * - * Socket set: All of the memory sticks that are required for for + * Socket set: All of the memory sticks that are required for * a single memory access or all of the memory sticks * spanned by a chip-select row. A single socket set * has two chip-select rows and if double-sided sticks diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 02831ad070b..4bd39c8af80 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6110.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c @@ -809,7 +809,7 @@ static int ipath_setup_ht_reset(struct ipath_devdata *dd) * errors. We only bother to do this at load time, because it's OK if * it happened before we were loaded (first time after boot/reset), * but any time after that, it's fatal anyway. Also need to not check - * for for upper byte errors if we are in 8 bit mode, so figure out + * for upper byte errors if we are in 8 bit mode, so figure out * our width. For now, at least, also complain if it's 8 bit. */ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev, diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 7188c59a76f..adb1e8c36b4 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -761,7 +761,7 @@ isdn_getnum(char **p) * Be aware that this is not an atomic operation when sleep != 0, even though * interrupts are turned off! Well, like that we are currently only called * on behalf of a read system call on raw device files (which are documented - * to be dangerous and for for debugging purpose only). The inode semaphore + * to be dangerous and for debugging purpose only). 
The inode semaphore * takes care that this is not called for the same minor device number while * we are sleeping, but access is not serialized against simultaneous read() * from the corresponding ttyI device. Can other ugly events, like changes @@ -873,7 +873,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que * Be aware that this is not an atomic operation when sleep != 0, even though * interrupts are turned off! Well, like that we are currently only called * on behalf of a read system call on raw device files (which are documented - * to be dangerous and for for debugging purpose only). The inode semaphore + * to be dangerous and for debugging purpose only). The inode semaphore * takes care that this is not called for the same minor device number while * we are sleeping, but access is not serialized against simultaneous read() * from the corresponding ttyI device. Can other ugly events, like changes diff --git a/drivers/md/md.h b/drivers/md/md.h index f8fc188bc76..f55d2ff9513 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -201,7 +201,7 @@ struct mddev_s * INTR: resync needs to be aborted for some reason * DONE: thread is done and is waiting to be reaped * REQUEST: user-space has requested a sync (used with SYNC) - * CHECK: user-space request for for check-only, no repair + * CHECK: user-space request for check-only, no repair * RESHAPE: A reshape is happening * * If neither SYNC or RESHAPE are set, then it is a recovery. diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h index 0695be14cf9..aa76cbada5e 100644 --- a/drivers/net/bnx2x_reg.h +++ b/drivers/net/bnx2x_reg.h @@ -3122,7 +3122,7 @@ The fields are:[4:0] - tail pointer; [10:5] - Link List size; 15:11] - header pointer. */ #define TCM_REG_XX_TABLE 0x50240 -/* [RW 4] Load value for for cfc ac credit cnt. */ +/* [RW 4] Load value for cfc ac credit cnt. */ #define TM_REG_CFC_AC_CRDCNT_VAL 0x164208 /* [RW 4] Load value for cfc cld credit cnt. */ #define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210 diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index bc98e7f69ee..ede937ee50c 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -72,7 +72,7 @@ static int rionet_check = 0; static int rionet_capable = 1; /* - * This is a fast lookup table for for translating TX + * This is a fast lookup table for translating TX * Ethernet packets into a destination RIO device. It * could be made into a hash table to save memory depending * on system trade-offs. diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index c2f1b7df918..b5b83c43898 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -242,7 +242,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) * System suspend currently expects to be able to suspend the entire * device tree, device-at-a-time. If we failed selective suspend * reports, system suspend would fail; so the root hub code must claim - * success. That's lying to usbcore, and it matters for for runtime + * success. That's lying to usbcore, and it matters for runtime * PM scenarios with selective suspend and remote wakeup... 
*/ if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 2bfff30f470..48b9e889a18 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -37,7 +37,7 @@ typedef __u16 __bitwise __hc16; #define __hc16 __le16 #endif -/* statistics can be kept for for tuning/monitoring */ +/* statistics can be kept for tuning/monitoring */ struct ehci_stats { /* irq usage */ unsigned long normal; diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index c4ea51b55dc..f52ac276277 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h @@ -117,7 +117,7 @@ struct getbmapx { #define BMV_IF_VALID \ (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC) -/* bmv_oflags values - returned for for each non-header segment */ +/* bmv_oflags values - returned for each non-header segment */ #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ #define BMV_OF_DELALLOC 0x2 /* segment = delayed allocation */ #define BMV_OF_LAST 0x4 /* segment is the last in the file */ diff --git a/include/linux/usb.h b/include/linux/usb.h index a8fe05f224e..19fabc487be 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1071,7 +1071,7 @@ typedef void (*usb_complete_t)(struct urb *); * @start_frame: Returns the initial frame for isochronous transfers. * @number_of_packets: Lists the number of ISO transfer buffers. * @interval: Specifies the polling interval for interrupt or isochronous - * transfers. The units are frames (milliseconds) for for full and low + * transfers. The units are frames (milliseconds) for full and low * speed devices, and microframes (1/8 millisecond) for highspeed ones. * @error_count: Returns the number of ISO transfers that reported errors. * @context: For use in completion functions. 
This normally points to -- cgit v1.2.3-70-g09d2 From fd589a8f0a13f53a2dd580b1fe170633cf6b095f Mon Sep 17 00:00:00 2001 From: Anand Gadiyar Date: Thu, 16 Jul 2009 17:13:03 +0200 Subject: trivial: fix typo "to to" in multiple files Signed-off-by: Anand Gadiyar Signed-off-by: Jiri Kosina --- Documentation/hwmon/pc87427 | 2 +- Documentation/networking/regulatory.txt | 2 +- Documentation/scsi/scsi_fc_transport.txt | 2 +- arch/ia64/ia32/sys_ia32.c | 2 +- arch/um/include/shared/ptrace_user.h | 2 +- drivers/char/agp/uninorth-agp.c | 2 +- drivers/gpu/drm/mga/mga_state.c | 4 ++-- drivers/lguest/page_tables.c | 2 +- drivers/media/video/gspca/m5602/m5602_core.c | 2 +- drivers/mmc/host/mxcmmc.c | 2 +- drivers/mtd/maps/ixp2000.c | 2 +- drivers/mtd/ubi/eba.c | 2 +- drivers/mtd/ubi/ubi.h | 2 +- drivers/net/bonding/bond_3ad.c | 2 +- drivers/net/e1000/e1000_hw.c | 2 +- drivers/net/wireless/zd1211rw/zd_chip.c | 2 +- drivers/scsi/megaraid/megaraid_sas.c | 2 +- drivers/scsi/qla4xxx/ql4_os.c | 4 ++-- drivers/staging/rt2860/rtmp.h | 2 +- drivers/usb/serial/cypress_m8.h | 2 +- drivers/usb/serial/io_edgeport.c | 2 +- drivers/usb/serial/kl5kusb105.c | 2 +- fs/ext4/inode.c | 2 +- fs/gfs2/rgrp.c | 2 +- fs/ntfs/layout.h | 2 +- include/acpi/actypes.h | 2 +- include/acpi/platform/acgcc.h | 2 +- include/rdma/ib_cm.h | 2 +- kernel/tracepoint.c | 2 +- lib/zlib_deflate/deflate.c | 4 ++-- net/rxrpc/ar-call.c | 2 +- net/sched/sch_hfsc.c | 2 +- 32 files changed, 35 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/Documentation/hwmon/pc87427 b/Documentation/hwmon/pc87427 index d1ebbe510f3..db5cc1227a8 100644 --- a/Documentation/hwmon/pc87427 +++ b/Documentation/hwmon/pc87427 @@ -34,5 +34,5 @@ Fan rotation speeds are reported as 14-bit values from a gated clock signal. Speeds down to 83 RPM can be measured. An alarm is triggered if the rotation speed drops below a programmable -limit. Another alarm is triggered if the speed is too low to to be measured +limit. Another alarm is triggered if the speed is too low to be measured (including stalled or missing fan). diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt index eaa1a25946c..ee31369e9e5 100644 --- a/Documentation/networking/regulatory.txt +++ b/Documentation/networking/regulatory.txt @@ -96,7 +96,7 @@ Example code - drivers hinting an alpha2: This example comes from the zd1211rw device driver. You can start by having a mapping of your device's EEPROM country/regulatory -domain value to to a specific alpha2 as follows: +domain value to a specific alpha2 as follows: static struct zd_reg_alpha2_map reg_alpha2_map[] = { { ZD_REGDOMAIN_FCC, "US" }, diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt index d7f181701dc..aec6549ab09 100644 --- a/Documentation/scsi/scsi_fc_transport.txt +++ b/Documentation/scsi/scsi_fc_transport.txt @@ -378,7 +378,7 @@ Vport Disable/Enable: int vport_disable(struct fc_vport *vport, bool disable) where: - vport: Is vport to to be enabled or disabled + vport: Is vport to be enabled or disabled disable: If "true", the vport is to be disabled. If "false", the vport is to be enabled. 
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 16ef61a91d9..625ed8f76fc 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1270,7 +1270,7 @@ putreg (struct task_struct *child, int regno, unsigned int value) case PT_CS: if (value != __USER_CS) printk(KERN_ERR - "ia32.putreg: attempt to to set invalid segment register %d = %x\n", + "ia32.putreg: attempt to set invalid segment register %d = %x\n", regno, value); break; default: diff --git a/arch/um/include/shared/ptrace_user.h b/arch/um/include/shared/ptrace_user.h index 4bce6e01288..7fd8539bc19 100644 --- a/arch/um/include/shared/ptrace_user.h +++ b/arch/um/include/shared/ptrace_user.h @@ -29,7 +29,7 @@ extern int ptrace_setregs(long pid, unsigned long *regs_in); * recompilation. So, we use PTRACE_OLDSETOPTIONS in UML. * We also want to be able to build the kernel on 2.4, which doesn't * have PTRACE_OLDSETOPTIONS. So, if it is missing, we declare - * PTRACE_OLDSETOPTIONS to to be the same as PTRACE_SETOPTIONS. + * PTRACE_OLDSETOPTIONS to be the same as PTRACE_SETOPTIONS. * * On architectures, that start to support PTRACE_O_TRACESYSGOOD on * linux 2.6, PTRACE_OLDSETOPTIONS never is defined, and also isn't diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 20ef1bf5e72..703959eba45 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -270,7 +270,7 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode) if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) { /* - * We need to to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1, + * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1, * 2.2 and 2.3, Darwin do so. */ if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7) diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index b710fab21cb..a53b848e0f1 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -239,7 +239,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv) MGA_WR34, 0x00000000, MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff); - /* Padding required to to hardware bug. + /* Padding required due to hardware bug. */ DMA_BLOCK(MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, @@ -317,7 +317,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv) MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */ MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */ - /* Padding required to to hardware bug */ + /* Padding required due to hardware bug */ DMA_BLOCK(MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index a8d0aee3bc0..8aaad65c3bb 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -894,7 +894,7 @@ void guest_set_pte(struct lg_cpu *cpu, * tells us they've changed. When the Guest tries to use the new entry it will * fault and demand_page() will fix it up. 
* - * So with that in mind here's our code to to update a (top-level) PGD entry: + * So with that in mind here's our code to update a (top-level) PGD entry: */ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) { diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c index 8a5bba16ff3..7f1e5415850 100644 --- a/drivers/media/video/gspca/m5602/m5602_core.c +++ b/drivers/media/video/gspca/m5602/m5602_core.c @@ -56,7 +56,7 @@ int m5602_read_bridge(struct sd *sd, const u8 address, u8 *i2c_data) return (err < 0) ? err : 0; } -/* Writes a byte to to the m5602 */ +/* Writes a byte to the m5602 */ int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data) { int err; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index bc14bb1b057..88671529c45 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -512,7 +512,7 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) } /* For the DMA case the DMA engine handles the data transfer - * automatically. For non DMA we have to to it ourselves. + * automatically. For non DMA we have to do it ourselves. * Don't do it in interrupt context though. */ if (!mxcmci_use_dma(host) && host->data) diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c index d4fb9a3ab4d..1bdf0ee6d0b 100644 --- a/drivers/mtd/maps/ixp2000.c +++ b/drivers/mtd/maps/ixp2000.c @@ -184,7 +184,7 @@ static int ixp2000_flash_probe(struct platform_device *dev) info->map.bankwidth = 1; /* - * map_priv_2 is used to store a ptr to to the bank_setup routine + * map_priv_2 is used to store a ptr to the bank_setup routine */ info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup; diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index e4d9ef0c965..9f87c99189a 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -1065,7 +1065,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, } /* - * Now we have got to calculate how much data we have to to copy. In + * Now we have got to calculate how much data we have to copy. In * case of a static volume it is fairly easy - the VID header contains * the data size. In case of a dynamic volume it is more difficult - we * have to read the contents, cut 0xFF bytes from the end and copy only diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 6a5fe963378..47877942dec 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -570,7 +570,7 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, /* * ubi_rb_for_each_entry - walk an RB-tree. 
- * @rb: a pointer to type 'struct rb_node' to to use as a loop counter + * @rb: a pointer to type 'struct rb_node' to use as a loop counter * @pos: a pointer to RB-tree entry type to use as a loop counter * @root: RB-tree's root * @member: the name of the 'struct rb_node' within the RB-tree entry diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index cea5cfe23b7..c3fa31c9f2a 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1987,7 +1987,7 @@ void bond_3ad_unbind_slave(struct slave *slave) // find new aggregator for the related port(s) new_aggregator = __get_first_agg(port); for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) { - // if the new aggregator is empty, or it connected to to our port only + // if the new aggregator is empty, or it is connected to our port only if (!new_aggregator->lag_ports || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator)) { break; } diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index cda6b397550..45ac225a7aa 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -3035,7 +3035,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw) /* If TBI compatibility is was previously off, turn it on. For * compatibility with a TBI link partner, we will store bad * packets. Some frames have an additional byte on the end and - * will look like CRC errors to to the hardware. + * will look like CRC errors to the hardware. */ if (!hw->tbi_compatibility_on) { hw->tbi_compatibility_on = true; diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index 5e110a2328a..4e79a980013 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -368,7 +368,7 @@ error: return r; } -/* MAC address: if custom mac addresses are to to be used CR_MAC_ADDR_P1 and +/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and * CR_MAC_ADDR_P2 must be overwritten */ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 7dc3d1894b1..a39addc3a59 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -718,7 +718,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, * megasas_build_ldio - Prepares IOs to logical devices * @instance: Adapter soft state * @scp: SCSI command - * @cmd: Command to to be prepared + * @cmd: Command to be prepared * * Frames (and accompanying SGLs) for regular SCSI IOs use this function. */ diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 40e3cafb3a9..83c8b5e4fc8 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -1422,7 +1422,7 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev) /** * qla4xxx_del_from_active_array - returns an active srb * @ha: Pointer to host adapter structure. - * @index: index into to the active_array + * @index: index into the active_array * * This routine removes and returns the srb at the specified index **/ @@ -1500,7 +1500,7 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) /** * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. 
- * @ha: pointer to to HBA + * @ha: pointer to HBA * @t: target id * @l: lun id * diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h index 3f498f6f3ff..90fd40f2473 100644 --- a/drivers/staging/rt2860/rtmp.h +++ b/drivers/staging/rt2860/rtmp.h @@ -2060,7 +2060,7 @@ typedef struct _STA_ADMIN_CONFIG { BOOLEAN AdhocBGJoined; // Indicate Adhoc B/G Join. BOOLEAN Adhoc20NJoined; // Indicate Adhoc 20MHz N Join. #endif - // New for WPA, windows want us to to keep association information and + // New for WPA, windows want us to keep association information and // Fixed IEs from last association response NDIS_802_11_ASSOCIATION_INFORMATION AssocInfo; USHORT ReqVarIELen; // Length of next VIE include EID & Length diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index e772b01ac3a..1fd360e0406 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -57,7 +57,7 @@ #define UART_RI 0x10 /* ring indicator - modem - device to host */ #define UART_CD 0x40 /* carrier detect - modem - device to host */ #define CYP_ERROR 0x08 /* received from input report - device to host */ -/* Note - the below has nothing to to with the "feature report" reset */ +/* Note - the below has nothing to do with the "feature report" reset */ #define CONTROL_RESET 0x08 /* sent with output report - host to device */ /* End of RS-232 protocol definitions */ diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index dc0f832657e..b97960ac92f 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2540,7 +2540,7 @@ static int calc_baud_rate_divisor(int baudrate, int *divisor) /***************************************************************************** * send_cmd_write_uart_register - * this function builds up a uart register message and sends to to the device. + * this function builds up a uart register message and sends to the device. *****************************************************************************/ static int send_cmd_write_uart_register(struct edgeport_port *edge_port, __u8 regNum, __u8 regValue) diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index a61673133d7..f7373371b13 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -38,7 +38,7 @@ * 0.3a - implemented pools of write URBs * 0.3 - alpha version for public testing * 0.2 - TIOCMGET works, so autopilot(1) can be used! - * 0.1 - can be used to to pilot-xfer -p /dev/ttyUSB0 -l + * 0.1 - can be used to do pilot-xfer -p /dev/ttyUSB0 -l * * The driver skeleton is mainly based on mct_u232.c and various other * pieces of code shamelessly copied from the drivers/usb/serial/ directory. diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4abd683b963..3a798737e30 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2337,7 +2337,7 @@ static int __mpage_da_writepage(struct page *page, /* * Rest of the page in the page_vec * redirty then and skip then. We will - * try to to write them again after + * try to write them again after * starting a new transaction */ redirty_page_for_writepage(wbc, page); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 28c590b7c9d..8f1cfb02a6c 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -179,7 +179,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state) * always aligned to a 64 bit boundary. 
* * The size of the buffer is in bytes, but is it assumed that it is - * always ok to to read a complete multiple of 64 bits at the end + * always ok to read a complete multiple of 64 bits at the end * of the block in case the end is no aligned to a natural boundary. * * Return: the block number (bitmap buffer scope) that was found diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h index 50931b1ce4b..8b2549f672b 100644 --- a/fs/ntfs/layout.h +++ b/fs/ntfs/layout.h @@ -829,7 +829,7 @@ enum { /* Note, FILE_ATTR_VALID_SET_FLAGS masks out the old DOS VolId, the F_A_DEVICE, F_A_DIRECTORY, F_A_SPARSE_FILE, F_A_REPARSE_POINT, F_A_COMPRESSED, and F_A_ENCRYPTED and preserves the rest. This mask - is used to to obtain all flags that are valid for setting. */ + is used to obtain all flags that are valid for setting. */ /* * The flag FILE_ATTR_DUP_FILENAME_INDEX_PRESENT is present in all * FILENAME_ATTR attributes but not in the STANDARD_INFORMATION diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 37ba576d06e..8052236d1a3 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -288,7 +288,7 @@ typedef u32 acpi_physical_address; /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us - * to to tell the compiler in a per-variable manner that a variable + * to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index 935c5d7fc86..6aadbf84ae7 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -57,7 +57,7 @@ /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us - * to to tell the compiler warning in a per-variable manner that a variable + * to tell the compiler warning in a per-variable manner that a variable * is unused. */ #define ACPI_UNUSED_VAR __attribute__ ((unused)) diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 93885830430..c8f94e8db69 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -482,7 +482,7 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id, * message. * @cm_id: Connection identifier associated with the connection message. * @service_timeout: The lower 5-bits specify the maximum time required for - * the sender to reply to to the connection message. The upper 3-bits + * the sender to reply to the connection message. The upper 3-bits * specify additional control flags. * @private_data: Optional user-defined private data sent with the * message receipt acknowledgement. diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 9489a0a9b1b..cc89be5bc0f 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -48,7 +48,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; /* * Note about RCU : - * It is used to to delay the free of multiple probes array until a quiescent + * It is used to delay the free of multiple probes array until a quiescent * state is reached. * Tracepoint entries modifications are protected by the tracepoints_mutex. 
*/ diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c index c3e4a2baf83..46a31e5f49c 100644 --- a/lib/zlib_deflate/deflate.c +++ b/lib/zlib_deflate/deflate.c @@ -135,7 +135,7 @@ static const config configuration_table[10] = { /* =========================================================================== * Update a hash value with the given input byte - * IN assertion: all calls to to UPDATE_HASH are made with consecutive + * IN assertion: all calls to UPDATE_HASH are made with consecutive * input characters, so that a running hash key can be computed from the * previous key instead of complete recalculation each time. */ @@ -146,7 +146,7 @@ static const config configuration_table[10] = { * Insert string str in the dictionary and set match_head to the previous head * of the hash chain (the most recent string with same hash key). Return * the previous length of the hash chain. - * IN assertion: all calls to to INSERT_STRING are made with consecutive + * IN assertion: all calls to INSERT_STRING are made with consecutive * input characters and the first MIN_MATCH bytes of str are valid * (except for the last MIN_MATCH-1 bytes of the input file). */ diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c index d9231245a79..bc0019f704f 100644 --- a/net/rxrpc/ar-call.c +++ b/net/rxrpc/ar-call.c @@ -96,7 +96,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) } /* - * allocate a new client call and attempt to to get a connection slot for it + * allocate a new client call and attempt to get a connection slot for it */ static struct rxrpc_call *rxrpc_alloc_client_call( struct rxrpc_sock *rx, diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 375d64cb1a3..2c5c76be18f 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -77,7 +77,7 @@ * The service curve parameters are converted to the internal * representation. The slope values are scaled to avoid overflow. * the inverse slope values as well as the y-projection of the 1st - * segment are kept in order to to avoid 64-bit divide operations + * segment are kept in order to avoid 64-bit divide operations * that are expensive on 32-bit architectures. 
*/ -- cgit v1.2.3-70-g09d2 From 3dbda77e6f3375f87090cfce97b2551d3723521b Mon Sep 17 00:00:00 2001 From: Uwe Kleine-Koenig Date: Thu, 23 Jul 2009 08:31:31 +0200 Subject: trivial: fix typos "man[ae]g?ment" -> "management" Signed-off-by: Uwe Kleine-Koenig Signed-off-by: Jiri Kosina --- Documentation/DocBook/mtdnand.tmpl | 2 +- Documentation/DocBook/scsi.tmpl | 2 +- Documentation/scsi/ChangeLog.megaraid | 2 +- Documentation/sound/alsa/HD-Audio-Models.txt | 2 +- Documentation/trace/ftrace.txt | 2 +- arch/arm/Makefile | 2 +- arch/frv/lib/cache.S | 2 +- arch/mn10300/include/asm/cacheflush.h | 4 ++-- drivers/media/dvb/siano/smscoreapi.c | 2 +- drivers/media/dvb/siano/smscoreapi.h | 4 ++-- drivers/media/radio/radio-mr800.c | 2 +- drivers/message/fusion/mptbase.c | 4 ++-- drivers/net/macb.c | 2 +- drivers/net/wireless/ath/ath5k/reg.h | 2 +- drivers/net/wireless/atmel.c | 2 +- drivers/usb/host/xhci.h | 2 +- drivers/usb/wusbcore/wa-hc.h | 2 +- include/linux/mISDNif.h | 2 +- 18 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl index 8e145857fc9..df0d089d0fb 100644 --- a/Documentation/DocBook/mtdnand.tmpl +++ b/Documentation/DocBook/mtdnand.tmpl @@ -568,7 +568,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip) The blocks in which the tables are stored are procteted against accidental access by marking them bad in the memory bad block - table. The bad block table managment functions are allowed + table. The bad block table management functions are allowed to circumvernt this protection. diff --git a/Documentation/DocBook/scsi.tmpl b/Documentation/DocBook/scsi.tmpl index 10a150ae2a7..d87f4569e76 100644 --- a/Documentation/DocBook/scsi.tmpl +++ b/Documentation/DocBook/scsi.tmpl @@ -317,7 +317,7 @@ The SAS transport class contains common code to deal with SAS HBAs, an aproximated representation of SAS topologies in the driver model, - and various sysfs attributes to expose these topologies and managment + and various sysfs attributes to expose these topologies and management interfaces to userspace. diff --git a/Documentation/scsi/ChangeLog.megaraid b/Documentation/scsi/ChangeLog.megaraid index eaa4801f2ce..38e9e7cadc9 100644 --- a/Documentation/scsi/ChangeLog.megaraid +++ b/Documentation/scsi/ChangeLog.megaraid @@ -514,7 +514,7 @@ iv. Remove yield() while mailbox handshake in synchronous commands v. Remove redundant __megaraid_busywait_mbox routine -vi. Fix bug in the managment module, which causes a system lockup when the +vi. Fix bug in the management module, which causes a system lockup when the IO module is loaded and then unloaded, followed by executing any management utility. The current version of management module does not handle the adapter unregister properly. diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 97eebd63bed..f1708b79f96 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -387,7 +387,7 @@ STAC92HD73* STAC92HD83* =========== ref Reference board - mic-ref Reference board with power managment for ports + mic-ref Reference board with power management for ports dell-s14 Dell laptop auto BIOS setup (default) diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index 1b6292bbdd6..957b22fde2d 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -133,7 +133,7 @@ of ftrace. 
Here is a list of some of the key files: than requested, the rest of the page will be used, making the actual allocation bigger than requested. ( Note, the size may not be a multiple of the page size - due to buffer managment overhead. ) + due to buffer management overhead. ) This can only be updated when the current_tracer is set to "nop". diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7350557a81e..54661125a8b 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -25,7 +25,7 @@ KBUILD_CFLAGS +=$(call cc-option,-marm,) # Select a platform tht is kept up-to-date KBUILD_DEFCONFIG := versatile_defconfig -# defines filename extension depending memory manement type. +# defines filename extension depending memory management type. ifeq ($(CONFIG_MMU),) MMUEXT := -nommu endif diff --git a/arch/frv/lib/cache.S b/arch/frv/lib/cache.S index 0e10ad8dc46..0c4fb204911 100644 --- a/arch/frv/lib/cache.S +++ b/arch/frv/lib/cache.S @@ -1,4 +1,4 @@ -/* cache.S: cache managment routines +/* cache.S: cache management routines * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h index 2db746a251f..1a55d61f0d0 100644 --- a/arch/mn10300/include/asm/cacheflush.h +++ b/arch/mn10300/include/asm/cacheflush.h @@ -17,7 +17,7 @@ #include /* - * virtually-indexed cache managment (our cache is physically indexed) + * virtually-indexed cache management (our cache is physically indexed) */ #define flush_cache_all() do {} while (0) #define flush_cache_mm(mm) do {} while (0) @@ -31,7 +31,7 @@ #define flush_dcache_mmap_unlock(mapping) do {} while (0) /* - * physically-indexed cache managment + * physically-indexed cache management */ #ifndef CONFIG_MN10300_CACHE_DISABLED diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c index bd9ab9d0d12..fa6a62369a7 100644 --- a/drivers/media/dvb/siano/smscoreapi.c +++ b/drivers/media/dvb/siano/smscoreapi.c @@ -1367,7 +1367,7 @@ int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level) &msg, sizeof(msg)); } -/* new GPIO managment implementation */ +/* new GPIO management implementation */ static int GetGpioPinParams(u32 PinNum, u32 *pTranslatedPinNum, u32 *pGroupNum, u32 *pGroupCfg) { diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h index f1108c64e89..eec18aaf551 100644 --- a/drivers/media/dvb/siano/smscoreapi.h +++ b/drivers/media/dvb/siano/smscoreapi.h @@ -657,12 +657,12 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev); extern void smscore_putbuffer(struct smscore_device_t *coredev, struct smscore_buffer_t *cb); -/* old GPIO managment */ +/* old GPIO management */ int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin, struct smscore_config_gpio *pinconfig); int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level); -/* new GPIO managment */ +/* new GPIO management */ extern int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum, struct smscore_gpio_config *pGpioConfig); extern int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum, diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c index 575bf9d8941..a1239083472 100644 --- a/drivers/media/radio/radio-mr800.c +++ b/drivers/media/radio/radio-mr800.c @@ -46,7 +46,7 @@ * Version 0.11: Converted to v4l2_device. 
* * Many things to do: - * - Correct power managment of device (suspend & resume) + * - Correct power management of device (suspend & resume) * - Add code for scanning and smooth tuning * - Add code for sensitivity value * - Correct mistakes diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 76fa2ee0b57..610e914abe6 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -6821,7 +6821,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh *size = y; } /** - * mpt_set_taskmgmt_in_progress_flag - set flags associated with task managment + * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management * @ioc: Pointer to MPT_ADAPTER structure * * Returns 0 for SUCCESS or -1 if FAILED. @@ -6854,7 +6854,7 @@ mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag); /** - * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task managment + * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management * @ioc: Pointer to MPT_ADAPTER structure * **/ diff --git a/drivers/net/macb.c b/drivers/net/macb.c index fb65b427c69..1d0d4d9ab62 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -241,7 +241,7 @@ static int macb_mii_init(struct macb *bp) struct eth_platform_data *pdata; int err = -ENXIO, i; - /* Enable managment port */ + /* Enable management port */ macb_writel(bp, NCR, MACB_BIT(MPE)); bp->mii_bus = mdiobus_alloc(); diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h index debad07d990..c63ea6afd96 100644 --- a/drivers/net/wireless/ath/ath5k/reg.h +++ b/drivers/net/wireless/ath/ath5k/reg.h @@ -982,7 +982,7 @@ #define AR5K_5414_CBCFG_BUF_DIS 0x10 /* Disable buffer */ /* - * PCI-E Power managment configuration + * PCI-E Power management configuration * and status register [5424+] */ #define AR5K_PCIE_PM_CTL 0x4068 /* Register address */ diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index a3b36b3a9d6..cce188837d1 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -3330,7 +3330,7 @@ static void atmel_smooth_qual(struct atmel_private *priv) priv->wstats.qual.updated &= ~IW_QUAL_QUAL_INVALID; } -/* deals with incoming managment frames. */ +/* deals with incoming management frames. */ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, u16 frame_len, u8 rssi) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index d31d32206ba..ffe1625d4e1 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1150,7 +1150,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); -/* xHCI memory managment */ +/* xHCI memory management */ void xhci_mem_cleanup(struct xhci_hcd *xhci); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h index 586d350cdb4..d6bea3e0b54 100644 --- a/drivers/usb/wusbcore/wa-hc.h +++ b/drivers/usb/wusbcore/wa-hc.h @@ -47,7 +47,7 @@ * to an endpoint on a WUSB device that is connected to a * HWA RC. 
* - * xfer Transfer managment -- this is all the code that gets a + * xfer Transfer management -- this is all the code that gets a * buffer and pushes it to a device (or viceversa). * * * Some day a lot of this code will be shared between this driver and diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 536ca12442c..78c3bed1c3f 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -104,7 +104,7 @@ #define DL_UNITDATA_IND 0x3108 #define DL_INFORMATION_IND 0x0008 -/* intern layer 2 managment */ +/* intern layer 2 management */ #define MDL_ASSIGN_REQ 0x1804 #define MDL_ASSIGN_IND 0x1904 #define MDL_REMOVE_REQ 0x1A04 -- cgit v1.2.3-70-g09d2 From a419aef8b858a2bdb98df60336063d28df4b272f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 18 Aug 2009 11:18:35 -0700 Subject: trivial: remove unnecessary semicolons Signed-off-by: Joe Perches Signed-off-by: Jiri Kosina --- arch/s390/hypfs/inode.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/sparc/kernel/irq_64.c | 2 +- arch/um/drivers/net_kern.c | 2 +- drivers/block/DAC960.c | 4 ++-- drivers/block/swim3.c | 2 +- drivers/char/epca.c | 2 +- drivers/gpu/drm/i915/intel_dp.c | 2 +- drivers/gpu/drm/radeon/r300.c | 4 ++-- drivers/ide/ide-probe.c | 2 +- drivers/ide/umc8672.c | 4 ++-- drivers/isdn/capi/capiutil.c | 2 +- drivers/macintosh/rack-meter.c | 2 +- drivers/net/arcnet/arc-rawmode.c | 1 - drivers/net/arcnet/capmode.c | 1 - drivers/net/gianfar_ethtool.c | 2 +- drivers/net/ibm_newemac/core.c | 8 ++++---- drivers/net/igb/igb_main.c | 2 +- drivers/net/ll_temac_main.c | 2 +- drivers/net/ni52.c | 4 ++-- drivers/net/qlge/qlge_main.c | 4 ++-- drivers/net/skfp/pcmplc.c | 2 +- drivers/net/skfp/pmf.c | 8 ++++---- drivers/net/skge.c | 2 +- drivers/net/sky2.c | 2 +- drivers/net/vxge/vxge-config.h | 2 +- drivers/net/vxge/vxge-main.c | 2 +- drivers/rtc/rtc-omap.c | 2 +- drivers/s390/block/dasd_eckd.c | 2 +- drivers/s390/net/netiucv.c | 2 +- drivers/s390/scsi/zfcp_scsi.c | 2 +- drivers/scsi/bnx2i/bnx2i_hwi.c | 2 +- drivers/scsi/lpfc/lpfc_ct.c | 2 +- drivers/spi/omap_uwire.c | 2 +- drivers/spi/spi_s3c24xx.c | 2 +- drivers/usb/class/cdc-wdm.c | 2 -- drivers/usb/serial/spcp8x5.c | 2 +- drivers/uwb/i1480/i1480u-wlp/netdev.c | 2 +- drivers/video/cfbcopyarea.c | 2 +- drivers/video/imxfb.c | 2 +- drivers/video/s3c2410fb.c | 2 +- drivers/xen/balloon.c | 2 +- fs/autofs/dirhash.c | 2 +- fs/btrfs/tree-log.c | 2 +- fs/cifs/cifs_dfs_ref.c | 2 +- fs/nfs/callback_xdr.c | 2 +- fs/ocfs2/quota_global.c | 2 +- include/scsi/fc/fc_fc2.h | 3 +-- kernel/trace/trace_hw_branches.c | 2 +- net/wireless/wext-compat.c | 2 +- sound/oss/sys_timer.c | 3 --- sound/soc/codecs/wm9081.c | 2 +- sound/soc/pxa/pxa-ssp.c | 2 +- sound/soc/s3c24xx/s3c24xx_uda134x.c | 2 +- 54 files changed, 61 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index bd9914b8948..1baa635717b 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -496,7 +496,7 @@ static int __init hypfs_init(void) } s390_kobj = kobject_create_and_add("s390", hypervisor_kobj); if (!s390_kobj) { - rc = -ENOMEM;; + rc = -ENOMEM; goto fail_sysfs; } rc = register_filesystem(&hypfs_type); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2c2f9835341..43486c2408e 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -478,7 +478,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) if (!inti) return -ENOMEM; - inti->type = KVM_S390_PROGRAM_INT;; + inti->type = 
KVM_S390_PROGRAM_INT; inti->pgm.code = code; VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 8daab33fc17..8ab1d4728a4 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -229,7 +229,7 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) tid = ((a << IMAP_AID_SHIFT) | (n << IMAP_NID_SHIFT)); tid &= (IMAP_AID_SAFARI | - IMAP_NID_SAFARI);; + IMAP_NID_SAFARI); } } else { tid = cpuid << IMAP_TID_SHIFT; diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index f114813ae25..a74245ae3a8 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -533,7 +533,7 @@ static int eth_parse(char *str, int *index_out, char **str_out, char **error_out) { char *end; - int n, err = -EINVAL;; + int n, err = -EINVAL; n = simple_strtoul(str, &end, 0); if (end == str) { diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index b839af1702e..26caa3ffaff 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -6653,7 +6653,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request, else ErrorCode = get_user(ControllerNumber, &UserSpaceControllerInfo->ControllerNumber); if (ErrorCode != 0) - break;; + break; ErrorCode = -ENXIO; if (ControllerNumber < 0 || ControllerNumber > DAC960_ControllerCount - 1) { @@ -6661,7 +6661,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request, } Controller = DAC960_Controllers[ControllerNumber]; if (Controller == NULL) - break;; + break; memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T)); ControllerInfo.ControllerNumber = ControllerNumber; ControllerInfo.FirmwareType = Controller->FirmwareType; diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 80df93e3cdd..572ec6164f2 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1062,7 +1062,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) goto out_release; } fs->swim3_intr = macio_irq(mdev, 0); - fs->dma_intr = macio_irq(mdev, 1);; + fs->dma_intr = macio_irq(mdev, 1); fs->cur_cyl = -1; fs->cur_sector = -1; fs->secpercyl = 36; diff --git a/drivers/char/epca.c b/drivers/char/epca.c index ff647ca1c48..9d589e3144d 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c @@ -2239,7 +2239,7 @@ static void do_softint(struct work_struct *work) struct channel *ch = container_of(work, struct channel, tqueue); /* Called in response to a modem change event */ if (ch && ch->magic == EPCA_MAGIC) { - struct tty_struct *tty = tty_port_tty_get(&ch->port);; + struct tty_struct *tty = tty_port_tty_get(&ch->port); if (tty && tty->driver_data) { if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2b914d73207..f4856a51047 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -232,7 +232,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) { - uint32_t d = pack_aux(send + i, send_bytes - i);; + uint32_t d = pack_aux(send + i, send_bytes - i); I915_WRITE(ch_data + i, d); } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 051bca6e3a4..b2f5d32efb0 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1319,11 +1319,11 @@ static int r300_packet0_check(struct 
radeon_cs_parser *p, case 0x443C: /* TX_FILTER0_[0-15] */ i = (reg - 0x4400) >> 2; - tmp = ib_chunk->kdata[idx] & 0x7;; + tmp = ib_chunk->kdata[idx] & 0x7; if (tmp == 2 || tmp == 4 || tmp == 6) { track->textures[i].roundup_w = false; } - tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;; + tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; if (tmp == 2 || tmp == 4 || tmp == 6) { track->textures[i].roundup_h = false; } diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 8de442cbee9..63c53d65e87 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1212,7 +1212,7 @@ static int ide_find_port_slot(const struct ide_port_info *d) { int idx = -ENOENT; u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1; - u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;; + u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0; /* * Claim an unassigned slot. diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c index 0608d41fb6d..60f936e2319 100644 --- a/drivers/ide/umc8672.c +++ b/drivers/ide/umc8672.c @@ -170,9 +170,9 @@ static int __init umc8672_init(void) goto out; if (umc8672_probe() == 0) - return 0;; + return 0; out: - return -ENODEV;; + return -ENODEV; } module_init(umc8672_init); diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c index 16f2e465e5f..26626eead82 100644 --- a/drivers/isdn/capi/capiutil.c +++ b/drivers/isdn/capi/capiutil.c @@ -1019,7 +1019,7 @@ int __init cdebug_init(void) if (!g_debbuf->buf) { kfree(g_cmsg); kfree(g_debbuf); - return -ENOMEM;; + return -ENOMEM; } g_debbuf->size = CDEBUG_GSIZE; g_debbuf->buf[0] = 0; diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index a98ab72adf9..93fb32038b1 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -274,7 +274,7 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm) if (cpu > 1) continue; - rcpu = &rm->cpu[cpu];; + rcpu = &rm->cpu[cpu]; rcpu->prev_idle = get_cpu_idle_time(cpu); rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64()); schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer, diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 646dfc5f50c..8ea9c7545c1 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c @@ -123,7 +123,6 @@ static void rx(struct net_device *dev, int bufnum, BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); skb->protocol = cpu_to_be16(ETH_P_ARCNET); -; netif_rx(skb); } diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 083e21094b2..66bcbbb6bab 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -149,7 +149,6 @@ static void rx(struct net_device *dev, int bufnum, BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); skb->protocol = cpu_to_be16(ETH_P_ARCNET); -; netif_rx(skb); } diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 2234118eedb..6c144b525b4 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -293,7 +293,7 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals rxtime = get_ictt_value(priv->rxic); rxcount = get_icft_value(priv->rxic); txtime = get_ictt_value(priv->txic); - txcount = get_icft_value(priv->txic);; + txcount = get_icft_value(priv->txic); cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime); cvals->rx_max_coalesced_frames = rxcount; diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 
1d7d7fef414..89c82c5e63e 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2556,13 +2556,13 @@ static int __devinit emac_init_config(struct emac_instance *dev) if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0)) dev->mdio_ph = 0; if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0)) - dev->zmii_ph = 0;; + dev->zmii_ph = 0; if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0)) - dev->zmii_port = 0xffffffff;; + dev->zmii_port = 0xffffffff; if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0)) - dev->rgmii_ph = 0;; + dev->rgmii_ph = 0; if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0)) - dev->rgmii_port = 0xffffffff;; + dev->rgmii_port = 0xffffffff; if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0)) dev->fifo_entry_size = 16; if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0)) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index d2639c4a086..5d6c1530a8c 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -3966,7 +3966,7 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter, /* VFs are limited to using the MTA hash table for their multicast * addresses */ for (i = 0; i < n; i++) - vf_data->vf_mc_hashes[i] = hash_list[i];; + vf_data->vf_mc_hashes[i] = hash_list[i]; /* Flush and reset the mta with the new values */ igb_set_rx_mode(adapter->netdev); diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index da8d0a0ca94..f2a197fd47a 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -865,7 +865,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) dcrs = dcr_resource_start(np, 0); if (dcrs == 0) { dev_err(&op->dev, "could not get DMA register address\n"); - goto nodev;; + goto nodev; } lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); dev_dbg(&op->dev, "DCR base: %x\n", dcrs); diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index bd0ac690d12..aad3b370c56 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -615,10 +615,10 @@ static int init586(struct net_device *dev) /* addr_len |!src_insert |pre-len |loopback */ writeb(0x2e, &cfg_cmd->adr_len); writeb(0x00, &cfg_cmd->priority); - writeb(0x60, &cfg_cmd->ifs);; + writeb(0x60, &cfg_cmd->ifs); writeb(0x00, &cfg_cmd->time_low); writeb(0xf2, &cfg_cmd->time_high); - writeb(0x00, &cfg_cmd->promisc);; + writeb(0x00, &cfg_cmd->promisc); if (dev->flags & IFF_ALLMULTI) { int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6; if (num_addrs > len) { diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 22052925782..7783c5db81d 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -2630,7 +2630,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) FLAGS_LI; /* Load irq delay values */ if (rx_ring->lbq_len) { cqicb->flags |= FLAGS_LL; /* Load lbq values */ - tmp = (u64)rx_ring->lbq_base_dma;; + tmp = (u64)rx_ring->lbq_base_dma; base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect; page_entries = 0; do { @@ -2654,7 +2654,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) } if (rx_ring->sbq_len) { cqicb->flags |= FLAGS_LS; /* Load sbq values */ - tmp = (u64)rx_ring->sbq_base_dma;; + tmp = (u64)rx_ring->sbq_base_dma; base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect; page_entries = 0; do { diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c 
index f1df2ec8ad4..e6b33ee05ed 100644 --- a/drivers/net/skfp/pcmplc.c +++ b/drivers/net/skfp/pcmplc.c @@ -960,7 +960,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) /*PC88b*/ if (!phy->cf_join) { phy->cf_join = TRUE ; - queue_event(smc,EVENT_CFM,CF_JOIN+np) ; ; + queue_event(smc,EVENT_CFM,CF_JOIN+np) ; } if (cmd == PC_JOIN) GO_STATE(PC8_ACTIVE) ; diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c index 79e665e0853..a320fdb3727 100644 --- a/drivers/net/skfp/pmf.c +++ b/drivers/net/skfp/pmf.c @@ -807,9 +807,9 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, mib_p->fddiPORTLerFlag ; sp->p4050_pad = 0 ; sp->p4050_cutoff = - mib_p->fddiPORTLer_Cutoff ; ; + mib_p->fddiPORTLer_Cutoff ; sp->p4050_alarm = - mib_p->fddiPORTLer_Alarm ; ; + mib_p->fddiPORTLer_Alarm ; sp->p4050_estimate = mib_p->fddiPORTLer_Estimate ; sp->p4050_reject_ct = @@ -829,7 +829,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, sp->p4051_porttype = mib_p->fddiPORTMy_Type ; sp->p4051_connectstate = - mib_p->fddiPORTConnectState ; ; + mib_p->fddiPORTConnectState ; sp->p4051_pc_neighbor = mib_p->fddiPORTNeighborType ; sp->p4051_pc_withhold = @@ -853,7 +853,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, struct smt_p_4053 *sp ; sp = (struct smt_p_4053 *) to ; sp->p4053_multiple = - mib_p->fddiPORTMultiple_P ; ; + mib_p->fddiPORTMultiple_P ; sp->p4053_availablepaths = mib_p->fddiPORTAvailablePaths ; sp->p4053_currentpath = diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 62e852e21ab..55bad408196 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -215,7 +215,7 @@ static void skge_wol_init(struct skge_port *skge) if (skge->wol & WAKE_MAGIC) ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; else - ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;; + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 4bb52e9cd37..15140f9f2e9 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -765,7 +765,7 @@ static void sky2_wol_init(struct sky2_port *sky2) if (sky2->wol & WAKE_MAGIC) ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; else - ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;; + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h index 62779a520ca..3e94f0ce090 100644 --- a/drivers/net/vxge/vxge-config.h +++ b/drivers/net/vxge/vxge-config.h @@ -1541,7 +1541,7 @@ void vxge_hw_ring_rxd_1b_info_get( rxd_info->l4_cksum_valid = (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0); rxd_info->l4_cksum = - (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);; + (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0); rxd_info->frame = (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0); rxd_info->proto = diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index b378037a29b..068d7a9d3e3 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -2350,7 +2350,7 @@ static int vxge_enable_msix(struct vxgedev *vdev) enum vxge_hw_status status; /* 0 - Tx, 1 - Rx */ int tim_msix_id[4]; - int alarm_msix_id = 0, 
msix_intr_vect = 0;; + int alarm_msix_id = 0, msix_intr_vect = 0; vdev->intr_cnt = 0; /* allocate msix vectors */ diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index bd1ce8e2bc1..0587d53987f 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -430,7 +430,7 @@ fail: static int __exit omap_rtc_remove(struct platform_device *pdev) { - struct rtc_device *rtc = platform_get_drvdata(pdev);; + struct rtc_device *rtc = platform_get_drvdata(pdev); device_init_wakeup(&pdev->dev, 0); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a1ce573648a..bd9fe2e36dc 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -706,7 +706,7 @@ static int dasd_eckd_generate_uid(struct dasd_device *device, sizeof(uid->serial) - 1); EBCASC(uid->serial, sizeof(uid->serial) - 1); uid->ssid = private->gneq->subsystemID; - uid->real_unit_addr = private->ned->unit_addr;; + uid->real_unit_addr = private->ned->unit_addr; if (private->sneq) { uid->type = private->sneq->sua_flags; if (uid->type == UA_BASE_PAV_ALIAS) diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index a4b2c576144..c84eadd3602 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -2113,7 +2113,7 @@ static ssize_t remove_write (struct device_driver *drv, IUCV_DBF_TEXT(trace, 3, __func__); if (count >= IFNAMSIZ) - count = IFNAMSIZ - 1;; + count = IFNAMSIZ - 1; for (i = 0, p = buf; i < count && *p; i++, p++) { if (*p == '\n' || *p == ' ') diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 3ff726afafc..0e1a34627a2 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -102,7 +102,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) || !(status & ZFCP_STATUS_COMMON_RUNNING))) { zfcp_scsi_command_fail(scpnt, DID_ERROR); - return 0;; + return 0; } ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 906cef5cda8..41e1b0e7e2e 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1340,7 +1340,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session, resp_hdr->opcode = login->op_code; resp_hdr->flags = login->response_flags; resp_hdr->max_version = login->version_max; - resp_hdr->active_version = login->version_active;; + resp_hdr->active_version = login->version_active; resp_hdr->hlength = 0; hton24(resp_hdr->dlength, login->data_length); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 9df7ed38e1b..9a1bd9534d7 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1207,7 +1207,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, vport->ct_flags &= ~FC_CT_RFF_ID; CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RFF_ID); - CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);; + CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rff.fbits = FC4_FEATURE_INIT; CtReq->un.rff.type_code = FC_FCP_DATA; cmpl = lpfc_cmpl_ct_cmd_rff_id; diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c index 8980a5640bd..e75ba9b2889 100644 --- a/drivers/spi/omap_uwire.c +++ b/drivers/spi/omap_uwire.c @@ -213,7 +213,7 @@ static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t) unsigned bits = ust->bits_per_word; unsigned bytes; u16 val, w; - int status = 0;; + int status = 0; if (!t->tx_buf && !t->rx_buf) return 0; diff 
--git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index 3f3119d760d..6ba8aece90b 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -388,7 +388,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev) err_no_iores: err_no_pdata: - spi_master_put(hw->master);; + spi_master_put(hw->master); err_nomem: return err; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index ba589d4ca8b..8c64c018b67 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -506,8 +506,6 @@ static int wdm_open(struct inode *inode, struct file *file) desc = usb_get_intfdata(intf); if (test_bit(WDM_DISCONNECTING, &desc->flags)) goto out; - - ; file->private_data = desc; rv = usb_autopm_get_interface(desc->intf); diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 61e7c40b94f..1e58220403d 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -544,7 +544,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty, } /* Set Baud Rate */ - baud = tty_get_baud_rate(tty);; + baud = tty_get_baud_rate(tty); switch (baud) { case 300: buf[0] = 0x00; break; case 600: buf[0] = 0x01; break; diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c index 73055530e60..b236e696994 100644 --- a/drivers/uwb/i1480/i1480u-wlp/netdev.c +++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c @@ -214,7 +214,7 @@ int i1480u_open(struct net_device *net_dev) netif_wake_queue(net_dev); #ifdef i1480u_FLOW_CONTROL - result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; + result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL); if (result < 0) { dev_err(dev, "Can't submit notification URB: %d\n", result); goto error_notif_urb_submit; diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c index df03f3776dc..79e5f40e648 100644 --- a/drivers/video/cfbcopyarea.c +++ b/drivers/video/cfbcopyarea.c @@ -114,7 +114,7 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, d0 >>= right; } else if (src_idx+n <= bits) { // Single source word - d0 <<= left;; + d0 <<= left; } else { // 2 source words d1 = FB_READL(src + 1); diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c index 30ae3022f63..66358fa825f 100644 --- a/drivers/video/imxfb.c +++ b/drivers/video/imxfb.c @@ -710,7 +710,7 @@ static int __init imxfb_probe(struct platform_device *pdev) fbi->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(fbi->clk)) { - ret = PTR_ERR(fbi->clk);; + ret = PTR_ERR(fbi->clk); dev_err(&pdev->dev, "unable to get clock: %d\n", ret); goto failed_getclock; } diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index 7da0027e240..5ffca2adc6a 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c @@ -1119,7 +1119,7 @@ int __init s3c2410fb_init(void) int ret = platform_driver_register(&s3c2410fb_driver); if (ret == 0) - ret = platform_driver_register(&s3c2412fb_driver);; + ret = platform_driver_register(&s3c2412fb_driver); return ret; } diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index f5bbd9e8341..4d1b322d2b4 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -214,7 +214,7 @@ static int increase_reservation(unsigned long nr_pages) page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); - frame_list[i] = page_to_pfn(page);; + frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } diff --git a/fs/autofs/dirhash.c b/fs/autofs/dirhash.c index 2316e944a10..e947915109e 100644 --- 
a/fs/autofs/dirhash.c +++ b/fs/autofs/dirhash.c @@ -90,7 +90,7 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb, DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name)); continue; } - while (d_mountpoint(path.dentry) && follow_down(&path)); + while (d_mountpoint(path.dentry) && follow_down(&path)) ; umount_ok = may_umount(path.mnt); path_put(&path); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d91b0de7c50..30c0d45c1b5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2605,7 +2605,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, extent); cs = btrfs_file_extent_offset(src, extent); cl = btrfs_file_extent_num_bytes(src, - extent);; + extent); if (btrfs_file_extent_compression(src, extent)) { cs = 0; diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 606912d8f2a..5e8bc99221c 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -142,7 +142,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata, rc = dns_resolve_server_name_to_ip(*devname, &srvIP); if (rc != 0) { cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", - __func__, *devname, rc));; + __func__, *devname, rc)); goto compose_mount_options_err; } /* md_len = strlen(...) + 12 for 'sep+prefixpath=' diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index e5a2dac5f71..76b0aa0f73b 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -222,7 +222,7 @@ static unsigned decode_sessionid(struct xdr_stream *xdr, p = read_buf(xdr, len); if (unlikely(p == NULL)) - return htonl(NFS4ERR_RESOURCE);; + return htonl(NFS4ERR_RESOURCE); memcpy(sid->data, p, len); return 0; diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 44f2a5e1d04..0578cc14b7a 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -154,7 +154,7 @@ static int ocfs2_get_quota_block(struct inode *inode, int block, err = -EIO; mlog_errno(err); } - return err;; + return err; } /* Read data from global quotafile - avoid pagecache and such because we cannot diff --git a/include/scsi/fc/fc_fc2.h b/include/scsi/fc/fc_fc2.h index cff8a8c22f5..f87777d0d5b 100644 --- a/include/scsi/fc/fc_fc2.h +++ b/include/scsi/fc/fc_fc2.h @@ -92,8 +92,7 @@ struct fc_esb { __u8 _esb_resvd[4]; __u8 esb_service_params[112]; /* TBD */ __u8 esb_seq_status[8]; /* sequence statuses, 8 bytes each */ -} __attribute__((packed));; - +} __attribute__((packed)); /* * Define expected size for ASSERTs. 
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index ca7d7c4d0c2..23b63859130 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c @@ -155,7 +155,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) seq_print_ip_sym(seq, it->from, symflags) && trace_seq_printf(seq, "\n")) return TRACE_TYPE_HANDLED; - return TRACE_TYPE_PARTIAL_LINE;; + return TRACE_TYPE_PARTIAL_LINE; } return TRACE_TYPE_UNHANDLED; } diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 429dd06a4ec..561a45cf2a6 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -834,7 +834,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev, return 0; } - return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);; + return rdev->ops->set_tx_power(wdev->wiphy, type, dbm); } EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower); diff --git a/sound/oss/sys_timer.c b/sound/oss/sys_timer.c index 107534477a2..8db6aefe15e 100644 --- a/sound/oss/sys_timer.c +++ b/sound/oss/sys_timer.c @@ -100,9 +100,6 @@ def_tmr_open(int dev, int mode) curr_tempo = 60; curr_timebase = 100; opened = 1; - - ; - { def_tmr.expires = (1) + jiffies; add_timer(&def_tmr); diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c index c64e55aa63b..686e5aa9720 100644 --- a/sound/soc/codecs/wm9081.c +++ b/sound/soc/codecs/wm9081.c @@ -1027,7 +1027,7 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream, - wm9081->fs); for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) { cur_val = abs((wm9081->sysclk_rate / - clk_sys_rates[i].ratio) - wm9081->fs);; + clk_sys_rates[i].ratio) - wm9081->fs); if (cur_val < best_val) { best = i; best_val = cur_val; diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index 5b9ed646478..d11a6d7e384 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -351,7 +351,7 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, do_div(tmp, freq_out); val = tmp; - val = (val << 16) | 64;; + val = (val << 16) | 64; ssp_write_reg(ssp, SSACDD, val); ssacd |= (0x6 << 4); diff --git a/sound/soc/s3c24xx/s3c24xx_uda134x.c b/sound/soc/s3c24xx/s3c24xx_uda134x.c index 8e79a416db5..c215d32d632 100644 --- a/sound/soc/s3c24xx/s3c24xx_uda134x.c +++ b/sound/soc/s3c24xx/s3c24xx_uda134x.c @@ -67,7 +67,7 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream) { int ret = 0; #ifdef ENFORCE_RATES - struct snd_pcm_runtime *runtime = substream->runtime;; + struct snd_pcm_runtime *runtime = substream->runtime; #endif mutex_lock(&clk_lock); -- cgit v1.2.3-70-g09d2 From 325253a6b2de4bdfa9ef0e28b5df8a4a4fe2b677 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 14 Jul 2009 17:06:02 +0100 Subject: backlight: Allow drivers to update the core, and generate events on changes Certain hardware will send us events when the backlight brightness changes. Add a function to update the value in the core, and additionally send a uevent so that userspace can pop up appropriate UI. The uevents are flagged depending on whether the update originated in the kernel or from userspace, making it easier to only display UI at the appropriate time. 
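As a usage sketch (the driver structure and field names below are hypothetical; only backlight_force_update() and BACKLIGHT_UPDATE_HOTKEY come from this patch), a platform driver that is told by firmware about a brightness change would simply call the new helper:

#include <linux/backlight.h>

/* Hypothetical driver-private data; 'bd' is the backlight_device this
 * driver registered earlier with backlight_device_register(). */
struct my_platform_priv {
	struct backlight_device *bd;
};

/* Called when firmware/EC reports that it changed the panel brightness
 * on its own (e.g. a hotkey handled outside the kernel). */
static void my_brightness_notify(struct my_platform_priv *priv)
{
	/*
	 * Re-reads props.brightness via ops->get_brightness() and emits
	 * a KOBJ_CHANGE uevent with SOURCE=hotkey so userspace can show
	 * an OSD without polling sysfs.
	 */
	backlight_force_update(priv->bd, BACKLIGHT_UPDATE_HOTKEY);
}
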
Signed-off-by: Matthew Garrett Signed-off-by: Richard Purdie --- drivers/video/backlight/backlight.c | 41 +++++++++++++++++++++++++++++++++++++ include/linux/backlight.h | 7 +++++++ 2 files changed, 48 insertions(+) (limited to 'include') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 157057c79ca..6e1446ae7f5 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -73,6 +73,26 @@ static inline void backlight_unregister_fb(struct backlight_device *bd) } #endif /* CONFIG_FB */ +static void backlight_generate_event(struct backlight_device *bd, + enum backlight_update_reason reason) +{ + char *envp[2]; + + switch (reason) { + case BACKLIGHT_UPDATE_SYSFS: + envp[0] = "SOURCE=sysfs"; + break; + case BACKLIGHT_UPDATE_HOTKEY: + envp[0] = "SOURCE=hotkey"; + break; + default: + envp[0] = "SOURCE=unknown"; + break; + } + envp[1] = NULL; + kobject_uevent_env(&bd->dev.kobj, KOBJ_CHANGE, envp); +} + static ssize_t backlight_show_power(struct device *dev, struct device_attribute *attr,char *buf) { @@ -142,6 +162,8 @@ static ssize_t backlight_store_brightness(struct device *dev, } mutex_unlock(&bd->ops_lock); + backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS); + return rc; } @@ -213,6 +235,25 @@ static struct device_attribute bl_device_attributes[] = { __ATTR_NULL, }; +/** + * backlight_force_update - tell the backlight subsystem that hardware state + * has changed + * @bd: the backlight device to update + * + * Updates the internal state of the backlight in response to a hardware event, + * and generate a uevent to notify userspace + */ +void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason) +{ + mutex_lock(&bd->ops_lock); + if (bd->ops && bd->ops->get_brightness) + bd->props.brightness = bd->ops->get_brightness(bd); + mutex_unlock(&bd->ops_lock); + backlight_generate_event(bd, reason); +} +EXPORT_SYMBOL(backlight_force_update); + /** * backlight_device_register - create and register a new object of * backlight_device class. diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 79ca2da81c8..0f5f57858a2 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -27,6 +27,11 @@ * Any other use of the locks below is probably wrong. */ +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY, + BACKLIGHT_UPDATE_SYSFS, +}; + struct backlight_device; struct fb_info; @@ -100,6 +105,8 @@ static inline void backlight_update_status(struct backlight_device *bd) extern struct backlight_device *backlight_device_register(const char *name, struct device *dev, void *devdata, struct backlight_ops *ops); extern void backlight_device_unregister(struct backlight_device *bd); +extern void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) -- cgit v1.2.3-70-g09d2 From 181f7c5dd3832763bdf2756b6d2d8a49bdf12791 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 6 Jul 2009 11:53:03 +0200 Subject: kmemcheck: add missing braces to do-while in kmemcheck_annotate_bitfield Whether or not the sparse warning warning: do-while statement is not a compound statement is justified or not in this case, it is annoying and trivial to fix. 
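For context, the do { ... } while (0) idiom is only robust when the macro body is a single compound statement, so that un-braced callers expand safely; a minimal illustration of the resulting shape (the MY_ANNOTATE name and pr_debug body are purely illustrative, not from this patch):

/* With the braces, a caller such as
 *
 *	if (cond)
 *		MY_ANNOTATE(p);
 *	else
 *		something_else();
 *
 * still parses as one statement per branch. */
#define MY_ANNOTATE(p)						\
	do {							\
		if (!(p))					\
			break;					\
		pr_debug("annotating %p\n", (void *)(p));	\
	} while (0)
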
[vegard.nossum@gmail.com: title and cleanup] Signed-off-by: Johannes Berg Signed-off-by: Vegard Nossum --- include/linux/kmemcheck.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index 47b39b7c7e8..06c6c5501f1 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -137,7 +137,10 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p, int name##_end[0]; #define kmemcheck_annotate_bitfield(ptr, name) \ - do if (ptr) { \ + do { \ + if (!ptr) \ + break; \ + \ int _n = (long) &((ptr)->name##_end) \ - (long) &((ptr)->name##_begin); \ BUILD_BUG_ON(_n < 0); \ -- cgit v1.2.3-70-g09d2 From 40f9244f4da8976eeb6d5ed6313c635ba238a9d3 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 17 Jun 2009 17:56:39 +0100 Subject: regulator: Allow consumer supplies to be set up with dev_name() Follow the approach suggested by Russell King and implemented by him in the clkdev API and allow consumer device supply mapings to be set up using the dev_name() for the consumer instead of the struct device. In order to avoid making existing machines instabuggy and creating merge issues the use of struct device is still supported for the time being. This resolves problems working with buses such as I2C which make the struct device available late providing that the final device name is known, which is the case for most embedded systems with fixed setups. Consumers must still use the struct device when calling regulator_get(). Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 62 ++++++++++++++++++++++++++++++--------- include/linux/regulator/machine.h | 7 ++++- 2 files changed, 54 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 91ba9bfaa70..24e05b7607b 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -37,7 +37,7 @@ static int has_full_constraints; */ struct regulator_map { struct list_head list; - struct device *dev; + const char *dev_name; /* The dev_name() for the consumer */ const char *supply; struct regulator_dev *regulator; }; @@ -857,23 +857,33 @@ out: * set_consumer_device_supply: Bind a regulator to a symbolic supply * @rdev: regulator source * @consumer_dev: device the supply applies to + * @consumer_dev_name: dev_name() string for device supply applies to * @supply: symbolic name for supply * * Allows platform initialisation code to map physical regulator * sources to symbolic names for supplies for use by devices. Devices * should use these symbolic names to request regulators, avoiding the * need to provide board-specific regulator names as platform data. + * + * Only one of consumer_dev and consumer_dev_name may be specified. 
*/ static int set_consumer_device_supply(struct regulator_dev *rdev, - struct device *consumer_dev, const char *supply) + struct device *consumer_dev, const char *consumer_dev_name, + const char *supply) { struct regulator_map *node; + if (consumer_dev && consumer_dev_name) + return -EINVAL; + + if (!consumer_dev_name && consumer_dev) + consumer_dev_name = dev_name(consumer_dev); + if (supply == NULL) return -EINVAL; list_for_each_entry(node, ®ulator_map_list, list) { - if (consumer_dev != node->dev) + if (consumer_dev_name != node->dev_name) continue; if (strcmp(node->supply, supply) != 0) continue; @@ -891,25 +901,38 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, return -ENOMEM; node->regulator = rdev; - node->dev = consumer_dev; + node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); node->supply = supply; + if (node->dev_name == NULL) { + kfree(node); + return -ENOMEM; + } + list_add(&node->list, ®ulator_map_list); return 0; } static void unset_consumer_device_supply(struct regulator_dev *rdev, - struct device *consumer_dev) + const char *consumer_dev_name, struct device *consumer_dev) { struct regulator_map *node, *n; + if (consumer_dev && !consumer_dev_name) + consumer_dev_name = dev_name(consumer_dev); + list_for_each_entry_safe(node, n, ®ulator_map_list, list) { - if (rdev == node->regulator && - consumer_dev == node->dev) { - list_del(&node->list); - kfree(node); - return; - } + if (rdev != node->regulator) + continue; + + if (consumer_dev_name && node->dev_name && + strcmp(consumer_dev_name, node->dev_name)) + continue; + + list_del(&node->list); + kfree(node->dev_name); + kfree(node); + return; } } @@ -920,6 +943,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev) list_for_each_entry_safe(node, n, ®ulator_map_list, list) { if (rdev == node->regulator) { list_del(&node->list); + kfree(node->dev_name); kfree(node); return; } @@ -1019,17 +1043,25 @@ struct regulator *regulator_get(struct device *dev, const char *id) struct regulator_dev *rdev; struct regulator_map *map; struct regulator *regulator = ERR_PTR(-ENODEV); + const char *devname = NULL; if (id == NULL) { printk(KERN_ERR "regulator: get() with no identifier\n"); return regulator; } + if (dev) + devname = dev_name(dev); + mutex_lock(®ulator_list_mutex); list_for_each_entry(map, ®ulator_map_list, list) { - if (dev == map->dev && - strcmp(map->supply, id) == 0) { + /* If the mapping has a device set up it must match */ + if (map->dev_name && + (!devname || strcmp(map->dev_name, devname))) + continue; + + if (strcmp(map->supply, id) == 0) { rdev = map->regulator; goto found; } @@ -2091,11 +2123,13 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev, + init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { for (--i; i >= 0; i--) unset_consumer_device_supply(rdev, - init_data->consumer_supplies[i].dev); + init_data->consumer_supplies[i].dev_name, + init_data->consumer_supplies[i].dev); goto scrub; } } diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index bac64fa390f..9328090eca2 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -126,13 +126,18 @@ struct regulation_constraints { /** * struct regulator_consumer_supply - supply -> device mapping * - * This maps a supply name to a device. 
+ * This maps a supply name to a device. Only one of dev or dev_name + * can be specified. Use of dev_name allows support for buses which + * make struct device available late such as I2C and is the preferred + * form. * * @dev: Device structure for the consumer. + * @dev_name: Result of dev_name() for the consumer. * @supply: Name for the supply. */ struct regulator_consumer_supply { struct device *dev; /* consumer */ + const char *dev_name; /* dev_name() for consumer */ const char *supply; /* consumer supply - e.g. "vcc" */ }; -- cgit v1.2.3-70-g09d2 From 9c19bc0444490e76197f47316c649590dc6f10a4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 9 Jul 2009 15:44:31 +0100 Subject: regulator: Define full constraints function with REGULATOR disabled This allows machine drivers to build without ifdefs if they have full constraints. Suggested by machine drivers contributed by Haojian Zhuang . Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- include/linux/regulator/machine.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 9328090eca2..73a88f6cbb1 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -171,6 +171,12 @@ struct regulator_init_data { int regulator_suspend_prepare(suspend_state_t state); +#ifdef CONFIG_REGULATOR void regulator_has_full_constraints(void); +#else +static inline void regulator_has_full_constraints(void) +{ +} +#endif #endif -- cgit v1.2.3-70-g09d2 From 0198d1163b3e0313b3f073b62384abfab1a17cff Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Fri, 26 Jun 2009 19:20:59 +0800 Subject: regulator: add buck3 in da903x driver BUCK3 is the new component in DA9035. So there're three BUCKs in DA9035. And there're two BUCKs in DA9034. Signed-off-by: Haojian Zhuang Acked-by: Mark Brown Acked-by: Eric Miao Signed-off-by: Liam Girdwood --- drivers/regulator/da903x.c | 32 ++++++++++++++++++++++++++++++++ include/linux/mfd/da903x.h | 4 +++- 2 files changed, 35 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c index b8b89ef10a8..33dfeeb9407 100644 --- a/drivers/regulator/da903x.c +++ b/drivers/regulator/da903x.c @@ -64,6 +64,14 @@ #define DA9034_MDTV2 (0x33) #define DA9034_MVRC (0x34) +/* DA9035 Registers. DA9034 Registers are comptabile to DA9035. 
*/ +#define DA9035_OVER3 (0x12) +#define DA9035_VCC2 (0x1f) +#define DA9035_3DTV1 (0x2c) +#define DA9035_3DTV2 (0x2d) +#define DA9035_3VRC (0x2e) +#define DA9035_AUTOSKIP (0x2f) + struct da903x_regulator_info { struct regulator_desc desc; @@ -388,6 +396,27 @@ static struct regulator_ops da9034_regulator_ldo12_ops = { .enable_bit = (ebit), \ } +#define DA9035_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \ +{ \ + .desc = { \ + .name = #_id, \ + .ops = &da9034_regulator_dvc_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = DA9035_ID_##_id, \ + .owner = THIS_MODULE, \ + }, \ + .min_uV = (min) * 1000, \ + .max_uV = (max) * 1000, \ + .step_uV = (step) * 1000, \ + .vol_reg = DA9035_##vreg, \ + .vol_shift = (0), \ + .vol_nbits = (nbits), \ + .update_reg = DA9035_##ureg, \ + .update_bit = (ubit), \ + .enable_reg = DA9035_##ereg, \ + .enable_bit = (ebit), \ +} + #define DA9034_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \ DA903x_LDO(DA9034, _id, min, max, step, vreg, shift, nbits, ereg, ebit) @@ -435,6 +464,9 @@ static struct da903x_regulator_info da903x_regulator_info[] = { DA9034_LDO(14, 1800, 3300, 100, LDO1514, 0, 4, OVER3, 0), DA9034_LDO(15, 1800, 3300, 100, LDO1514, 4, 4, OVER3, 1), DA9034_LDO(5, 3100, 3100, 0, INVAL, 0, 0, OVER3, 7), /* fixed @3.1V */ + + /* DA9035 */ + DA9035_DVC(BUCK3, 1800, 2200, 100, 3DTV1, 3, VCC2, 0, OVER3, 3), }; static inline struct da903x_regulator_info *find_regulator_info(int id) diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h index 115dbe96508..c63b65c9442 100644 --- a/include/linux/mfd/da903x.h +++ b/include/linux/mfd/da903x.h @@ -1,7 +1,7 @@ #ifndef __LINUX_PMIC_DA903X_H #define __LINUX_PMIC_DA903X_H -/* Unified sub device IDs for DA9030/DA9034 */ +/* Unified sub device IDs for DA9030/DA9034/DA9035 */ enum { DA9030_ID_LED_1, DA9030_ID_LED_2, @@ -57,6 +57,8 @@ enum { DA9034_ID_LDO13, DA9034_ID_LDO14, DA9034_ID_LDO15, + + DA9035_ID_BUCK3, }; /* -- cgit v1.2.3-70-g09d2 From ed6543243a1c557dbe2005a86f6d8e851c1ebb79 Mon Sep 17 00:00:00 2001 From: roald Date: Mon, 13 Jul 2009 17:25:21 +0800 Subject: regulator: add initialization macro of regulator supply Signed-off-by: Haojian Zhuang Acked-by: Mark Brown Signed-off-by: Liam Girdwood --- include/linux/regulator/machine.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 73a88f6cbb1..99a4e2eb36a 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -141,6 +141,13 @@ struct regulator_consumer_supply { const char *supply; /* consumer supply - e.g. "vcc" */ }; +/* Initialize struct regulator_consumer_supply */ +#define REGULATOR_SUPPLY(_name, _dev_name) \ +{ \ + .supply = _name, \ + .dev_name = _dev_name, \ +} + /** * struct regulator_init_data - regulator platform initialisation data. * -- cgit v1.2.3-70-g09d2 From 5ffbd136e6c51c8d1eec7a4a0c5d2180c81aea30 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 21 Jul 2009 16:00:23 +0100 Subject: regulator: Add regulator_get_exclusive() API Some consumers require complete control of the regulator and can't tolerate sharing it with other consumers, most commonly because they need to have the regulator actually disabled so can't have other consumers forcing it on. This new regulator_get_exclusive() API call allows these consumers to explicitly request this, documenting the assumptions that they are making. 
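A consumer-side sketch (the probe function and the "vcc_sd" supply name are hypothetical; only regulator_get_exclusive() itself is added by this patch):

#include <linux/regulator/consumer.h>

static int my_probe(struct device *dev)
{
	struct regulator *reg;

	/* Fail (e.g. -EBUSY) rather than silently share the supply. */
	reg = regulator_get_exclusive(dev, "vcc_sd");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/*
	 * Because the get was exclusive, the use count reflects the
	 * real hardware state, so disabling here genuinely powers the
	 * rail down instead of just dropping one reference.
	 */
	if (regulator_is_enabled(reg) > 0)
		regulator_disable(reg);

	regulator_put(reg);
	return 0;
}
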
In order to simplify coding of such consumers the use count for regulators they request is forced to match the enabled state of the regulator when it is requested. This is not possible for consumers which can share regulators due to the need to keep track of the ownership of use counts. A new API call is used rather than an additional argument to the existing regulator_get() in order to avoid merge headaches with driver code in other trees. Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 88 ++++++++++++++++++++++++++++++++------ include/linux/regulator/consumer.h | 2 + include/linux/regulator/driver.h | 2 + 3 files changed, 78 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 24e05b7607b..68549008582 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1025,25 +1025,15 @@ overflow_err: return NULL; } -/** - * regulator_get - lookup and obtain a reference to a regulator. - * @dev: device for regulator "consumer" - * @id: Supply name or regulator ID. - * - * Returns a struct regulator corresponding to the regulator producer, - * or IS_ERR() condition containing errno. - * - * Use of supply names configured via regulator_set_device_supply() is - * strongly encouraged. It is recommended that the supply name used - * should match the name used for the supply and/or the relevant - * device pins in the datasheet. - */ -struct regulator *regulator_get(struct device *dev, const char *id) +/* Internal regulator request function */ +static struct regulator *_regulator_get(struct device *dev, const char *id, + int exclusive) { struct regulator_dev *rdev; struct regulator_map *map; struct regulator *regulator = ERR_PTR(-ENODEV); const char *devname = NULL; + int ret; if (id == NULL) { printk(KERN_ERR "regulator: get() with no identifier\n"); @@ -1070,6 +1060,16 @@ struct regulator *regulator_get(struct device *dev, const char *id) return regulator; found: + if (rdev->exclusive) { + regulator = ERR_PTR(-EPERM); + goto out; + } + + if (exclusive && rdev->open_count) { + regulator = ERR_PTR(-EBUSY); + goto out; + } + if (!try_module_get(rdev->owner)) goto out; @@ -1079,12 +1079,69 @@ found: module_put(rdev->owner); } + rdev->open_count++; + if (exclusive) { + rdev->exclusive = 1; + + ret = _regulator_is_enabled(rdev); + if (ret > 0) + rdev->use_count = 1; + else + rdev->use_count = 0; + } + out: mutex_unlock(®ulator_list_mutex); + return regulator; } + +/** + * regulator_get - lookup and obtain a reference to a regulator. + * @dev: device for regulator "consumer" + * @id: Supply name or regulator ID. + * + * Returns a struct regulator corresponding to the regulator producer, + * or IS_ERR() condition containing errno. + * + * Use of supply names configured via regulator_set_device_supply() is + * strongly encouraged. It is recommended that the supply name used + * should match the name used for the supply and/or the relevant + * device pins in the datasheet. + */ +struct regulator *regulator_get(struct device *dev, const char *id) +{ + return _regulator_get(dev, id, 0); +} EXPORT_SYMBOL_GPL(regulator_get); +/** + * regulator_get_exclusive - obtain exclusive access to a regulator. + * @dev: device for regulator "consumer" + * @id: Supply name or regulator ID. + * + * Returns a struct regulator corresponding to the regulator producer, + * or IS_ERR() condition containing errno. 
Other consumers will be + * unable to obtain this reference is held and the use count for the + * regulator will be initialised to reflect the current state of the + * regulator. + * + * This is intended for use by consumers which cannot tolerate shared + * use of the regulator such as those which need to force the + * regulator off for correct operation of the hardware they are + * controlling. + * + * Use of supply names configured via regulator_set_device_supply() is + * strongly encouraged. It is recommended that the supply name used + * should match the name used for the supply and/or the relevant + * device pins in the datasheet. + */ +struct regulator *regulator_get_exclusive(struct device *dev, const char *id) +{ + return _regulator_get(dev, id, 1); +} +EXPORT_SYMBOL_GPL(regulator_get_exclusive); + /** * regulator_put - "free" the regulator source * @regulator: regulator source @@ -1113,6 +1170,9 @@ void regulator_put(struct regulator *regulator) list_del(®ulator->list); kfree(regulator); + rdev->open_count--; + rdev->exclusive = 0; + module_put(rdev->owner); mutex_unlock(®ulator_list_mutex); } diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 277f4b964df..976b57b6912 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -125,6 +125,8 @@ struct regulator_bulk_data { /* regulator get and put */ struct regulator *__must_check regulator_get(struct device *dev, const char *id); +struct regulator *__must_check regulator_get_exclusive(struct device *dev, + const char *id); void regulator_put(struct regulator *regulator); /* regulator output control and status */ diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index ce1be708ca1..73c9cd6cda7 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -162,6 +162,8 @@ struct regulator_desc { struct regulator_dev { struct regulator_desc *desc; int use_count; + int open_count; + int exclusive; /* lists we belong to */ struct list_head list; /* list of all regulators */ -- cgit v1.2.3-70-g09d2 From a7a1ad9066e0266c8a4357ba3dbaeebfb80f531d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 21 Jul 2009 16:00:24 +0100 Subject: regulator: Add regulator voltage range check API Simplify checking of support for voltage ranges by providing an API which wraps the existing count and list operations. Signed-off-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/core.c | 29 +++++++++++++++++++++++++++++ include/linux/regulator/consumer.h | 2 ++ 2 files changed, 31 insertions(+) (limited to 'include') diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 68549008582..e11c2222d9a 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1441,6 +1441,35 @@ int regulator_list_voltage(struct regulator *regulator, unsigned selector) } EXPORT_SYMBOL_GPL(regulator_list_voltage); +/** + * regulator_is_supported_voltage - check if a voltage range can be supported + * + * @regulator: Regulator to check. + * @min_uV: Minimum required voltage in uV. + * @max_uV: Maximum required voltage in uV. + * + * Returns a boolean or a negative error code. 
+ */ +int regulator_is_supported_voltage(struct regulator *regulator, + int min_uV, int max_uV) +{ + int i, voltages, ret; + + ret = regulator_count_voltages(regulator); + if (ret < 0) + return ret; + voltages = ret; + + for (i = 0; i < voltages; i++) { + ret = regulator_list_voltage(regulator, i); + + if (ret >= min_uV && ret <= max_uV) + return 1; + } + + return 0; +} + /** * regulator_set_voltage - set regulator output voltage * @regulator: regulator source diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 976b57b6912..490c5b37b6d 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -146,6 +146,8 @@ void regulator_bulk_free(int num_consumers, int regulator_count_voltages(struct regulator *regulator); int regulator_list_voltage(struct regulator *regulator, unsigned selector); +int regulator_is_supported_voltage(struct regulator *regulator, + int min_uV, int max_uV); int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); int regulator_get_voltage(struct regulator *regulator); int regulator_set_current_limit(struct regulator *regulator, -- cgit v1.2.3-70-g09d2 From 86d9884b6a3646bc24e57430f1f694c5171c1bf6 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 6 Aug 2009 19:37:29 +0300 Subject: regulator: Add GPIO enable control to fixed voltage regulator driver Now fixed regulators that have their enable pin connected to a GPIO line can use the fixed regulator driver for regulator enable/disable control. The GPIO number and polarity information is passed through platform data. GPIO enable control is achieved using gpiolib. Signed-off-by: Roger Quadros Reviewed-by: Philipp Zabel Reviewed-by: Felipe Balbi Acked-by: Mark Brown Signed-off-by: Liam Girdwood --- drivers/regulator/fixed.c | 88 ++++++++++++++++++++++++++++++++++++++++- include/linux/regulator/fixed.h | 24 +++++++++++ 2 files changed, 110 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index 9c7f956d57c..f8b295700d7 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -5,6 +5,9 @@ * * Author: Mark Brown * + * Copyright (c) 2009 Nokia Corporation + * Roger Quadros + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the @@ -20,20 +23,45 @@ #include #include #include +#include struct fixed_voltage_data { struct regulator_desc desc; struct regulator_dev *dev; int microvolts; + int gpio; + unsigned enable_high:1; + unsigned is_enabled:1; }; static int fixed_voltage_is_enabled(struct regulator_dev *dev) { - return 1; + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + return data->is_enabled; } static int fixed_voltage_enable(struct regulator_dev *dev) { + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + if (gpio_is_valid(data->gpio)) { + gpio_set_value_cansleep(data->gpio, data->enable_high); + data->is_enabled = 1; + } + + return 0; +} + +static int fixed_voltage_disable(struct regulator_dev *dev) +{ + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + if (gpio_is_valid(data->gpio)) { + gpio_set_value_cansleep(data->gpio, !data->enable_high); + data->is_enabled = 0; + } + return 0; } @@ -58,6 +86,7 @@ static int fixed_voltage_list_voltage(struct regulator_dev *dev, static struct regulator_ops fixed_voltage_ops = { .is_enabled = fixed_voltage_is_enabled, .enable = 
fixed_voltage_enable, + .disable = fixed_voltage_disable, .get_voltage = fixed_voltage_get_voltage, .list_voltage = fixed_voltage_list_voltage, }; @@ -87,13 +116,62 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev) drvdata->desc.n_voltages = 1; drvdata->microvolts = config->microvolts; + drvdata->gpio = config->gpio; + + if (gpio_is_valid(config->gpio)) { + drvdata->enable_high = config->enable_high; + + /* FIXME: Remove below print warning + * + * config->gpio must be set to -EINVAL by platform code if + * GPIO control is not required. However, early adopters + * not requiring GPIO control may forget to initialize + * config->gpio to -EINVAL. This will cause GPIO 0 to be used + * for GPIO control. + * + * This warning will be removed once there are a couple of users + * for this driver. + */ + if (!config->gpio) + dev_warn(&pdev->dev, + "using GPIO 0 for regulator enable control\n"); + + ret = gpio_request(config->gpio, config->supply_name); + if (ret) { + dev_err(&pdev->dev, + "Could not obtain regulator enable GPIO %d: %d\n", + config->gpio, ret); + goto err_name; + } + + /* set output direction without changing state + * to prevent glitch + */ + drvdata->is_enabled = config->enabled_at_boot; + ret = drvdata->is_enabled ? + config->enable_high : !config->enable_high; + + ret = gpio_direction_output(config->gpio, ret); + if (ret) { + dev_err(&pdev->dev, + "Could not configure regulator enable GPIO %d direction: %d\n", + config->gpio, ret); + goto err_gpio; + } + + } else { + /* Regulator without GPIO control is considered + * always enabled + */ + drvdata->is_enabled = 1; + } drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev, config->init_data, drvdata); if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); - goto err_name; + goto err_gpio; } platform_set_drvdata(pdev, drvdata); @@ -103,6 +181,9 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev) return 0; +err_gpio: + if (gpio_is_valid(config->gpio)) + gpio_free(config->gpio); err_name: kfree(drvdata->desc.name); err: @@ -118,6 +199,9 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev) kfree(drvdata->desc.name); kfree(drvdata); + if (gpio_is_valid(drvdata->gpio)) + gpio_free(drvdata->gpio); + return 0; } diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 91b4da31f1b..e94a4a1c7c8 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -5,6 +5,9 @@ * * Author: Mark Brown * + * Copyright (c) 2009 Nokia Corporation + * Roger Quadros + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the @@ -16,9 +19,30 @@ struct regulator_init_data; +/** + * struct fixed_voltage_config - fixed_voltage_config structure + * @supply_name: Name of the regulator supply + * @microvolts: Output voltage of regulator + * @gpio: GPIO to use for enable control + * set to -EINVAL if not used + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low + * @enabled_at_boot: Whether regulator has been enabled at + * boot or not. 
1 = Yes, 0 = No + * This is used to keep the regulator at + * the default state + * @init_data: regulator_init_data + * + * This structure contains fixed voltage regulator configuration + * information that must be passed by platform code to the fixed + * voltage regulator driver. + */ struct fixed_voltage_config { const char *supply_name; int microvolts; + int gpio; + unsigned enable_high:1; + unsigned enabled_at_boot:1; struct regulator_init_data *init_data; }; -- cgit v1.2.3-70-g09d2 From 2e7e65ce55566fc81036960b00e5e15f5d9578ea Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 18 Sep 2009 22:44:43 +0200 Subject: regulator: fix typos Fix a couple of typos I found while working with this subsystem. Signed-off-by: Wolfram Sang Acked-by: Mark Brown Signed-off-by: Liam Girdwood --- Documentation/power/regulator/machine.txt | 4 ++-- include/linux/regulator/machine.h | 6 +++--- include/linux/regulator/max1586.h | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt index ce3487d99ab..63728fed620 100644 --- a/Documentation/power/regulator/machine.txt +++ b/Documentation/power/regulator/machine.txt @@ -87,7 +87,7 @@ static struct platform_device regulator_devices[] = { }, }; /* register regulator 1 device */ -platform_device_register(&wm8350_regulator_devices[0]); +platform_device_register(®ulator_devices[0]); /* register regulator 2 device */ -platform_device_register(&wm8350_regulator_devices[1]); +platform_device_register(®ulator_devices[1]); diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 99a4e2eb36a..87f5f176d4e 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -41,7 +41,7 @@ struct regulator; #define REGULATOR_CHANGE_DRMS 0x10 /** - * struct regulator_state - regulator state during low power syatem states + * struct regulator_state - regulator state during low power system states * * This describes a regulators state during a system wide low power state. * @@ -117,10 +117,10 @@ struct regulation_constraints { /* mode to set on startup */ unsigned int initial_mode; - /* constriant flags */ + /* constraint flags */ unsigned always_on:1; /* regulator never off when system is on */ unsigned boot_on:1; /* bootloader/firmware enabled regulator */ - unsigned apply_uV:1; /* apply uV constraint iff min == max */ + unsigned apply_uV:1; /* apply uV constraint if min == max */ }; /** diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h index 44563192bf1..de9a7fae20b 100644 --- a/include/linux/regulator/max1586.h +++ b/include/linux/regulator/max1586.h @@ -36,7 +36,7 @@ * max1586_subdev_data - regulator data * @id: regulator Id (either MAX1586_V3 or MAX1586_V6) * @name: regulator cute name (example for V3: "vcc_core") - * @platform_data: regulator init data (contraints, supplies, ...) + * @platform_data: regulator init data (constraints, supplies, ...) */ struct max1586_subdev_data { int id; @@ -46,7 +46,7 @@ struct max1586_subdev_data { /** * max1586_platform_data - platform data for max1586 - * @num_subdevs: number of regultors used (may be 1 or 2) + * @num_subdevs: number of regulators used (may be 1 or 2) * @subdevs: regulator used * At most, there will be a regulator for V3 and one for V6 voltages. * @v3_gain: gain on the V3 voltage output multiplied by 1e6. 
-- cgit v1.2.3-70-g09d2 From d87b969d15a084503870da598c97278fb4877753 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 18 Sep 2009 22:44:46 +0200 Subject: regulator/driver: be more specific in nanodoc for is_enabled Document the possibility that is_enabled may also return with negative errorcodes. Signed-off-by: Wolfram Sang Acked-by: Mark Brown Signed-off-by: Liam Girdwood --- include/linux/regulator/driver.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 73c9cd6cda7..31f2055eae2 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -37,7 +37,8 @@ enum regulator_status { * * @enable: Configure the regulator as enabled. * @disable: Configure the regulator as disabled. - * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. + * @is_enabled: Return 1 if the regulator is enabled, 0 if not. + * May also return negative errno. * * @set_voltage: Set the voltage for the regulator within the range specified. * The driver should select the voltage closest to min_uV. -- cgit v1.2.3-70-g09d2 From a6f10a2f5d8c2738b3ac05974bdbea3b68a2aecd Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 22 Sep 2009 22:34:24 +1000 Subject: perf_event: Update PERF_EVENT_FORK header definition PERF_EVENT_FORK always outputs the time field, so update the header to reflect this. Signed-off-by: Anton Blanchard Cc: Arjan van de Ven Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <20090922123424.GD19453@kryten> Signed-off-by: Ingo Molnar --- include/linux/perf_counter.h | 2 +- include/linux/perf_event.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 368bd70f1d2..7b7fbf433cf 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -361,7 +361,7 @@ enum perf_event_type { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; - * { u64 time; } && PERF_SAMPLE_TIME + * u64 time; * }; */ PERF_EVENT_FORK = 7, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index acefaf71e6d..3a9d36d1e92 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -357,7 +357,7 @@ enum perf_event_type { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; - * { u64 time; } && PERF_SAMPLE_TIME + * u64 time; * }; */ PERF_RECORD_FORK = 7, -- cgit v1.2.3-70-g09d2 From 61e225dc341107be304fd1088146c2a5e88ff9e0 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 21 Sep 2009 17:01:08 -0700 Subject: const: make struct super_block::dq_op const Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext3/super.c | 2 +- fs/ext4/super.c | 2 +- fs/ocfs2/quota.h | 2 +- fs/ocfs2/quota_global.c | 2 +- fs/quota/dquot.c | 2 +- fs/reiserfs/super.c | 2 +- include/linux/fs.h | 2 +- include/linux/quotaops.h | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index a8d80a7f110..e7a4e11352d 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -720,7 +720,7 @@ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); -static struct dquot_operations ext3_quota_operations = { +static const struct dquot_operations 
ext3_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index a6b1ab73472..7ffb62eca4b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -964,7 +964,7 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); -static struct dquot_operations ext4_quota_operations = { +static const struct dquot_operations ext4_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index 3fb96fcd4c8..e5df9d170b0 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h @@ -109,7 +109,7 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); int ocfs2_read_quota_block(struct inode *inode, u64 v_block, struct buffer_head **bh); -extern struct dquot_operations ocfs2_quota_operations; +extern const struct dquot_operations ocfs2_quota_operations; extern struct quota_format_type ocfs2_quota_format; int ocfs2_quota_setup(void); diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 44f2a5e1d04..3af4954d537 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -849,7 +849,7 @@ static void ocfs2_destroy_dquot(struct dquot *dquot) kmem_cache_free(ocfs2_dquot_cachep, dquot); } -struct dquot_operations ocfs2_quota_operations = { +const struct dquot_operations ocfs2_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 38f7bd559f3..635ae2e535b 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -1839,7 +1839,7 @@ EXPORT_SYMBOL(dquot_commit_info); /* * Definitions of diskquota operations. */ -struct dquot_operations dquot_operations = { +const struct dquot_operations dquot_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 7adea74d6a8..09c93c12874 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -612,7 +612,7 @@ static int reiserfs_mark_dquot_dirty(struct dquot *); static int reiserfs_write_info(struct super_block *, int); static int reiserfs_quota_on(struct super_block *, int, int, char *, int); -static struct dquot_operations reiserfs_quota_operations = { +static const struct dquot_operations reiserfs_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, diff --git a/include/linux/fs.h b/include/linux/fs.h index 90162fb3bf0..83e1a0cea97 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1318,7 +1318,7 @@ struct super_block { unsigned long long s_maxbytes; /* Max file size */ struct file_system_type *s_type; const struct super_operations *s_op; - struct dquot_operations *dq_op; + const struct dquot_operations *dq_op; struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 26361c4c037..8dcbdb6e101 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -135,7 +135,7 @@ static inline int sb_any_quota_active(struct super_block *sb) /* * Operations supported for diskquotas. 
*/ -extern struct dquot_operations dquot_operations; +extern const struct dquot_operations dquot_operations; extern struct quotactl_ops vfs_quotactl_ops; #define sb_dquot_ops (&dquot_operations) -- cgit v1.2.3-70-g09d2 From 0d54b217a247f39605361f867fefbb9e099a5432 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 21 Sep 2009 17:01:09 -0700 Subject: const: make struct super_block::s_qcop const Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/cifs/cifsfs.c | 4 ++-- fs/ext3/super.c | 2 +- fs/ext4/super.c | 2 +- fs/ocfs2/super.c | 2 +- fs/quota/dquot.c | 2 +- fs/reiserfs/super.c | 2 +- fs/xfs/linux-2.6/xfs_quotaops.c | 2 +- fs/xfs/linux-2.6/xfs_super.h | 2 +- include/linux/fs.h | 2 +- include/linux/quotaops.h | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3610e9958b4..d79ce2e95c2 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -50,7 +50,7 @@ #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ #ifdef CONFIG_CIFS_QUOTA -static struct quotactl_ops cifs_quotactl_ops; +static const struct quotactl_ops cifs_quotactl_ops; #endif /* QUOTA */ int cifsFYI = 0; @@ -517,7 +517,7 @@ int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats) return rc; } -static struct quotactl_ops cifs_quotactl_ops = { +static const struct quotactl_ops cifs_quotactl_ops = { .set_xquota = cifs_xquota_set, .get_xquota = cifs_xquota_get, .set_xstate = cifs_xstate_set, diff --git a/fs/ext3/super.c b/fs/ext3/super.c index e7a4e11352d..72743d36050 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -737,7 +737,7 @@ static const struct dquot_operations ext3_quota_operations = { .destroy_dquot = dquot_destroy, }; -static struct quotactl_ops ext3_qctl_operations = { +static const struct quotactl_ops ext3_qctl_operations = { .quota_on = ext3_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 7ffb62eca4b..df539ba2777 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -985,7 +985,7 @@ static const struct dquot_operations ext4_quota_operations = { .destroy_dquot = dquot_destroy, }; -static struct quotactl_ops ext4_qctl_operations = { +static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index a3f8871d21f..faca4720aa4 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -965,7 +965,7 @@ static int ocfs2_quota_off(struct super_block *sb, int type, int remount) return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED); } -static struct quotactl_ops ocfs2_quotactl_ops = { +static const struct quotactl_ops ocfs2_quotactl_ops = { .quota_on = ocfs2_quota_on, .quota_off = ocfs2_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 635ae2e535b..39b49c42a7e 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2461,7 +2461,7 @@ out: } EXPORT_SYMBOL(vfs_set_dqinfo); -struct quotactl_ops vfs_quotactl_ops = { +const struct quotactl_ops vfs_quotactl_ops = { .quota_on = vfs_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 09c93c12874..f0ad05f3802 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -629,7 +629,7 @@ static const struct dquot_operations reiserfs_quota_operations = { .destroy_dquot = dquot_destroy, }; 
-static struct quotactl_ops reiserfs_qctl_operations = { +static const struct quotactl_ops reiserfs_qctl_operations = { .quota_on = reiserfs_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c index cb6e2cca214..9e41f91aa26 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/linux-2.6/xfs_quotaops.c @@ -150,7 +150,7 @@ xfs_fs_set_xquota( return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); } -struct quotactl_ops xfs_quotactl_operations = { +const struct quotactl_ops xfs_quotactl_operations = { .quota_sync = xfs_fs_quota_sync, .get_xstate = xfs_fs_get_xstate, .set_xstate = xfs_fs_set_xstate, diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index 5a2ea3a2178..18175ebd58e 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h @@ -93,7 +93,7 @@ extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); extern const struct export_operations xfs_export_operations; extern struct xattr_handler *xfs_xattr_handlers[]; -extern struct quotactl_ops xfs_quotactl_operations; +extern const struct quotactl_ops xfs_quotactl_operations; #define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) diff --git a/include/linux/fs.h b/include/linux/fs.h index 83e1a0cea97..13c030ebfd2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1319,7 +1319,7 @@ struct super_block { struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; - struct quotactl_ops *s_qcop; + const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 8dcbdb6e101..3ebb2315364 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -136,7 +136,7 @@ static inline int sb_any_quota_active(struct super_block *sb) * Operations supported for diskquotas. 
*/ extern const struct dquot_operations dquot_operations; -extern struct quotactl_ops vfs_quotactl_ops; +extern const struct quotactl_ops vfs_quotactl_ops; #define sb_dquot_ops (&dquot_operations) #define sb_quotactl_ops (&vfs_quotactl_ops) -- cgit v1.2.3-70-g09d2 From 6aed62853c72e29f2c97bbac7712cb398e8c9437 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 21 Sep 2009 17:01:11 -0700 Subject: const: make file_lock_operations const Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/afs/flock.c | 2 +- fs/lockd/clntproc.c | 2 +- fs/nfs/nfs4state.c | 2 +- include/linux/fs.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 3ff8bdd18fb..0931bc1325e 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -21,7 +21,7 @@ static void afs_fl_release_private(struct file_lock *fl); static struct workqueue_struct *afs_lock_manager; static DEFINE_MUTEX(afs_lock_manager_mutex); -static struct file_lock_operations afs_lock_ops = { +static const struct file_lock_operations afs_lock_ops = { .fl_copy_lock = afs_fl_copy_lock, .fl_release_private = afs_fl_release_private, }; diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 4336adba952..c81249fef11 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -458,7 +458,7 @@ static void nlmclnt_locks_release_private(struct file_lock *fl) nlm_put_lockowner(fl->fl_u.nfs_fl.owner); } -static struct file_lock_operations nlmclnt_lock_ops = { +static const struct file_lock_operations nlmclnt_lock_ops = { .fl_copy_lock = nlmclnt_locks_copy_lock, .fl_release_private = nlmclnt_locks_release_private, }; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1434080aefe..2ef4fecf398 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -638,7 +638,7 @@ static void nfs4_fl_release_lock(struct file_lock *fl) nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); } -static struct file_lock_operations nfs4_fl_lock_ops = { +static const struct file_lock_operations nfs4_fl_lock_ops = { .fl_copy_lock = nfs4_fl_copy_lock, .fl_release_private = nfs4_fl_release_lock, }; diff --git a/include/linux/fs.h b/include/linux/fs.h index 13c030ebfd2..6146dec21c3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1066,7 +1066,7 @@ struct file_lock { struct fasync_struct * fl_fasync; /* for lease break notifications */ unsigned long fl_break_time; /* for nonblocking lease breaks */ - struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ + const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { struct nfs_lock_info nfs_fl; -- cgit v1.2.3-70-g09d2 From 7b021967c5e1463936042c8da72b550d3cabe9ac Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 21 Sep 2009 17:01:12 -0700 Subject: const: make lock_manager_operations const Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/lockd/svclock.c | 2 +- fs/locks.c | 2 +- fs/nfsd/nfs4state.c | 4 ++-- include/linux/fs.h | 2 +- include/linux/lockd/lockd.h | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index e577a78d7ba..d1001790fa9 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -705,7 +705,7 @@ static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2) return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid; } -struct 
lock_manager_operations nlmsvc_lock_operations = { +const struct lock_manager_operations nlmsvc_lock_operations = { .fl_compare_owner = nlmsvc_same_owner, .fl_notify = nlmsvc_notify_blocked, .fl_grant = nlmsvc_grant_deferred, diff --git a/fs/locks.c b/fs/locks.c index 19ee18a6829..a8794f233bc 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -434,7 +434,7 @@ static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) return fl->fl_file == try->fl_file; } -static struct lock_manager_operations lease_manager_ops = { +static const struct lock_manager_operations lease_manager_ops = { .fl_break = lease_break_callback, .fl_release_private = lease_release_private_callback, .fl_mylease = lease_mylease_callback, diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 980a216a48c..766d3d54454 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2163,7 +2163,7 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) return -EAGAIN; } -static struct lock_manager_operations nfsd_lease_mng_ops = { +static const struct lock_manager_operations nfsd_lease_mng_ops = { .fl_break = nfsd_break_deleg_cb, .fl_release_private = nfsd_release_deleg_cb, .fl_copy_lock = nfsd_copy_lock_deleg_cb, @@ -3368,7 +3368,7 @@ nfs4_transform_lock_offset(struct file_lock *lock) /* Hack!: For now, we're defining this just so we can use a pointer to it * as a unique cookie to identify our (NFSv4's) posix locks. */ -static struct lock_manager_operations nfsd_posix_mng_ops = { +static const struct lock_manager_operations nfsd_posix_mng_ops = { }; static inline void diff --git a/include/linux/fs.h b/include/linux/fs.h index 6146dec21c3..51803528b09 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1067,7 +1067,7 @@ struct file_lock { unsigned long fl_break_time; /* for nonblocking lease breaks */ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ - struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ + const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c325b187966..ccf2e0dc077 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -395,7 +395,7 @@ static inline int nlm_compare_locks(const struct file_lock *fl1, &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK); } -extern struct lock_manager_operations nlmsvc_lock_operations; +extern const struct lock_manager_operations nlmsvc_lock_operations; #endif /* __KERNEL__ */ -- cgit v1.2.3-70-g09d2 From 83d5cde47dedf01b6a4a4331882cbc0a7eea3c2e Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 21 Sep 2009 17:01:13 -0700 Subject: const: make block_device_operations const Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/sysdev/axonram.c | 2 +- arch/um/drivers/ubd_kern.c | 2 +- drivers/block/DAC960.c | 2 +- drivers/block/amiflop.c | 2 +- drivers/block/aoe/aoeblk.c | 2 +- drivers/block/ataflop.c | 2 +- drivers/block/brd.c | 2 +- drivers/block/cciss.c | 2 +- drivers/block/cpqarray.c | 2 +- drivers/block/floppy.c | 2 +- drivers/block/hd.c | 2 +- drivers/block/loop.c | 2 +- drivers/block/mg_disk.c | 2 +- drivers/block/nbd.c | 2 +- drivers/block/osdblk.c | 2 +- drivers/block/paride/pcd.c | 2 +- drivers/block/paride/pd.c | 2 +- drivers/block/paride/pf.c | 2 +- drivers/block/pktcdvd.c | 2 +- drivers/block/ps3disk.c | 2 +- 
drivers/block/ps3vram.c | 2 +- drivers/block/sunvdc.c | 2 +- drivers/block/swim.c | 2 +- drivers/block/swim3.c | 2 +- drivers/block/sx8.c | 2 +- drivers/block/ub.c | 2 +- drivers/block/umem.c | 3 +-- drivers/block/viodasd.c | 2 +- drivers/block/virtio_blk.c | 2 +- drivers/block/xd.c | 2 +- drivers/block/xen-blkfront.c | 4 ++-- drivers/block/xsysace.c | 2 +- drivers/block/z2ram.c | 3 +-- drivers/cdrom/gdrom.c | 2 +- drivers/cdrom/viocd.c | 2 +- drivers/ide/ide-cd.c | 2 +- drivers/ide/ide-gd.c | 2 +- drivers/ide/ide-tape.c | 2 +- drivers/md/dm.c | 4 ++-- drivers/md/md.c | 4 ++-- drivers/memstick/core/mspro_block.c | 2 +- drivers/message/i2o/i2o_block.c | 2 +- drivers/mmc/card/block.c | 2 +- drivers/mtd/mtd_blkdevs.c | 2 +- drivers/s390/block/dasd.c | 2 +- drivers/s390/block/dasd_int.h | 2 +- drivers/s390/block/dcssblk.c | 2 +- drivers/s390/block/xpram.c | 2 +- drivers/s390/char/tape_block.c | 2 +- drivers/sbus/char/jsflash.c | 2 +- drivers/scsi/sd.c | 2 +- drivers/scsi/sr.c | 2 +- fs/block_dev.c | 2 +- fs/ext2/xip.c | 2 +- fs/partitions/check.c | 2 +- include/linux/genhd.h | 2 +- 56 files changed, 59 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index a4779912a5c..88f4ae78783 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -165,7 +165,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector, return 0; } -static struct block_device_operations axon_ram_devops = { +static const struct block_device_operations axon_ram_devops = { .owner = THIS_MODULE, .direct_access = axon_ram_direct_access }; diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 8f05d4d9da1..635d16d90a8 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -106,7 +106,7 @@ static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo); #define MAX_DEV (16) -static struct block_device_operations ubd_blops = { +static const struct block_device_operations ubd_blops = { .owner = THIS_MODULE, .open = ubd_open, .release = ubd_release, diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 1e6b7c14f69..8b50af381a8 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -152,7 +152,7 @@ static int DAC960_revalidate_disk(struct gendisk *disk) return 0; } -static struct block_device_operations DAC960_BlockDeviceOperations = { +static const struct block_device_operations DAC960_BlockDeviceOperations = { .owner = THIS_MODULE, .open = DAC960_open, .getgeo = DAC960_getgeo, diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 2f07b7c99a9..05522583902 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1632,7 +1632,7 @@ static int amiga_floppy_change(struct gendisk *disk) return 0; } -static struct block_device_operations floppy_fops = { +static const struct block_device_operations floppy_fops = { .owner = THIS_MODULE, .open = floppy_open, .release = floppy_release, diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index b6cd571adbf..3af97d4da2d 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -237,7 +237,7 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static struct block_device_operations aoe_bdops = { +static const struct block_device_operations aoe_bdops = { .open = aoeblk_open, .release = aoeblk_release, .getgeo = aoeblk_getgeo, diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 
3ff02941b3d..847a9e57570 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1856,7 +1856,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) return 0; } -static struct block_device_operations floppy_fops = { +static const struct block_device_operations floppy_fops = { .owner = THIS_MODULE, .open = floppy_open, .release = floppy_release, diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 4bf8705b3ac..4f688434daf 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -375,7 +375,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode, return error; } -static struct block_device_operations brd_fops = { +static const struct block_device_operations brd_fops = { .owner = THIS_MODULE, .locked_ioctl = brd_ioctl, #ifdef CONFIG_BLK_DEV_XIP diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index d8372b43282..4f19105f755 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -205,7 +205,7 @@ static int cciss_compat_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); #endif -static struct block_device_operations cciss_fops = { +static const struct block_device_operations cciss_fops = { .owner = THIS_MODULE, .open = cciss_open, .release = cciss_release, diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 44fa2018f6b..b82d438e260 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -193,7 +193,7 @@ static inline ctlr_info_t *get_host(struct gendisk *disk) } -static struct block_device_operations ida_fops = { +static const struct block_device_operations ida_fops = { .owner = THIS_MODULE, .open = ida_open, .release = ida_release, diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 2b387c2260d..5c01f747571 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3907,7 +3907,7 @@ static int floppy_revalidate(struct gendisk *disk) return res; } -static struct block_device_operations floppy_fops = { +static const struct block_device_operations floppy_fops = { .owner = THIS_MODULE, .open = floppy_open, .release = floppy_release, diff --git a/drivers/block/hd.c b/drivers/block/hd.c index f9d01608cbe..d5cdce08ffd 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -692,7 +692,7 @@ static irqreturn_t hd_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct block_device_operations hd_fops = { +static const struct block_device_operations hd_fops = { .getgeo = hd_getgeo, }; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index bbb79441d89..edda9ea7c62 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1438,7 +1438,7 @@ out_unlocked: return 0; } -static struct block_device_operations lo_fops = { +static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, .release = lo_release, diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 6d7fbaa9224..e0339aaa181 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -775,7 +775,7 @@ static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static struct block_device_operations mg_disk_ops = { +static const struct block_device_operations mg_disk_ops = { .getgeo = mg_getgeo }; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 5d23ffad7c7..cc923a5b430 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -722,7 +722,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, return error; } -static struct block_device_operations nbd_fops = +static const 
struct block_device_operations nbd_fops = { .owner = THIS_MODULE, .locked_ioctl = nbd_ioctl, diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index 13c1aee6aa3..a808b1530b3 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c @@ -125,7 +125,7 @@ static struct class *class_osdblk; /* /sys/class/osdblk */ static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ static LIST_HEAD(osdblkdev_list); -static struct block_device_operations osdblk_bd_ops = { +static const struct block_device_operations osdblk_bd_ops = { .owner = THIS_MODULE, }; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 9f3518c515a..8866ca369d5 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -247,7 +247,7 @@ static int pcd_block_media_changed(struct gendisk *disk) return cdrom_media_changed(&cd->info); } -static struct block_device_operations pcd_bdops = { +static const struct block_device_operations pcd_bdops = { .owner = THIS_MODULE, .open = pcd_block_open, .release = pcd_block_release, diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index bf5955b3d87..569e39e8f11 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -807,7 +807,7 @@ static int pd_revalidate(struct gendisk *p) return 0; } -static struct block_device_operations pd_fops = { +static const struct block_device_operations pd_fops = { .owner = THIS_MODULE, .open = pd_open, .release = pd_release, diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 68a90834e99..ea54ea39355 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -262,7 +262,7 @@ static char *pf_buf; /* buffer for request in progress */ /* kernel glue structures */ -static struct block_device_operations pf_fops = { +static const struct block_device_operations pf_fops = { .owner = THIS_MODULE, .open = pf_open, .release = pf_release, diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index fd5bb8ad59a..2ddf03ae034 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2849,7 +2849,7 @@ static int pkt_media_changed(struct gendisk *disk) return attached_disk->fops->media_changed(attached_disk); } -static struct block_device_operations pktcdvd_ops = { +static const struct block_device_operations pktcdvd_ops = { .owner = THIS_MODULE, .open = pkt_open, .release = pkt_close, diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 34cbb7f3efa..03a130dca8a 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -82,7 +82,7 @@ enum lv1_ata_in_out { static int ps3disk_major; -static struct block_device_operations ps3disk_fops = { +static const struct block_device_operations ps3disk_fops = { .owner = THIS_MODULE, }; diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index c8753a9ed29..3bb7c47c869 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -88,7 +88,7 @@ struct ps3vram_priv { static int ps3vram_major; -static struct block_device_operations ps3vram_fops = { +static const struct block_device_operations ps3vram_fops = { .owner = THIS_MODULE, }; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index cbfd9c0aef0..411f064760b 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -103,7 +103,7 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static struct block_device_operations vdc_fops = { +static const struct block_device_operations vdc_fops = { .owner = THIS_MODULE, .getgeo = vdc_getgeo, }; 
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index cf7877fb8a7..8f569e3df89 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -748,7 +748,7 @@ static int floppy_revalidate(struct gendisk *disk) return !fs->disk_in; } -static struct block_device_operations floppy_fops = { +static const struct block_device_operations floppy_fops = { .owner = THIS_MODULE, .open = floppy_open, .release = floppy_release, diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 80df93e3cdd..e39e3820fef 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -998,7 +998,7 @@ static int floppy_revalidate(struct gendisk *disk) return ret; } -static struct block_device_operations floppy_fops = { +static const struct block_device_operations floppy_fops = { .open = floppy_open, .release = floppy_release, .locked_ioctl = floppy_ioctl, diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index f5cd2e83ebc..a7c4184f4a6 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -423,7 +423,7 @@ static struct pci_driver carm_driver = { .remove = carm_remove_one, }; -static struct block_device_operations carm_bd_ops = { +static const struct block_device_operations carm_bd_ops = { .owner = THIS_MODULE, .getgeo = carm_bdev_getgeo, }; diff --git a/drivers/block/ub.c b/drivers/block/ub.c index cc54473b8e7..c739b203fe9 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -1789,7 +1789,7 @@ static int ub_bd_media_changed(struct gendisk *disk) return lun->changed; } -static struct block_device_operations ub_bd_fops = { +static const struct block_device_operations ub_bd_fops = { .owner = THIS_MODULE, .open = ub_bd_open, .release = ub_bd_release, diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 858c34dd032..ad1ba393801 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -140,7 +140,6 @@ struct cardinfo { }; static struct cardinfo cards[MM_MAXCARDS]; -static struct block_device_operations mm_fops; static struct timer_list battery_timer; static int num_cards; @@ -789,7 +788,7 @@ static int mm_check_change(struct gendisk *disk) return 0; } -static struct block_device_operations mm_fops = { +static const struct block_device_operations mm_fops = { .owner = THIS_MODULE, .getgeo = mm_getgeo, .revalidate_disk = mm_revalidate, diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index b441ce3832e..a8c8b56b275 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c @@ -219,7 +219,7 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) /* * Our file operations table */ -static struct block_device_operations viodasd_fops = { +static const struct block_device_operations viodasd_fops = { .owner = THIS_MODULE, .open = viodasd_open, .release = viodasd_release, diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index aa1a3d5a3e2..aa89fe45237 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -243,7 +243,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) return 0; } -static struct block_device_operations virtblk_fops = { +static const struct block_device_operations virtblk_fops = { .locked_ioctl = virtblk_ioctl, .owner = THIS_MODULE, .getgeo = virtblk_getgeo, diff --git a/drivers/block/xd.c b/drivers/block/xd.c index ce242921992..0877d3628fd 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -130,7 +130,7 @@ static struct gendisk *xd_gendisk[2]; static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo); -static struct 
block_device_operations xd_fops = { +static const struct block_device_operations xd_fops = { .owner = THIS_MODULE, .locked_ioctl = xd_ioctl, .getgeo = xd_getgeo, diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e53284767f7..b8578bb3f4c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -65,7 +65,7 @@ struct blk_shadow { unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; -static struct block_device_operations xlvbd_block_fops; +static const struct block_device_operations xlvbd_block_fops; #define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) @@ -1039,7 +1039,7 @@ static int blkif_release(struct gendisk *disk, fmode_t mode) return 0; } -static struct block_device_operations xlvbd_block_fops = +static const struct block_device_operations xlvbd_block_fops = { .owner = THIS_MODULE, .open = blkif_open, diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index b20abe102a2..e5c5415eb45 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -941,7 +941,7 @@ static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static struct block_device_operations ace_fops = { +static const struct block_device_operations ace_fops = { .owner = THIS_MODULE, .open = ace_open, .release = ace_release, diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index b2590409f25..64f941e0f14 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -64,7 +64,6 @@ static int current_device = -1; static DEFINE_SPINLOCK(z2ram_lock); -static struct block_device_operations z2_fops; static struct gendisk *z2ram_gendisk; static void do_z2_request(struct request_queue *q) @@ -315,7 +314,7 @@ z2_release(struct gendisk *disk, fmode_t mode) return 0; } -static struct block_device_operations z2_fops = +static const struct block_device_operations z2_fops = { .owner = THIS_MODULE, .open = z2_open, diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index b5621f27c4b..a762283d2a2 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -512,7 +512,7 @@ static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg); } -static struct block_device_operations gdrom_bdops = { +static const struct block_device_operations gdrom_bdops = { .owner = THIS_MODULE, .open = gdrom_bdops_open, .release = gdrom_bdops_release, diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 0fff646cc2f..57ca69e0ac5 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -177,7 +177,7 @@ static int viocd_blk_media_changed(struct gendisk *disk) return cdrom_media_changed(&di->viocd_info); } -struct block_device_operations viocd_fops = { +static const struct block_device_operations viocd_fops = { .owner = THIS_MODULE, .open = viocd_blk_open, .release = viocd_blk_release, diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index b79ca419d8d..64207df8da8 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -1686,7 +1686,7 @@ static int idecd_revalidate_disk(struct gendisk *disk) return 0; } -static struct block_device_operations idecd_ops = { +static const struct block_device_operations idecd_ops = { .owner = THIS_MODULE, .open = idecd_open, .release = idecd_release, diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index 214119026b3..753241429c2 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c @@ -321,7 +321,7 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode, return 
drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg); } -static struct block_device_operations ide_gd_ops = { +static const struct block_device_operations ide_gd_ops = { .owner = THIS_MODULE, .open = ide_gd_open, .release = ide_gd_release, diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 9d6f62baac2..58fc920d5c3 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -1913,7 +1913,7 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode, return err; } -static struct block_device_operations idetape_block_ops = { +static const struct block_device_operations idetape_block_ops = { .owner = THIS_MODULE, .open = idetape_open, .release = idetape_release, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index eee28fac210..376f1ab48a2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1716,7 +1716,7 @@ out: return r; } -static struct block_device_operations dm_blk_dops; +static const struct block_device_operations dm_blk_dops; static void dm_wq_work(struct work_struct *work); @@ -2663,7 +2663,7 @@ void dm_free_md_mempools(struct dm_md_mempools *pools) kfree(pools); } -static struct block_device_operations dm_blk_dops = { +static const struct block_device_operations dm_blk_dops = { .open = dm_blk_open, .release = dm_blk_close, .ioctl = dm_blk_ioctl, diff --git a/drivers/md/md.c b/drivers/md/md.c index 9dd872000ce..6aa497e4baf 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -138,7 +138,7 @@ static ctl_table raid_root_table[] = { { .ctl_name = 0 } }; -static struct block_device_operations md_fops; +static const struct block_device_operations md_fops; static int start_readonly; @@ -5556,7 +5556,7 @@ static int md_revalidate(struct gendisk *disk) mddev->changed = 0; return 0; } -static struct block_device_operations md_fops = +static const struct block_device_operations md_fops = { .owner = THIS_MODULE, .open = md_open, diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 7847bbc1440..bd83fa0a497 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -235,7 +235,7 @@ static int mspro_block_bd_getgeo(struct block_device *bdev, return 0; } -static struct block_device_operations ms_block_bdops = { +static const struct block_device_operations ms_block_bdops = { .open = mspro_block_bd_open, .release = mspro_block_bd_release, .getgeo = mspro_block_bd_getgeo, diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 335d4c78a77..d505b68cd37 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -925,7 +925,7 @@ static void i2o_block_request_fn(struct request_queue *q) }; /* I2O Block device operations definition */ -static struct block_device_operations i2o_block_fops = { +static const struct block_device_operations i2o_block_fops = { .owner = THIS_MODULE, .open = i2o_block_open, .release = i2o_block_release, diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index adc205c49fb..85f0e8cd875 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -130,7 +130,7 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static struct block_device_operations mmc_bdops = { +static const struct block_device_operations mmc_bdops = { .open = mmc_blk_open, .release = mmc_blk_release, .getgeo = mmc_blk_getgeo, diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 7baba40c1ed..0acbf4f5be5 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ 
-210,7 +210,7 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, } } -static struct block_device_operations mtd_blktrans_ops = { +static const struct block_device_operations mtd_blktrans_ops = { .owner = THIS_MODULE, .open = blktrans_open, .release = blktrans_release, diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e109da4583a..dad0449475b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2146,7 +2146,7 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -struct block_device_operations +const struct block_device_operations dasd_device_operations = { .owner = THIS_MODULE, .open = dasd_open, diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 5e47a1ee52b..8afd9fa0087 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -540,7 +540,7 @@ dasd_check_blocksize(int bsize) extern debug_info_t *dasd_debug_area; extern struct dasd_profile_info_t dasd_global_profile; extern unsigned int dasd_profile_level; -extern struct block_device_operations dasd_device_operations; +extern const struct block_device_operations dasd_device_operations; extern struct kmem_cache *dasd_page_cache; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index d34617682a6..f76f4bd82b9 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -34,7 +34,7 @@ static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; static int dcssblk_major; -static struct block_device_operations dcssblk_devops = { +static const struct block_device_operations dcssblk_devops = { .owner = THIS_MODULE, .open = dcssblk_open, .release = dcssblk_release, diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index ee604e92a5f..116d1b3eeb1 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -244,7 +244,7 @@ static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static struct block_device_operations xpram_devops = +static const struct block_device_operations xpram_devops = { .owner = THIS_MODULE, .getgeo = xpram_getgeo, diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 4cb9e70507a..64f57ef2763 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -50,7 +50,7 @@ static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int, static int tapeblock_medium_changed(struct gendisk *); static int tapeblock_revalidate_disk(struct gendisk *); -static struct block_device_operations tapeblock_fops = { +static const struct block_device_operations tapeblock_fops = { .owner = THIS_MODULE, .open = tapeblock_open, .release = tapeblock_release, diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index 6d465168468..869a30b49ed 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -452,7 +452,7 @@ static const struct file_operations jsf_fops = { static struct miscdevice jsf_dev = { JSF_MINOR, "jsflash", &jsf_fops }; -static struct block_device_operations jsfd_fops = { +static const struct block_device_operations jsfd_fops = { .owner = THIS_MODULE, }; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a89c421dab5..8dd96dcd716 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -956,7 +956,7 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, } #endif -static struct 
block_device_operations sd_fops = { +static const struct block_device_operations sd_fops = { .owner = THIS_MODULE, .open = sd_open, .release = sd_release, diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index cce0fe4c8a3..eb61f7a70e1 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -525,7 +525,7 @@ static int sr_block_media_changed(struct gendisk *disk) return cdrom_media_changed(&cd->cdi); } -static struct block_device_operations sr_bdops = +static const struct block_device_operations sr_bdops = { .owner = THIS_MODULE, .open = sr_block_open, diff --git a/fs/block_dev.c b/fs/block_dev.c index 71e7e03ac34..5d1ed50bd46 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1114,7 +1114,7 @@ EXPORT_SYMBOL(revalidate_disk); int check_disk_change(struct block_device *bdev) { struct gendisk *disk = bdev->bd_disk; - struct block_device_operations * bdops = disk->fops; + const struct block_device_operations *bdops = disk->fops; if (!bdops->media_changed) return 0; diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c index b72b8588422..c18fbf3e406 100644 --- a/fs/ext2/xip.c +++ b/fs/ext2/xip.c @@ -20,7 +20,7 @@ __inode_direct_access(struct inode *inode, sector_t block, void **kaddr, unsigned long *pfn) { struct block_device *bdev = inode->i_sb->s_bdev; - struct block_device_operations *ops = bdev->bd_disk->fops; + const struct block_device_operations *ops = bdev->bd_disk->fops; sector_t sector; sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */ diff --git a/fs/partitions/check.c b/fs/partitions/check.c index fbeaddf595d..7b685e10cba 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -581,7 +581,7 @@ try_scan: } if (from + size > get_capacity(disk)) { - struct block_device_operations *bdops = disk->fops; + const struct block_device_operations *bdops = disk->fops; unsigned long long capacity; printk(KERN_WARNING diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 109d179adb9..297df45ffd0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -151,7 +151,7 @@ struct gendisk { struct disk_part_tbl *part_tbl; struct hd_struct part0; - struct block_device_operations *fops; + const struct block_device_operations *fops; struct request_queue *queue; void *private_data; -- cgit v1.2.3-70-g09d2 From a5abeeacc44bbef2935a7a8e939264c28962def2 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 21 Sep 2009 17:01:13 -0700 Subject: mm: make swap token dummies static inlines Make use of the compiler's typechecking on !CONFIG_SWAP as well. 
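To see what this buys, here is a stand-alone sketch (not part of the patch; the caller and its "bogus" argument are invented for illustration) of the difference between an empty macro dummy and a static inline dummy:

struct mm_struct;       /* a forward declaration is all the dummies need */

#define put_swap_token_as_macro(mm)     do { } while (0)

static inline void put_swap_token_as_inline(struct mm_struct *mm)
{
}

static void example_caller(struct mm_struct *mm, int bogus)
{
        put_swap_token_as_macro(bogus);  /* wrong argument, silently accepted */
        put_swap_token_as_inline(mm);    /* argument type is checked */
        /* put_swap_token_as_inline(bogus) would now fail to compile */
}

Because the inline stubs carry the same prototypes as the real implementations in mm/thrash.c, callers are checked identically whether or not CONFIG_SWAP is set.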
[akpm@linux-foundation.org: build fix] Signed-off-by: Johannes Weiner Reviewed-by: Rik van Riel Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 7c15334f3ff..6c990e658f4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -419,10 +419,22 @@ static inline swp_entry_t get_swap_page(void) } /* linux/mm/thrash.c */ -#define put_swap_token(mm) do { } while (0) -#define grab_swap_token(mm) do { } while (0) -#define has_swap_token(mm) 0 -#define disable_swap_token() do { } while (0) +static inline void put_swap_token(struct mm_struct *mm) +{ +} + +static inline void grab_swap_token(struct mm_struct *mm) +{ +} + +static inline int has_swap_token(struct mm_struct *mm) +{ + return 0; +} + +static inline void disable_swap_token(void) +{ +} static inline void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) -- cgit v1.2.3-70-g09d2 From 112067f0905b2de862c607ee62411cf47d2fe5c4 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 21 Sep 2009 17:01:16 -0700 Subject: memory hotplug: update zone pcp at memory online In my test, 128M memory is hot added, but zone's pcp batch is 0, which is an obvious error. When pages are onlined, zone pcp should be updated accordingly. [akpm@linux-foundation.org: fix warnings] Signed-off-by: Shaohua Li Cc: Mel Gorman Cc: Christoph Lameter Cc: Yakui Zhao Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ mm/memory_hotplug.c | 1 + mm/page_alloc.c | 26 ++++++++++++++++++++++++++ 3 files changed, 29 insertions(+) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 9a72cc78e6b..d3c8ae7c801 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1058,6 +1058,8 @@ extern void setup_per_cpu_pageset(void); static inline void setup_per_cpu_pageset(void) {} #endif +extern void zone_pcp_update(struct zone *zone); + /* nommu.c */ extern atomic_long_t mmap_pages_allocated; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e4412a676c8..616236e6343 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -422,6 +422,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) zone->present_pages += onlined_pages; zone->zone_pgdat->node_present_pages += onlined_pages; + zone_pcp_update(zone); setup_per_zone_wmarks(); calculate_zone_inactive_ratio(zone); if (onlined_pages) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f4e929e356d..1a3a893ef50 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3142,6 +3142,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) return 0; } +static int __zone_pcp_update(void *data) +{ + struct zone *zone = data; + int cpu; + unsigned long batch = zone_batchsize(zone), flags; + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; + + pset = zone_pcp(zone, cpu); + pcp = &pset->pcp; + + local_irq_save(flags); + free_pages_bulk(zone, pcp->count, &pcp->list, 0); + setup_pageset(pset, batch); + local_irq_restore(flags); + } + return 0; +} + +void zone_pcp_update(struct zone *zone) +{ + stop_machine(__zone_pcp_update, zone, NULL); +} + static __meminit void zone_pcp_init(struct zone *zone) { int cpu; -- cgit v1.2.3-70-g09d2 From e8c5c8249878fb6564125680a1d15e06adbd5639 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 21 Sep 2009 17:01:22 -0700 
Subject: hugetlb: balance freeing of huge pages across nodes Free huges pages from nodes in round robin fashion in an attempt to keep [persistent a.k.a static] hugepages balanced across nodes New function free_pool_huge_page() is modeled on and performs roughly the inverse of alloc_fresh_huge_page(). Replaces dequeue_huge_page() which now has no callers, so this patch removes it. Helper function hstate_next_node_to_free() uses new hstate member next_to_free_nid to distribute "frees" across all nodes with huge pages. Acked-by: David Rientjes Signed-off-by: Lee Schermerhorn Acked-by: Mel Gorman Cc: Nishanth Aravamudan Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 3 +- mm/hugetlb.c | 132 +++++++++++++++++++++++++++++++----------------- 2 files changed, 88 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 5cbc620bdfe..16cdb75a543 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -185,7 +185,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, #define HSTATE_NAME_LEN 32 /* Defines one hugetlb page size */ struct hstate { - int hugetlb_next_nid; + int next_nid_to_alloc; + int next_nid_to_free; unsigned int order; unsigned long mask; unsigned long max_huge_pages; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b16d6363477..38dab558682 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -456,24 +456,6 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) h->free_huge_pages_node[nid]++; } -static struct page *dequeue_huge_page(struct hstate *h) -{ - int nid; - struct page *page = NULL; - - for (nid = 0; nid < MAX_NUMNODES; ++nid) { - if (!list_empty(&h->hugepage_freelists[nid])) { - page = list_entry(h->hugepage_freelists[nid].next, - struct page, lru); - list_del(&page->lru); - h->free_huge_pages--; - h->free_huge_pages_node[nid]--; - break; - } - } - return page; -} - static struct page *dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve) @@ -641,7 +623,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) /* * Use a helper variable to find the next node and then - * copy it back to hugetlb_next_nid afterwards: + * copy it back to next_nid_to_alloc afterwards: * otherwise there's a window in which a racer might * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node. * But we don't need to use a spin_lock here: it really @@ -650,13 +632,13 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) * if we just successfully allocated a hugepage so that * the next caller gets hugepages on the next node. 
*/ -static int hstate_next_node(struct hstate *h) +static int hstate_next_node_to_alloc(struct hstate *h) { int next_nid; - next_nid = next_node(h->hugetlb_next_nid, node_online_map); + next_nid = next_node(h->next_nid_to_alloc, node_online_map); if (next_nid == MAX_NUMNODES) next_nid = first_node(node_online_map); - h->hugetlb_next_nid = next_nid; + h->next_nid_to_alloc = next_nid; return next_nid; } @@ -667,14 +649,15 @@ static int alloc_fresh_huge_page(struct hstate *h) int next_nid; int ret = 0; - start_nid = h->hugetlb_next_nid; + start_nid = h->next_nid_to_alloc; + next_nid = start_nid; do { - page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid); + page = alloc_fresh_huge_page_node(h, next_nid); if (page) ret = 1; - next_nid = hstate_next_node(h); - } while (!page && h->hugetlb_next_nid != start_nid); + next_nid = hstate_next_node_to_alloc(h); + } while (!page && next_nid != start_nid); if (ret) count_vm_event(HTLB_BUDDY_PGALLOC); @@ -684,6 +667,52 @@ static int alloc_fresh_huge_page(struct hstate *h) return ret; } +/* + * helper for free_pool_huge_page() - find next node + * from which to free a huge page + */ +static int hstate_next_node_to_free(struct hstate *h) +{ + int next_nid; + next_nid = next_node(h->next_nid_to_free, node_online_map); + if (next_nid == MAX_NUMNODES) + next_nid = first_node(node_online_map); + h->next_nid_to_free = next_nid; + return next_nid; +} + +/* + * Free huge page from pool from next node to free. + * Attempt to keep persistent huge pages more or less + * balanced over allowed nodes. + * Called with hugetlb_lock locked. + */ +static int free_pool_huge_page(struct hstate *h) +{ + int start_nid; + int next_nid; + int ret = 0; + + start_nid = h->next_nid_to_free; + next_nid = start_nid; + + do { + if (!list_empty(&h->hugepage_freelists[next_nid])) { + struct page *page = + list_entry(h->hugepage_freelists[next_nid].next, + struct page, lru); + list_del(&page->lru); + h->free_huge_pages--; + h->free_huge_pages_node[next_nid]--; + update_and_free_page(h, page); + ret = 1; + } + next_nid = hstate_next_node_to_free(h); + } while (!ret && next_nid != start_nid); + + return ret; +} + static struct page *alloc_buddy_huge_page(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { @@ -1008,7 +1037,7 @@ int __weak alloc_bootmem_huge_page(struct hstate *h) void *addr; addr = __alloc_bootmem_node_nopanic( - NODE_DATA(h->hugetlb_next_nid), + NODE_DATA(h->next_nid_to_alloc), huge_page_size(h), huge_page_size(h), 0); if (addr) { @@ -1020,7 +1049,7 @@ int __weak alloc_bootmem_huge_page(struct hstate *h) m = addr; goto found; } - hstate_next_node(h); + hstate_next_node_to_alloc(h); nr_nodes--; } return 0; @@ -1141,31 +1170,43 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count) */ static int adjust_pool_surplus(struct hstate *h, int delta) { - static int prev_nid; - int nid = prev_nid; + int start_nid, next_nid; int ret = 0; VM_BUG_ON(delta != -1 && delta != 1); - do { - nid = next_node(nid, node_online_map); - if (nid == MAX_NUMNODES) - nid = first_node(node_online_map); - /* To shrink on this node, there must be a surplus page */ - if (delta < 0 && !h->surplus_huge_pages_node[nid]) - continue; - /* Surplus cannot exceed the total number of pages */ - if (delta > 0 && h->surplus_huge_pages_node[nid] >= + if (delta < 0) + start_nid = h->next_nid_to_alloc; + else + start_nid = h->next_nid_to_free; + next_nid = start_nid; + + do { + int nid = next_nid; + if (delta < 0) { + next_nid = hstate_next_node_to_alloc(h); + /* + * To 
shrink on this node, there must be a surplus page + */ + if (!h->surplus_huge_pages_node[nid]) + continue; + } + if (delta > 0) { + next_nid = hstate_next_node_to_free(h); + /* + * Surplus cannot exceed the total number of pages + */ + if (h->surplus_huge_pages_node[nid] >= h->nr_huge_pages_node[nid]) - continue; + continue; + } h->surplus_huge_pages += delta; h->surplus_huge_pages_node[nid] += delta; ret = 1; break; - } while (nid != prev_nid); + } while (next_nid != start_nid); - prev_nid = nid; return ret; } @@ -1227,10 +1268,8 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) min_count = max(count, min_count); try_to_free_low(h, min_count); while (min_count < persistent_huge_pages(h)) { - struct page *page = dequeue_huge_page(h); - if (!page) + if (!free_pool_huge_page(h)) break; - update_and_free_page(h, page); } while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, 1)) @@ -1442,7 +1481,8 @@ void __init hugetlb_add_hstate(unsigned order) h->free_huge_pages = 0; for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); - h->hugetlb_next_nid = first_node(node_online_map); + h->next_nid_to_alloc = first_node(node_online_map); + h->next_nid_to_free = first_node(node_online_map); snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/1024); -- cgit v1.2.3-70-g09d2 From c6a7f5728a1db45d30df55a01adc130b4ab0327c Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Mon, 21 Sep 2009 17:01:32 -0700 Subject: mm: oom analysis: Show kernel stack usage in /proc/meminfo and OOM log output The amount of memory allocated to kernel stacks can become significant and cause OOM conditions. However, we do not display the amount of memory consumed by stacks. Add code to display the amount of memory used for stacks in /proc/meminfo. 
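As a back-of-the-envelope check of the reporting arithmetic used below (the new NR_KERNEL_STACK counter is bumped once per task stack, and the /proc output scales it by THREAD_SIZE / 1024 to print kilobytes), here is a stand-alone sketch with assumed example values:

#include <stdio.h>

int main(void)
{
        unsigned long nr_kernel_stack = 1500;   /* assumed: one count per task */
        unsigned long thread_size = 8192;       /* assumed THREAD_SIZE, 8 KiB stacks */

        printf("KernelStack:    %8lu kB\n",
               nr_kernel_stack * thread_size / 1024);   /* prints 12000 kB */
        return 0;
}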
Signed-off-by: KOSAKI Motohiro Reviewed-by: Christoph Lameter Reviewed-by: Minchan Kim Reviewed-by: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 3 +++ fs/proc/meminfo.c | 2 ++ include/linux/mmzone.h | 3 ++- kernel/fork.c | 11 +++++++++++ mm/page_alloc.c | 3 +++ mm/vmstat.c | 1 + 6 files changed, 22 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/base/node.c b/drivers/base/node.c index 91d4087b403..b560c17f6d4 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -85,6 +85,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, "Node %d FilePages: %8lu kB\n" "Node %d Mapped: %8lu kB\n" "Node %d AnonPages: %8lu kB\n" + "Node %d KernelStack: %8lu kB\n" "Node %d PageTables: %8lu kB\n" "Node %d NFS_Unstable: %8lu kB\n" "Node %d Bounce: %8lu kB\n" @@ -116,6 +117,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_FILE_PAGES)), nid, K(node_page_state(nid, NR_FILE_MAPPED)), nid, K(node_page_state(nid, NR_ANON_PAGES)), + nid, node_page_state(nid, NR_KERNEL_STACK) * + THREAD_SIZE / 1024, nid, K(node_page_state(nid, NR_PAGETABLE)), nid, K(node_page_state(nid, NR_UNSTABLE_NFS)), nid, K(node_page_state(nid, NR_BOUNCE)), diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index d5c410d47fa..1fc588f430e 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -84,6 +84,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) "Slab: %8lu kB\n" "SReclaimable: %8lu kB\n" "SUnreclaim: %8lu kB\n" + "KernelStack: %8lu kB\n" "PageTables: %8lu kB\n" #ifdef CONFIG_QUICKLIST "Quicklists: %8lu kB\n" @@ -128,6 +129,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) global_page_state(NR_SLAB_UNRECLAIMABLE)), K(global_page_state(NR_SLAB_RECLAIMABLE)), K(global_page_state(NR_SLAB_UNRECLAIMABLE)), + global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024, K(global_page_state(NR_PAGETABLE)), #ifdef CONFIG_QUICKLIST K(quicklist_total_size()), diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 88959853737..d9335b8de84 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -94,10 +94,11 @@ enum zone_stat_item { NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, NR_PAGETABLE, /* used for pagetables */ + NR_KERNEL_STACK, + /* Second 128 byte cacheline */ NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_BOUNCE, NR_VMSCAN_WRITE, - /* Second 128 byte cacheline */ NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ diff --git a/kernel/fork.c b/kernel/fork.c index 2cebfb23b0b..d4638c8cc19 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -136,9 +136,17 @@ struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; +static void account_kernel_stack(struct thread_info *ti, int account) +{ + struct zone *zone = page_zone(virt_to_page(ti)); + + mod_zone_page_state(zone, NR_KERNEL_STACK, account); +} + void free_task(struct task_struct *tsk) { prop_local_destroy_single(&tsk->dirties); + account_kernel_stack(tsk->stack, -1); free_thread_info(tsk->stack); rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); @@ -253,6 +261,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) tsk->btrace_seq = 0; #endif tsk->splice_pipe = NULL; + + account_kernel_stack(ti, 1); + return tsk; out: diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 494c09196c3..4e050f325eb 100644 --- a/mm/page_alloc.c +++ 
b/mm/page_alloc.c @@ -2177,6 +2177,7 @@ void show_free_areas(void) " mapped:%lukB" " slab_reclaimable:%lukB" " slab_unreclaimable:%lukB" + " kernel_stack:%lukB" " pagetables:%lukB" " unstable:%lukB" " bounce:%lukB" @@ -2201,6 +2202,8 @@ void show_free_areas(void) K(zone_page_state(zone, NR_FILE_MAPPED)), K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), + zone_page_state(zone, NR_KERNEL_STACK) * + THREAD_SIZE / 1024, K(zone_page_state(zone, NR_PAGETABLE)), K(zone_page_state(zone, NR_UNSTABLE_NFS)), K(zone_page_state(zone, NR_BOUNCE)), diff --git a/mm/vmstat.c b/mm/vmstat.c index 138bed53706..ceda39b63d7 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -639,6 +639,7 @@ static const char * const vmstat_text[] = { "nr_slab_reclaimable", "nr_slab_unreclaimable", "nr_page_table_pages", + "nr_kernel_stack", "nr_unstable", "nr_bounce", "nr_vmscan_write", -- cgit v1.2.3-70-g09d2 From 4b02108ac1b3354a22b0d83c684797692efdc395 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Mon, 21 Sep 2009 17:01:33 -0700 Subject: mm: oom analysis: add shmem vmstat Recently we encountered OOM problems due to memory use of the GEM cache. Generally a large amount of Shmem/Tmpfs pages tends to create a memory shortage problem. We often use the following calculation to determine the amount of shmem pages: shmem = NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES; however, the expression does not consider isolated and mlocked pages. This patch adds explicit accounting for pages used by shmem and tmpfs. Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Reviewed-by: Christoph Lameter Acked-by: Wu Fengguang Cc: David Rientjes Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 2 ++ fs/proc/meminfo.c | 2 ++ include/linux/mmzone.h | 1 + mm/filemap.c | 4 ++++ mm/migrate.c | 5 ++++- mm/page_alloc.c | 5 ++++- mm/vmstat.c | 2 +- 7 files changed, 18 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/base/node.c b/drivers/base/node.c index b560c17f6d4..1fe5536d404 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -85,6 +85,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, "Node %d FilePages: %8lu kB\n" "Node %d Mapped: %8lu kB\n" "Node %d AnonPages: %8lu kB\n" + "Node %d Shmem: %8lu kB\n" "Node %d KernelStack: %8lu kB\n" "Node %d PageTables: %8lu kB\n" "Node %d NFS_Unstable: %8lu kB\n" @@ -117,6 +118,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_FILE_PAGES)), nid, K(node_page_state(nid, NR_FILE_MAPPED)), nid, K(node_page_state(nid, NR_ANON_PAGES)), + nid, K(node_page_state(nid, NR_SHMEM)), nid, node_page_state(nid, NR_KERNEL_STACK) * THREAD_SIZE / 1024, nid, K(node_page_state(nid, NR_PAGETABLE)), diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 1fc588f430e..171e052c07b 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -81,6 +81,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) "Writeback: %8lu kB\n" "AnonPages: %8lu kB\n" "Mapped: %8lu kB\n" + "Shmem: %8lu kB\n" "Slab: %8lu kB\n" "SReclaimable: %8lu kB\n" "SUnreclaim: %8lu kB\n" @@ -125,6 +126,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) K(global_page_state(NR_WRITEBACK)), K(global_page_state(NR_ANON_PAGES)), K(global_page_state(NR_FILE_MAPPED)), + K(global_page_state(NR_SHMEM)), K(global_page_state(NR_SLAB_RECLAIMABLE) + global_page_state(NR_SLAB_UNRECLAIMABLE)), K(global_page_state(NR_SLAB_RECLAIMABLE)), diff --git a/include/linux/mmzone.h
b/include/linux/mmzone.h index d9335b8de84..b3583b93b77 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -100,6 +100,7 @@ enum zone_stat_item { NR_BOUNCE, NR_VMSCAN_WRITE, NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ NUMA_MISS, /* allocated in non intended node */ diff --git a/mm/filemap.c b/mm/filemap.c index dd51c68e2b8..bcc7372aebb 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -119,6 +119,8 @@ void __remove_from_page_cache(struct page *page) page->mapping = NULL; mapping->nrpages--; __dec_zone_page_state(page, NR_FILE_PAGES); + if (PageSwapBacked(page)) + __dec_zone_page_state(page, NR_SHMEM); BUG_ON(page_mapped(page)); /* @@ -431,6 +433,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, if (likely(!error)) { mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); + if (PageSwapBacked(page)) + __inc_zone_page_state(page, NR_SHMEM); spin_unlock_irq(&mapping->tree_lock); } else { page->mapping = NULL; diff --git a/mm/migrate.c b/mm/migrate.c index 0edeac91348..37143b92448 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -312,7 +312,10 @@ static int migrate_page_move_mapping(struct address_space *mapping, */ __dec_zone_page_state(page, NR_FILE_PAGES); __inc_zone_page_state(newpage, NR_FILE_PAGES); - + if (PageSwapBacked(page)) { + __dec_zone_page_state(page, NR_SHMEM); + __inc_zone_page_state(newpage, NR_SHMEM); + } spin_unlock_irq(&mapping->tree_lock); return 0; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4e050f325eb..e50c22545b8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2139,7 +2139,7 @@ void show_free_areas(void) " unevictable:%lu" " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n" " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" - " mapped:%lu pagetables:%lu bounce:%lu\n", + " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_ACTIVE_FILE), global_page_state(NR_INACTIVE_ANON), @@ -2153,6 +2153,7 @@ void show_free_areas(void) global_page_state(NR_SLAB_RECLAIMABLE), global_page_state(NR_SLAB_UNRECLAIMABLE), global_page_state(NR_FILE_MAPPED), + global_page_state(NR_SHMEM), global_page_state(NR_PAGETABLE), global_page_state(NR_BOUNCE)); @@ -2175,6 +2176,7 @@ void show_free_areas(void) " dirty:%lukB" " writeback:%lukB" " mapped:%lukB" + " shmem:%lukB" " slab_reclaimable:%lukB" " slab_unreclaimable:%lukB" " kernel_stack:%lukB" @@ -2200,6 +2202,7 @@ void show_free_areas(void) K(zone_page_state(zone, NR_FILE_DIRTY)), K(zone_page_state(zone, NR_WRITEBACK)), K(zone_page_state(zone, NR_FILE_MAPPED)), + K(zone_page_state(zone, NR_SHMEM)), K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), zone_page_state(zone, NR_KERNEL_STACK) * diff --git a/mm/vmstat.c b/mm/vmstat.c index ceda39b63d7..7214a451125 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -644,7 +644,7 @@ static const char * const vmstat_text[] = { "nr_bounce", "nr_vmscan_write", "nr_writeback_temp", - + "nr_shmem", #ifdef CONFIG_NUMA "numa_hit", "numa_miss", -- cgit v1.2.3-70-g09d2 From a731286de62294b63d8ceb3c5914ac52cc17e690 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Mon, 21 Sep 2009 17:01:37 -0700 Subject: mm: vmstat: add isolate pages If the system is running a heavy load of processes then concurrent reclaim can isolate a large number of pages from the LRU. 
/proc/vmstat and the output generated for an OOM do not show how many pages were isolated. This has been observed during process fork bomb testing (mstctl11 in LTP). This patch shows the information about isolated pages. Reproduced via: ----------------------- % ./hackbench 140 process 1000 => OOM occur active_anon:146 inactive_anon:0 isolated_anon:49245 active_file:79 inactive_file:18 isolated_file:113 unevictable:0 dirty:0 writeback:0 unstable:0 buffer:39 free:370 slab_reclaimable:309 slab_unreclaimable:5492 mapped:53 shmem:15 pagetables:28140 bounce:0 Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Acked-by: Wu Fengguang Reviewed-by: Minchan Kim Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 2 ++ mm/migrate.c | 11 +++++++++++ mm/page_alloc.c | 12 +++++++++--- mm/vmscan.c | 12 +++++++++++- mm/vmstat.c | 2 ++ 5 files changed, 35 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b3583b93b77..9c50309b30a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -100,6 +100,8 @@ enum zone_stat_item { NR_BOUNCE, NR_VMSCAN_WRITE, NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ + NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ + NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ diff --git a/mm/migrate.c b/mm/migrate.c index 37143b92448..b535a2c1656 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -67,6 +67,8 @@ int putback_lru_pages(struct list_head *l) list_for_each_entry_safe(page, page2, l, lru) { list_del(&page->lru); + dec_zone_page_state(page, NR_ISOLATED_ANON + + !!page_is_file_cache(page)); putback_lru_page(page); count++; } @@ -698,6 +700,8 @@ unlock: * restored. 
*/ list_del(&page->lru); + dec_zone_page_state(page, NR_ISOLATED_ANON + + !!page_is_file_cache(page)); putback_lru_page(page); } @@ -742,6 +746,13 @@ int migrate_pages(struct list_head *from, struct page *page2; int swapwrite = current->flags & PF_SWAPWRITE; int rc; + unsigned long flags; + + local_irq_save(flags); + list_for_each_entry(page, from, lru) + __inc_zone_page_state(page, NR_ISOLATED_ANON + + !!page_is_file_cache(page)); + local_irq_restore(flags); if (!swapwrite) current->flags |= PF_SWAPWRITE; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b6d0d09557e..afda8fd1648 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2134,16 +2134,18 @@ void show_free_areas(void) } } - printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n" - " inactive_file:%lu" + printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" + " active_file:%lu inactive_file:%lu isolated_file:%lu\n" " unevictable:%lu" " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n" " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", global_page_state(NR_ACTIVE_ANON), - global_page_state(NR_ACTIVE_FILE), global_page_state(NR_INACTIVE_ANON), + global_page_state(NR_ISOLATED_ANON), + global_page_state(NR_ACTIVE_FILE), global_page_state(NR_INACTIVE_FILE), + global_page_state(NR_ISOLATED_FILE), global_page_state(NR_UNEVICTABLE), global_page_state(NR_FILE_DIRTY), global_page_state(NR_WRITEBACK), @@ -2171,6 +2173,8 @@ void show_free_areas(void) " active_file:%lukB" " inactive_file:%lukB" " unevictable:%lukB" + " isolated(anon):%lukB" + " isolated(file):%lukB" " present:%lukB" " mlocked:%lukB" " dirty:%lukB" @@ -2197,6 +2201,8 @@ void show_free_areas(void) K(zone_page_state(zone, NR_ACTIVE_FILE)), K(zone_page_state(zone, NR_INACTIVE_FILE)), K(zone_page_state(zone, NR_UNEVICTABLE)), + K(zone_page_state(zone, NR_ISOLATED_ANON)), + K(zone_page_state(zone, NR_ISOLATED_FILE)), K(zone->present_pages), K(zone_page_state(zone, NR_MLOCK)), K(zone_page_state(zone, NR_FILE_DIRTY)), diff --git a/mm/vmscan.c b/mm/vmscan.c index d86a91f8c16..75c29974e87 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1072,6 +1072,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, unsigned long nr_active; unsigned int count[NR_LRU_LISTS] = { 0, }; int mode = lumpy_reclaim ? 
ISOLATE_BOTH : ISOLATE_INACTIVE; + unsigned long nr_anon; + unsigned long nr_file; nr_taken = sc->isolate_pages(sc->swap_cluster_max, &page_list, &nr_scan, sc->order, mode, @@ -1102,6 +1104,10 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, __mod_zone_page_state(zone, NR_INACTIVE_ANON, -count[LRU_INACTIVE_ANON]); + nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON]; + nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE]; + __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon); + __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file); reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON]; @@ -1169,6 +1175,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, spin_lock_irq(&zone->lru_lock); } } + __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon); + __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file); + } while (nr_scanned < max_scan); done: @@ -1279,6 +1288,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken); else __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken); + __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); spin_unlock_irq(&zone->lru_lock); while (!list_empty(&l_hold)) { @@ -1329,7 +1339,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, LRU_ACTIVE + file * LRU_FILE); move_active_pages_to_lru(zone, &l_inactive, LRU_BASE + file * LRU_FILE); - + __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); spin_unlock_irq(&zone->lru_lock); } diff --git a/mm/vmstat.c b/mm/vmstat.c index 7214a451125..c81321f9fee 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -644,6 +644,8 @@ static const char * const vmstat_text[] = { "nr_bounce", "nr_vmscan_write", "nr_writeback_temp", + "nr_isolated_anon", + "nr_isolated_file", "nr_shmem", #ifdef CONFIG_NUMA "numa_hit", -- cgit v1.2.3-70-g09d2 From 5a2ae913f5229d6e1d4a666f0477350789d5128e Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Mon, 21 Sep 2009 17:01:39 -0700 Subject: mm: remove __{add,sub}_zone_page_state() __add_zone_page_state() and __sub_zone_page_state() are unused. Signed-off-by: KOSAKI Motohiro Cc: Wu Fengguang Cc: Rik van Riel Cc: Minchan Kim Cc: Christoph Lameter Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 81a97cf8f0a..d7f577f49d1 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -210,11 +210,6 @@ extern void zone_statistics(struct zone *, struct zone *); #endif /* CONFIG_NUMA */ -#define __add_zone_page_state(__z, __i, __d) \ - __mod_zone_page_state(__z, __i, __d) -#define __sub_zone_page_state(__z, __i, __d) \ - __mod_zone_page_state(__z, __i,-(__d)) - #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) -- cgit v1.2.3-70-g09d2 From adea02a1bea71a508da32c04d715485a1fe62029 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Mon, 21 Sep 2009 17:01:42 -0700 Subject: mm: count only reclaimable lru pages global_lru_pages() / zone_lru_pages() can be used in two ways: - to estimate max reclaimable pages in determine_dirtyable_memory() - to calculate the slab scan ratio When swap is full or not present, the anon lru lists are not reclaimable and also won't be scanned. 
So the anon pages should not be counted in either usage scenario. Also rename to _reclaimable_pages: now they are counting the possibly reclaimable lru pages. It can greatly (and correctly) increase the slab scan rate under high memory pressure (when most file pages have been reclaimed and swap is full/absent), thus reducing false OOM kills. Acked-by: Peter Zijlstra Reviewed-by: Rik van Riel Reviewed-by: Christoph Lameter Reviewed-by: Minchan Kim Cc: KOSAKI Motohiro Signed-off-by: Wu Fengguang Acked-by: Johannes Weiner Reviewed-by: Minchan Kim Reviewed-by: Jesse Barnes Cc: David Howells Cc: "Li, Ming Chun" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 11 ++--------- mm/page-writeback.c | 5 +++-- mm/vmscan.c | 50 +++++++++++++++++++++++++++++++++++++++----------- 3 files changed, 44 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d7f577f49d1..2d0f222388a 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -166,15 +166,8 @@ static inline unsigned long zone_page_state(struct zone *zone, return x; } -extern unsigned long global_lru_pages(void); - -static inline unsigned long zone_lru_pages(struct zone *zone) -{ - return (zone_page_state(zone, NR_ACTIVE_ANON) - + zone_page_state(zone, NR_ACTIVE_FILE) - + zone_page_state(zone, NR_INACTIVE_ANON) - + zone_page_state(zone, NR_INACTIVE_FILE)); -} +extern unsigned long global_reclaimable_pages(void); +extern unsigned long zone_reclaimable_pages(struct zone *zone); #ifdef CONFIG_NUMA /* diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d1ba4644105..5f378dd5880 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -380,7 +380,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total) struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; - x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z); + x += zone_page_state(z, NR_FREE_PAGES) + + zone_reclaimable_pages(z); } /* * Make sure that the number of highmem pages is never larger @@ -404,7 +405,7 @@ unsigned long determine_dirtyable_memory(void) { unsigned long x; - x = global_page_state(NR_FREE_PAGES) + global_lru_pages(); + x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); diff --git a/mm/vmscan.c b/mm/vmscan.c index f90b76086ff..208071c48bf 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1734,7 +1734,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; - lru_pages += zone_lru_pages(zone); + lru_pages += zone_reclaimable_pages(zone); } } @@ -1951,7 +1951,7 @@ loop_again: for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; - lru_pages += zone_lru_pages(zone); + lru_pages += zone_reclaimable_pages(zone); } /* @@ -1995,7 +1995,7 @@ loop_again: if (zone_is_all_unreclaimable(zone)) continue; if (nr_slab == 0 && zone->pages_scanned >= - (zone_lru_pages(zone) * 6)) + (zone_reclaimable_pages(zone) * 6)) zone_set_flag(zone, ZONE_ALL_UNRECLAIMABLE); /* @@ -2162,12 +2162,39 @@ void wakeup_kswapd(struct zone *zone, int order) wake_up_interruptible(&pgdat->kswapd_wait); } -unsigned long global_lru_pages(void) +/* + * The reclaimable count would be mostly accurate.
+ * The less reclaimable pages may be + * - mlocked pages, which will be moved to unevictable list when encountered + * - mapped pages, which may require several travels to be reclaimed + * - dirty pages, which is not "instantly" reclaimable + */ +unsigned long global_reclaimable_pages(void) { - return global_page_state(NR_ACTIVE_ANON) - + global_page_state(NR_ACTIVE_FILE) - + global_page_state(NR_INACTIVE_ANON) - + global_page_state(NR_INACTIVE_FILE); + int nr; + + nr = global_page_state(NR_ACTIVE_FILE) + + global_page_state(NR_INACTIVE_FILE); + + if (nr_swap_pages > 0) + nr += global_page_state(NR_ACTIVE_ANON) + + global_page_state(NR_INACTIVE_ANON); + + return nr; +} + +unsigned long zone_reclaimable_pages(struct zone *zone) +{ + int nr; + + nr = zone_page_state(zone, NR_ACTIVE_FILE) + + zone_page_state(zone, NR_INACTIVE_FILE); + + if (nr_swap_pages > 0) + nr += zone_page_state(zone, NR_ACTIVE_ANON) + + zone_page_state(zone, NR_INACTIVE_ANON); + + return nr; } #ifdef CONFIG_HIBERNATION @@ -2239,7 +2266,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) current->reclaim_state = &reclaim_state; - lru_pages = global_lru_pages(); + lru_pages = global_reclaimable_pages(); nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); /* If slab caches are huge, it's better to hit them first */ while (nr_slab >= lru_pages) { @@ -2281,7 +2308,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) reclaim_state.reclaimed_slab = 0; shrink_slab(sc.nr_scanned, sc.gfp_mask, - global_lru_pages()); + global_reclaimable_pages()); sc.nr_reclaimed += reclaim_state.reclaimed_slab; if (sc.nr_reclaimed >= nr_pages) goto out; @@ -2298,7 +2325,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages) if (!sc.nr_reclaimed) { do { reclaim_state.reclaimed_slab = 0; - shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages()); + shrink_slab(nr_pages, sc.gfp_mask, + global_reclaimable_pages()); sc.nr_reclaimed += reclaim_state.reclaimed_slab; } while (sc.nr_reclaimed < nr_pages && reclaim_state.reclaimed_slab > 0); -- cgit v1.2.3-70-g09d2 From 451ea25da71590361c71bf3044c55b870a887d53 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 21 Sep 2009 17:01:48 -0700 Subject: mm: perform non-atomic test-clear of PG_mlocked on free By the time PG_mlocked is cleared in the page freeing path, nobody else is looking at our page->flags anymore. It is thus safe to make the test-and-clear non-atomic and thereby removing an unnecessary and expensive operation from a hotpath. 
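Expanded by hand, the __TESTCLEARFLAG() macro added in the page-flags.h hunk below generates a non-atomic sibling of the existing atomic helper; roughly (illustrative expansion, not literal kernel source):

/* Atomic form: locked read-modify-write, safe against concurrent
 * updates of page->flags from other CPUs.
 */
static inline int TestClearPageMlocked(struct page *page)
{
	return test_and_clear_bit(PG_mlocked, &page->flags);
}

/* Non-atomic form: plain read-modify-write, valid only when nothing
 * else can touch page->flags, as in the page freeing path.
 */
static inline int __TestClearPageMlocked(struct page *page)
{
	return __test_and_clear_bit(PG_mlocked, &page->flags);
}

The two call sites in mm/page_alloc.c switch to the cheaper form because __free_pages_ok() and free_hot_cold_page() only run once the page has no other users.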
Signed-off-by: Johannes Weiner Reviewed-by: Christoph Lameter Reviewed-by: KOSAKI Motohiro Cc: Christoph Lameter Cc: Mel Gorman Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 12 +++++++++--- mm/page_alloc.c | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 2b87acfc5f8..d07c0bb2203 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -158,6 +158,9 @@ static inline int TestSetPage##uname(struct page *page) \ static inline int TestClearPage##uname(struct page *page) \ { return test_and_clear_bit(PG_##lname, &page->flags); } +#define __TESTCLEARFLAG(uname, lname) \ +static inline int __TestClearPage##uname(struct page *page) \ + { return __test_and_clear_bit(PG_##lname, &page->flags); } #define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) @@ -184,6 +187,9 @@ static inline void __ClearPage##uname(struct page *page) { } #define TESTCLEARFLAG_FALSE(uname) \ static inline int TestClearPage##uname(struct page *page) { return 0; } +#define __TESTCLEARFLAG_FALSE(uname) \ +static inline int __TestClearPage##uname(struct page *page) { return 0; } + struct page; /* forward declaration */ TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) @@ -250,11 +256,11 @@ PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT #define MLOCK_PAGES 1 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) - TESTSCFLAG(Mlocked, mlocked) + TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) #else #define MLOCK_PAGES 0 -PAGEFLAG_FALSE(Mlocked) - SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) +PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) + TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 81926c7ef6f..9242d13f4ff 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -557,7 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order) unsigned long flags; int i; int bad = 0; - int wasMlocked = TestClearPageMlocked(page); + int wasMlocked = __TestClearPageMlocked(page); kmemcheck_free_shadow(page, order); @@ -1026,7 +1026,7 @@ static void free_hot_cold_page(struct page *page, int cold) struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; unsigned long flags; - int wasMlocked = TestClearPageMlocked(page); + int wasMlocked = __TestClearPageMlocked(page); kmemcheck_free_shadow(page, 0); -- cgit v1.2.3-70-g09d2 From 828502d30073036a486d96b1fe051e0f08b6df83 Mon Sep 17 00:00:00 2001 From: Izik Eidus Date: Mon, 21 Sep 2009 17:01:51 -0700 Subject: ksm: add mmu_notifier set_pte_at_notify() KSM is a Linux driver that allows dynamically sharing identical memory pages between one or more processes. Unlike traditional page sharing, which is set up when the memory is allocated, KSM does it dynamically after the memory has been created. Memory is periodically scanned; identical pages are identified and merged. The sharing is transparent to the processes that use it. KSM is highly important for hypervisors (KVM), where in production environments there may be many copies of the same data in host memory. This kind of data can be: similar kernels, libraries, caches, and so on.
Even though KSM was written for KVM, any userspace application that wants to share its data can try it. KSM may be useful for any application that keeps similar (page-aligned) data structures in memory: KSM will find that data and merge it into a single copy, and even if a page is later modified and therefore copied on write, KSM will merge it again as soon as it becomes identical again. Another reason to consider KSM is that it can greatly simplify the userspace code of applications that want to use shared private data: instead of the application managing a shared area itself, KSM does it on the application's behalf, and writes to that data are allowed without any synchronization on the application's part. KSM was designed to be a loadable module that does not change the VM code of Linux. This patch: The set_pte_at_notify() macro allows setting a pte in the shadow page table directly, instead of flushing the shadow page table entry and then getting a vmexit to set it. It uses a new change_pte() callback to do so. set_pte_at_notify() is an optimization for kvm, and other users of mmu_notifiers, for COW pages. It is useful for KVM when KSM is in use, because it allows KVM to map the KSM page into the shadow page table directly, at the same time as Linux maps the page into the host page table, instead of having to take a vmexit first. Users of mmu_notifiers who don't implement the new mmu_notifier_change_pte() callback will simply receive the mmu_notifier_invalidate_page() callback. Signed-off-by: Izik Eidus Signed-off-by: Chris Wright Signed-off-by: Hugh Dickins Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Wu Fengguang Cc: Balbir Singh Cc: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Avi Kivity Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmu_notifier.h | 34 ++++++++++++++++++++++++++++++++++ mm/memory.c | 9 +++++++-- mm/mmu_notifier.c | 20 ++++++++++++++++++++ 3 files changed, 61 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index b77486d152c..4e02ee2b071 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -61,6 +61,15 @@ struct mmu_notifier_ops { struct mm_struct *mm, unsigned long address); + /* + * change_pte is called in cases that pte mapping to page is changed: + * for example, when ksm remaps pte to point to a new shared page.
+ */ + void (*change_pte)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, + pte_t pte); + /* * Before this is invoked any secondary MMU is still ok to * read/write to the page previously pointed to by the Linux @@ -154,6 +163,8 @@ extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long address); +extern void __mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte); extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address); extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, @@ -175,6 +186,13 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_change_pte(mm, address, pte); +} + static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address) { @@ -236,6 +254,16 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) __young; \ }) +#define set_pte_at_notify(__mm, __address, __ptep, __pte) \ +({ \ + struct mm_struct *___mm = __mm; \ + unsigned long ___address = __address; \ + pte_t ___pte = __pte; \ + \ + set_pte_at(___mm, ___address, __ptep, ___pte); \ + mmu_notifier_change_pte(___mm, ___address, ___pte); \ +}) + #else /* CONFIG_MMU_NOTIFIER */ static inline void mmu_notifier_release(struct mm_struct *mm) @@ -248,6 +276,11 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ +} + static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address) { @@ -273,6 +306,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define ptep_clear_flush_young_notify ptep_clear_flush_young #define ptep_clear_flush_notify ptep_clear_flush +#define set_pte_at_notify set_pte_at #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/mm/memory.c b/mm/memory.c index e8f63d9961e..368561f3200 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2115,9 +2115,14 @@ gotten: * seen in the presence of one thread doing SMC and another * thread doing COW. */ - ptep_clear_flush_notify(vma, address, page_table); + ptep_clear_flush(vma, address, page_table); page_add_new_anon_rmap(new_page, vma, address); - set_pte_at(mm, address, page_table, entry); + /* + * We call the notify macro here because, when using secondary + * mmu page tables (such as kvm shadow page tables), we want the + * new page to be mapped directly into the secondary page table. + */ + set_pte_at_notify(mm, address, page_table, entry); update_mmu_cache(vma, address, entry); if (old_page) { /* diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 5f4ef0250be..7e33f2cb3c7 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -99,6 +99,26 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm, return young; } +void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, + pte_t pte) +{ + struct mmu_notifier *mn; + struct hlist_node *n; + + rcu_read_lock(); + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + if (mn->ops->change_pte) + mn->ops->change_pte(mn, mm, address, pte); + /* + * Some drivers don't have change_pte, + * so we must call invalidate_page in that case. 
+ */ + else if (mn->ops->invalidate_page) + mn->ops->invalidate_page(mn, mm, address); + } + rcu_read_unlock(); +} + void __mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address) { -- cgit v1.2.3-70-g09d2 From d19f352484467a5e518639ddff0554669c10ffab Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:01:53 -0700 Subject: ksm: define MADV_MERGEABLE and MADV_UNMERGEABLE The out-of-tree KSM used ioctls on fds cloned from /dev/ksm to register a memory area for merging: we prefer now to use an madvise(2) interface. This patch just defines MADV_MERGEABLE (to tell KSM it may merge pages in this area found identical to pages in other mergeable areas) and MADV_UNMERGEABLE (to undo that). Most architectures use asm-generic, but alpha, mips, parisc, xtensa need their own definitions: included here for mmotm convenience, but we'll probably want to split this and feed pieces to arch maintainers. Based upon earlier patches by Chris Wright and Izik Eidus. Signed-off-by: Hugh Dickins Signed-off-by: Chris Wright Signed-off-by: Izik Eidus Cc: Michael Kerrisk Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Ralf Baechle Cc: Kyle McMartin Cc: Helge Deller Cc: Chris Zankel Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Wu Fengguang Cc: Balbir Singh Cc: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Avi Kivity Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/mman.h | 3 +++ arch/mips/include/asm/mman.h | 3 +++ arch/parisc/include/asm/mman.h | 3 +++ arch/xtensa/include/asm/mman.h | 3 +++ include/asm-generic/mman-common.h | 3 +++ 5 files changed, 15 insertions(+) (limited to 'include') diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h index 90d7c35d286..c77c55756a7 100644 --- a/arch/alpha/include/asm/mman.h +++ b/arch/alpha/include/asm/mman.h @@ -48,6 +48,9 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h index e4d6f1fb1cf..f15554d1518 100644 --- a/arch/mips/include/asm/mman.h +++ b/arch/mips/include/asm/mman.h @@ -71,6 +71,9 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h index defe752cc99..a12d9d43f50 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -54,6 +54,9 @@ #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ +#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 #define MAP_VARIABLE 0 diff --git a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/asm/mman.h index 9b92620c8a1..6e55b4d1f9c 100644 --- a/arch/xtensa/include/asm/mman.h +++ b/arch/xtensa/include/asm/mman.h @@ -78,6 +78,9 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages 
*/ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 3b69ad34189..dd63bd38864 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h @@ -35,6 +35,9 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + /* compatibility flags */ #define MAP_FILE 0 -- cgit v1.2.3-70-g09d2 From f8af4da3b4c14e7267c4ffb952079af3912c51c5 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:01:57 -0700 Subject: ksm: the mm interface to ksm This patch presents the mm interface to a dummy version of ksm.c, for better scrutiny of that interface: the real ksm.c follows later. When CONFIG_KSM is not set, madvise(2) rejects MADV_MERGEABLE and MADV_UNMERGEABLE with EINVAL, since that seems more helpful than pretending that they can be serviced. But when CONFIG_KSM=y, accept them even if KSM is not currently running, and even on areas which KSM will not touch (e.g. hugetlb or shared file or special driver mappings). Like other madvise calls, report ENOMEM despite success if any area in the range is unmapped, and use EAGAIN to report out of memory. Define vma flag VM_MERGEABLE to identify an area on which KSM may try merging pages: leave it to ksm_madvise() to decide whether to set it. Define mm flag MMF_VM_MERGEABLE to identify an mm which might contain VM_MERGEABLE areas, to minimize callouts when forking or exiting. Based upon earlier patches by Chris Wright and Izik Eidus. Signed-off-by: Hugh Dickins Signed-off-by: Chris Wright Signed-off-by: Izik Eidus Cc: Michael Kerrisk Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Wu Fengguang Cc: Balbir Singh Cc: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Avi Kivity Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 50 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/mm.h | 1 + include/linux/sched.h | 7 +++++++ kernel/fork.c | 8 +++++++- mm/Kconfig | 11 ++++++++++ mm/Makefile | 1 + mm/ksm.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++ mm/madvise.c | 14 +++++++++++++ 8 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 include/linux/ksm.h create mode 100644 mm/ksm.c (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h new file mode 100644 index 00000000000..eb2a448981e --- /dev/null +++ b/include/linux/ksm.h @@ -0,0 +1,50 @@ +#ifndef __LINUX_KSM_H +#define __LINUX_KSM_H +/* + * Memory merging support. + * + * This code enables dynamic sharing of identical pages found in different + * memory areas, even if they are not shared by fork().
+ */ + +#include +#include +#include + +#ifdef CONFIG_KSM +int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags); +int __ksm_enter(struct mm_struct *mm); +void __ksm_exit(struct mm_struct *mm); + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) + return __ksm_enter(mm); + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) + __ksm_exit(mm); +} +#else /* !CONFIG_KSM */ + +static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags) +{ + return 0; +} + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ +} +#endif /* !CONFIG_KSM */ + +#endif diff --git a/include/linux/mm.h b/include/linux/mm.h index d3c8ae7c801..d808cf832c4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -103,6 +103,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ +#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS diff --git a/include/linux/sched.h b/include/linux/sched.h index 8fe351c3914..8f3e63cb33a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -434,7 +434,9 @@ extern int get_dumpable(struct mm_struct *mm); /* dumpable bits */ #define MMF_DUMPABLE 0 /* core dump is permitted */ #define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ + #define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) /* coredump filter bits */ #define MMF_DUMP_ANON_PRIVATE 2 @@ -444,6 +446,7 @@ extern int get_dumpable(struct mm_struct *mm); #define MMF_DUMP_ELF_HEADERS 6 #define MMF_DUMP_HUGETLB_PRIVATE 7 #define MMF_DUMP_HUGETLB_SHARED 8 + #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS #define MMF_DUMP_FILTER_BITS 7 #define MMF_DUMP_FILTER_MASK \ @@ -457,6 +460,10 @@ extern int get_dumpable(struct mm_struct *mm); #else # define MMF_DUMP_MASK_DEFAULT_ELF 0 #endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) struct sighand_struct { atomic_t count; diff --git a/kernel/fork.c b/kernel/fork.c index d4638c8cc19..73a442b7be6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -299,6 +300,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) rb_link = &mm->mm_rb.rb_node; rb_parent = NULL; pprev = &mm->mmap; + retval = ksm_fork(mm, oldmm); + if (retval) + goto out; for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { struct file *file; @@ -435,7 +439,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); INIT_LIST_HEAD(&mm->mmlist); - mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; + mm->flags = (current->mm) ? 
+ (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; mm->core_state = NULL; mm->nr_ptes = 0; set_mm_counter(mm, file_rss, 0); @@ -496,6 +501,7 @@ void mmput(struct mm_struct *mm) if (atomic_dec_and_test(&mm->mm_users)) { exit_aio(mm); + ksm_exit(mm); exit_mmap(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { diff --git a/mm/Kconfig b/mm/Kconfig index 3aa519f52e1..c0b6afa178a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -214,6 +214,17 @@ config HAVE_MLOCKED_PAGE_BIT config MMU_NOTIFIER bool +config KSM + bool "Enable KSM for page merging" + depends on MMU + help + Enable Kernel Samepage Merging: KSM periodically scans those areas + of an application's address space that an app has advised may be + mergeable. When it finds pages of identical content, it replaces + the many instances by a single resident page with that content, so + saving memory until one or another app needs to modify the content. + Recommended for use with KVM, or with other duplicative applications. + config DEFAULT_MMAP_MIN_ADDR int "Low address space to protect from user allocation" default 4096 diff --git a/mm/Makefile b/mm/Makefile index ea4b18bd396..a63bf59a0c7 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o +obj-$(CONFIG_KSM) += ksm.o obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o diff --git a/mm/ksm.c b/mm/ksm.c new file mode 100644 index 00000000000..8b76008fcd3 --- /dev/null +++ b/mm/ksm.c @@ -0,0 +1,56 @@ +/* + * Initial dummy version just to illustrate KSM's interface to other files. + */ + +#include +#include +#include + +int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags) +{ + struct mm_struct *mm = vma->vm_mm; + + switch (advice) { + case MADV_MERGEABLE: + /* + * Be somewhat over-protective for now! 
+ */ + if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | + VM_PFNMAP | VM_IO | VM_DONTEXPAND | + VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | + VM_MIXEDMAP | VM_SAO)) + return 0; /* just ignore the advice */ + + if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) + if (__ksm_enter(mm) < 0) + return -EAGAIN; + + *vm_flags |= VM_MERGEABLE; + break; + + case MADV_UNMERGEABLE: + if (!(*vm_flags & VM_MERGEABLE)) + return 0; /* just ignore the advice */ + + /* Unmerge any merged pages here */ + + *vm_flags &= ~VM_MERGEABLE; + break; + } + + return 0; +} + +int __ksm_enter(struct mm_struct *mm) +{ + /* Allocate a structure to track mm and link it into KSM's list */ + set_bit(MMF_VM_MERGEABLE, &mm->flags); + return 0; +} + +void __ksm_exit(struct mm_struct *mm) +{ + /* Unlink and free all KSM's structures which track this mm */ + clear_bit(MMF_VM_MERGEABLE, &mm->flags); +} diff --git a/mm/madvise.c b/mm/madvise.c index 66c31264f06..d9ae2067952 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -11,6 +11,7 @@ #include #include #include +#include /* * Any behaviour which results in changes to the vma->vm_flags needs to @@ -63,6 +64,12 @@ static long madvise_behavior(struct vm_area_struct * vma, } new_flags &= ~VM_DONTCOPY; break; + case MADV_MERGEABLE: + case MADV_UNMERGEABLE: + error = ksm_madvise(vma, start, end, behavior, &new_flags); + if (error) + goto out; + break; } if (new_flags == vma->vm_flags) { @@ -239,6 +246,10 @@ madvise_behavior_valid(int behavior) case MADV_REMOVE: case MADV_WILLNEED: case MADV_DONTNEED: +#ifdef CONFIG_KSM + case MADV_MERGEABLE: + case MADV_UNMERGEABLE: +#endif return 1; default: @@ -273,6 +284,9 @@ madvise_behavior_valid(int behavior) * MADV_DONTFORK - omit this area from child's address space when forking: * typically, to avoid COWing pages pinned by get_user_pages(). * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. + * MADV_MERGEABLE - the application recommends that KSM try to merge pages in + * this area with pages of identical content from other such areas. + * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others. * * return values: * zero - success -- cgit v1.2.3-70-g09d2 From 21333b2b66b805a360641568588e5a0bb06d9d1f Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:01:59 -0700 Subject: ksm: no debug in page_dup_rmap() page_dup_rmap(), used on each mapped page when forking, was originally just an inline atomic_inc of mapcount. 2.6.22 added CONFIG_DEBUG_VM out-of-line checks to it, which would need to be ever-so-slightly complicated to allow for the PageKsm() we're about to define. But I think these checks never caught anything. And if it's coding errors we're worried about, such checks should be in page_remove_rmap() too, not just when forking; whereas if it's pagetable corruption we're worried about, then they shouldn't be limited to CONFIG_DEBUG_VM. Oh, just revert page_dup_rmap() to an inline atomic_inc of mapcount. 
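For reference, the shape of the revert (paraphrasing the hunks below): the out-of-line CONFIG_DEBUG_VM variant and its __page_check_anon_rmap() call go away, the vma/address arguments are dropped at the copy_one_pte() call site, and the helper goes back to a bare mapcount increment:

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}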
Signed-off-by: Hugh Dickins Signed-off-by: Chris Wright Signed-off-by: Izik Eidus Cc: Nick Piggin Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Wu Fengguang Cc: Balbir Singh Cc: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Avi Kivity Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 6 +----- mm/memory.c | 2 +- mm/rmap.c | 21 --------------------- 3 files changed, 2 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bf116d0dbf2..477841d29fc 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -71,14 +71,10 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon void page_add_file_rmap(struct page *); void page_remove_rmap(struct page *); -#ifdef CONFIG_DEBUG_VM -void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address); -#else -static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) +static inline void page_dup_rmap(struct page *page) { atomic_inc(&page->_mapcount); } -#endif /* * Called from mm/vmscan.c to handle paging out diff --git a/mm/memory.c b/mm/memory.c index 368561f3200..7a61a11f186 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -597,7 +597,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, page = vm_normal_page(vma, addr, pte); if (page) { get_page(page); - page_dup_rmap(page, vma, addr); + page_dup_rmap(page); rss[!!PageAnon(page)]++; } diff --git a/mm/rmap.c b/mm/rmap.c index 1406e67f961..720fc03a7bc 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -710,27 +710,6 @@ void page_add_file_rmap(struct page *page) } } -#ifdef CONFIG_DEBUG_VM -/** - * page_dup_rmap - duplicate pte mapping to a page - * @page: the page to add the mapping to - * @vma: the vm area being duplicated - * @address: the user virtual address mapped - * - * For copy_page_range only: minimal extract from page_add_file_rmap / - * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's - * quicker. - * - * The caller needs to hold the pte lock. - */ -void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) -{ - if (PageAnon(page)) - __page_check_anon_rmap(page, vma, address); - atomic_inc(&page->_mapcount); -} -#endif - /** * page_remove_rmap - take down pte mapping from a page * @page: page to remove mapping from -- cgit v1.2.3-70-g09d2 From 9a840895147b12de5cdd633c600b38686840ee53 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:02:01 -0700 Subject: ksm: identify PageKsm pages KSM will need to identify its kernel merged pages unambiguously, and /proc/kpageflags will probably like to do so too. Since KSM will only be substituting anonymous pages, statistics are best preserved by making a PageKsm page a special PageAnon page: one with no anon_vma. But KSM then needs its own page_add_ksm_rmap() - keep it in ksm.h near PageKsm; and do_wp_page() must COW them, unlike singly mapped PageAnons. 
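Because the patch also exports the new state as bit 21 (KPF_KSM) in the fs/proc/page.c hunk below, merged pages can be observed from user space through /proc/kpageflags, which holds one u64 of flags per pfn. A rough sketch of such a check (requires root; not part of this patch):

#include <stdint.h>
#include <stdio.h>

#define KPF_KSM 21	/* bit added by this patch */

int main(void)
{
	FILE *f = fopen("/proc/kpageflags", "rb");
	uint64_t flags;
	unsigned long ksm_pages = 0;

	if (!f)
		return 1;
	while (fread(&flags, sizeof(flags), 1, f) == 1)
		if (flags & (1ULL << KPF_KSM))
			ksm_pages++;
	fclose(f);
	printf("KSM-merged pages: %lu\n", ksm_pages);
	return 0;
}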
Signed-off-by: Hugh Dickins Signed-off-by: Chris Wright Signed-off-by: Izik Eidus Cc: Wu Fengguang Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Wu Fengguang Cc: Balbir Singh Cc: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Avi Kivity Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/page.c | 5 +++++ include/linux/ksm.h | 29 +++++++++++++++++++++++++++++ mm/memory.c | 3 ++- 3 files changed, 36 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/proc/page.c b/fs/proc/page.c index 2707c6c7a20..2281c2cbfe2 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -95,6 +96,8 @@ static const struct file_operations proc_kpagecount_operations = { #define KPF_UNEVICTABLE 18 #define KPF_NOPAGE 20 +#define KPF_KSM 21 + /* kernel hacking assistances * WARNING: subject to change, never rely on them! */ @@ -137,6 +140,8 @@ static u64 get_uflags(struct page *page) u |= 1 << KPF_MMAP; if (PageAnon(page)) u |= 1 << KPF_ANON; + if (PageKsm(page)) + u |= 1 << KPF_KSM; /* * compound pages: export both head/tail info diff --git a/include/linux/ksm.h b/include/linux/ksm.h index eb2a448981e..a485c14ecd5 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -10,6 +10,7 @@ #include #include #include +#include #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -29,6 +30,27 @@ static inline void ksm_exit(struct mm_struct *mm) if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) __ksm_exit(mm); } + +/* + * A KSM page is one of those write-protected "shared pages" or "merged pages" + * which KSM maps into multiple mms, wherever identical anonymous page content + * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. + */ +static inline int PageKsm(struct page *page) +{ + return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); +} + +/* + * But we have to avoid the checking which page_add_anon_rmap() performs. + */ +static inline void page_add_ksm_rmap(struct page *page) +{ + if (atomic_inc_and_test(&page->_mapcount)) { + page->mapping = (void *) PAGE_MAPPING_ANON; + __inc_zone_page_state(page, NR_ANON_PAGES); + } +} #else /* !CONFIG_KSM */ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -45,6 +67,13 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) static inline void ksm_exit(struct mm_struct *mm) { } + +static inline int PageKsm(struct page *page) +{ + return 0; +} + +/* No stub required for page_add_ksm_rmap(page) */ #endif /* !CONFIG_KSM */ #endif diff --git a/mm/memory.c b/mm/memory.c index 7a61a11f186..1a435b81876 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -1974,7 +1975,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, * Take out anonymous pages first, anonymous shared vmas are * not dirty accountable. 
*/ - if (PageAnon(old_page)) { + if (PageAnon(old_page) && !PageKsm(old_page)) { if (!trylock_page(old_page)) { page_cache_get(old_page); pte_unmap_unlock(page_table, ptl); -- cgit v1.2.3-70-g09d2 From 9ba6929480088a85c1ff60a4b1f1c9fc80dbd2b7 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:02:20 -0700 Subject: ksm: fix oom deadlock There's a now-obvious deadlock in KSM's out-of-memory handling: imagine ksmd or KSM_RUN_UNMERGE handling, holding ksm_thread_mutex, trying to allocate a page to break KSM in an mm which becomes the OOM victim (quite likely in the unmerge case): it's killed and goes to exit, and hangs there waiting to acquire ksm_thread_mutex. Clearly we must not require ksm_thread_mutex in __ksm_exit, simple though that made everything else: perhaps use mmap_sem somehow? And part of the answer lies in the comments on unmerge_ksm_pages: __ksm_exit should also leave all the rmap_item removal to ksmd. But there's a fundamental problem, that KSM relies upon mmap_sem to guarantee the consistency of the mm it's dealing with, yet exit_mmap tears down an mm without taking mmap_sem. And bumping mm_users won't help at all, that just ensures that the pages the OOM killer assumes are on their way to being freed will not be freed. The best answer seems to be, to move the ksm_exit callout from just before exit_mmap, to the middle of exit_mmap: after the mm's pages have been freed (if the mmu_gather is flushed), but before its page tables and vma structures have been freed; and down_write,up_write mmap_sem there to serialize with KSM's own reliance on mmap_sem. But KSM then needs to be careful, whenever it downs mmap_sem, to check that the mm is not already exiting: there's a danger of using find_vma on a layout that's being torn apart, or writing into page tables which have been freed for reuse; and even do_anonymous_page and __do_fault need to check they're not being called by break_ksm to reinstate a pte after zap_pte_range has zapped that page table. Though it might be clearer to add an exiting flag, set while holding mmap_sem in __ksm_exit, that wouldn't cover the issue of reinstating a zapped pte. All we need is to check whether mm_users is 0 - but must remember that ksmd may detect that before __ksm_exit is reached. So, ksm_test_exit(mm) added to comment such checks on mm->mm_users. __ksm_exit now has to leave clearing up the rmap_items to ksmd, that needs ksm_thread_mutex; but shift the exiting mm just after the ksm_scan cursor so that it will soon be dealt with. __ksm_enter raise mm_count to hold the mm_struct, ksmd's exit processing (exactly like its processing when it finds all VM_MERGEABLEs unmapped) mmdrop it, similar procedure for KSM_RUN_UNMERGE (which has stopped ksmd). But also give __ksm_exit a fast path: when there's no complication (no rmap_items attached to mm and it's not at the ksm_scan cursor), it can safely do all the exiting work itself. This is not just an optimization: when ksmd is not running, the raised mm_count would otherwise leak mm_structs. 
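The rule described above reduces to one locking pattern, repeated throughout the ksm.c hunks below; a simplified sketch (fragment only, error handling omitted):

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))		/* mm_users == 0: exit_mmap() is tearing the mm down */
		goto out;
	vma = find_vma(mm, addr);	/* only now is the vma layout trustworthy */
	/* ... break COW / merge pages here ... */
out:
	up_read(&mm->mmap_sem);

Every place that takes mmap_sem on someone else's mm re-checks ksm_test_exit() before walking vmas or reinstating ptes, and the exit path itself takes mmap_sem for writing between freeing the mm's pages and freeing its page tables, so the two sides cannot overlap.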
Signed-off-by: Hugh Dickins Acked-by: Izik Eidus Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 31 +++++++++-- kernel/fork.c | 1 - mm/ksm.c | 144 +++++++++++++++++++++++++++++++++++----------------- mm/memory.c | 5 +- mm/mmap.c | 9 ++++ 5 files changed, 137 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index a485c14ecd5..2d64ff30c0d 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -12,11 +12,14 @@ #include #include +struct mmu_gather; + #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); int __ksm_enter(struct mm_struct *mm); -void __ksm_exit(struct mm_struct *mm); +void __ksm_exit(struct mm_struct *mm, + struct mmu_gather **tlbp, unsigned long end); static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { @@ -25,10 +28,24 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) return 0; } -static inline void ksm_exit(struct mm_struct *mm) +/* + * For KSM to handle OOM without deadlock when it's breaking COW in a + * likely victim of the OOM killer, exit_mmap() has to serialize with + * ksm_exit() after freeing mm's pages but before freeing its page tables. + * That leaves a window in which KSM might refault pages which have just + * been finally unmapped: guard against that with ksm_test_exit(), and + * use it after getting mmap_sem in ksm.c, to check if mm is exiting. + */ +static inline bool ksm_test_exit(struct mm_struct *mm) +{ + return atomic_read(&mm->mm_users) == 0; +} + +static inline void ksm_exit(struct mm_struct *mm, + struct mmu_gather **tlbp, unsigned long end) { if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) - __ksm_exit(mm); + __ksm_exit(mm, tlbp, end); } /* @@ -64,7 +81,13 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) return 0; } -static inline void ksm_exit(struct mm_struct *mm) +static inline bool ksm_test_exit(struct mm_struct *mm) +{ + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm, + struct mmu_gather **tlbp, unsigned long end) { } diff --git a/kernel/fork.c b/kernel/fork.c index 73a442b7be6..42f20f565b1 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -501,7 +501,6 @@ void mmput(struct mm_struct *mm) if (atomic_dec_and_test(&mm->mm_users)) { exit_aio(mm); - ksm_exit(mm); exit_mmap(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { diff --git a/mm/ksm.c b/mm/ksm.c index 7e4d255dadc..722e3f2a8dc 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -32,6 +32,7 @@ #include #include +#include #include /* @@ -347,6 +348,8 @@ static void break_cow(struct mm_struct *mm, unsigned long addr) struct vm_area_struct *vma; down_read(&mm->mmap_sem); + if (ksm_test_exit(mm)) + goto out; vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto out; @@ -365,6 +368,8 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) struct page *page; down_read(&mm->mmap_sem); + if (ksm_test_exit(mm)) + goto out; vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto out; @@ -439,11 +444,11 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) } else if (rmap_item->address & NODE_FLAG) { unsigned char age; /* - * ksm_thread can and must skip the rb_erase, because + * Usually ksmd can and must skip the rb_erase, because * root_unstable_tree was already reset to RB_ROOT. 
- * But __ksm_exit has to be careful: do the rb_erase - * if it's interrupting a scan, and this rmap_item was - * inserted by this scan rather than left from before. + * But be careful when an mm is exiting: do the rb_erase + * if this rmap_item was inserted by this scan, rather + * than left over from before. */ age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); BUG_ON(age > 1); @@ -491,6 +496,8 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma, int err = 0; for (addr = start; addr < end && !err; addr += PAGE_SIZE) { + if (ksm_test_exit(vma->vm_mm)) + break; if (signal_pending(current)) err = -ERESTARTSYS; else @@ -507,34 +514,50 @@ static int unmerge_and_remove_all_rmap_items(void) int err = 0; spin_lock(&ksm_mmlist_lock); - mm_slot = list_entry(ksm_mm_head.mm_list.next, + ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next, struct mm_slot, mm_list); spin_unlock(&ksm_mmlist_lock); - while (mm_slot != &ksm_mm_head) { + for (mm_slot = ksm_scan.mm_slot; + mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { mm = mm_slot->mm; down_read(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) { + if (ksm_test_exit(mm)) + break; if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) continue; err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end); - if (err) { - up_read(&mm->mmap_sem); - goto out; - } + if (err) + goto error; } + remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next); - up_read(&mm->mmap_sem); spin_lock(&ksm_mmlist_lock); - mm_slot = list_entry(mm_slot->mm_list.next, + ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, struct mm_slot, mm_list); - spin_unlock(&ksm_mmlist_lock); + if (ksm_test_exit(mm)) { + hlist_del(&mm_slot->link); + list_del(&mm_slot->mm_list); + spin_unlock(&ksm_mmlist_lock); + + free_mm_slot(mm_slot); + clear_bit(MMF_VM_MERGEABLE, &mm->flags); + up_read(&mm->mmap_sem); + mmdrop(mm); + } else { + spin_unlock(&ksm_mmlist_lock); + up_read(&mm->mmap_sem); + } } ksm_scan.seqnr = 0; -out: + return 0; + +error: + up_read(&mm->mmap_sem); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = &ksm_mm_head; spin_unlock(&ksm_mmlist_lock); @@ -755,6 +778,9 @@ static int try_to_merge_with_ksm_page(struct mm_struct *mm1, int err = -EFAULT; down_read(&mm1->mmap_sem); + if (ksm_test_exit(mm1)) + goto out; + vma = find_vma(mm1, addr1); if (!vma || vma->vm_start > addr1) goto out; @@ -796,6 +822,10 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1, return err; down_read(&mm1->mmap_sem); + if (ksm_test_exit(mm1)) { + up_read(&mm1->mmap_sem); + goto out; + } vma = find_vma(mm1, addr1); if (!vma || vma->vm_start > addr1) { up_read(&mm1->mmap_sem); @@ -1174,7 +1204,12 @@ next_mm: mm = slot->mm; down_read(&mm->mmap_sem); - for (vma = find_vma(mm, ksm_scan.address); vma; vma = vma->vm_next) { + if (ksm_test_exit(mm)) + vma = NULL; + else + vma = find_vma(mm, ksm_scan.address); + + for (; vma; vma = vma->vm_next) { if (!(vma->vm_flags & VM_MERGEABLE)) continue; if (ksm_scan.address < vma->vm_start) @@ -1183,6 +1218,8 @@ next_mm: ksm_scan.address = vma->vm_end; while (ksm_scan.address < vma->vm_end) { + if (ksm_test_exit(mm)) + break; *page = follow_page(vma, ksm_scan.address, FOLL_GET); if (*page && PageAnon(*page)) { flush_anon_page(vma, *page, ksm_scan.address); @@ -1205,6 +1242,11 @@ next_mm: } } + if (ksm_test_exit(mm)) { + ksm_scan.address = 0; + ksm_scan.rmap_item = list_entry(&slot->rmap_list, + struct rmap_item, link); + } /* * Nuke all the rmap_items that are above this current rmap: * because there were no 
VM_MERGEABLE vmas with such addresses. @@ -1219,24 +1261,29 @@ next_mm: * We've completed a full scan of all vmas, holding mmap_sem * throughout, and found no VM_MERGEABLE: so do the same as * __ksm_exit does to remove this mm from all our lists now. + * This applies either when cleaning up after __ksm_exit + * (but beware: we can reach here even before __ksm_exit), + * or when all VM_MERGEABLE areas have been unmapped (and + * mmap_sem then protects against race with MADV_MERGEABLE). */ hlist_del(&slot->link); list_del(&slot->mm_list); + spin_unlock(&ksm_mmlist_lock); + free_mm_slot(slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); + up_read(&mm->mmap_sem); + mmdrop(mm); + } else { + spin_unlock(&ksm_mmlist_lock); + up_read(&mm->mmap_sem); } - spin_unlock(&ksm_mmlist_lock); - up_read(&mm->mmap_sem); /* Repeat until we've completed scanning the whole list */ slot = ksm_scan.mm_slot; if (slot != &ksm_mm_head) goto next_mm; - /* - * Bump seqnr here rather than at top, so that __ksm_exit - * can skip rb_erase on unstable tree until we run again. - */ ksm_scan.seqnr++; return NULL; } @@ -1361,6 +1408,7 @@ int __ksm_enter(struct mm_struct *mm) spin_unlock(&ksm_mmlist_lock); set_bit(MMF_VM_MERGEABLE, &mm->flags); + atomic_inc(&mm->mm_count); if (needs_wakeup) wake_up_interruptible(&ksm_thread_wait); @@ -1368,41 +1416,45 @@ int __ksm_enter(struct mm_struct *mm) return 0; } -void __ksm_exit(struct mm_struct *mm) +void __ksm_exit(struct mm_struct *mm, + struct mmu_gather **tlbp, unsigned long end) { struct mm_slot *mm_slot; + int easy_to_free = 0; /* - * This process is exiting: doesn't hold and doesn't need mmap_sem; - * but we do need to exclude ksmd and other exiters while we modify - * the various lists and trees. + * This process is exiting: if it's straightforward (as is the + * case when ksmd was never running), free mm_slot immediately. + * But if it's at the cursor or has rmap_items linked to it, use + * mmap_sem to synchronize with any break_cows before pagetables + * are freed, and leave the mm_slot on the list for ksmd to free. + * Beware: ksm may already have noticed it exiting and freed the slot. 
*/ - mutex_lock(&ksm_thread_mutex); + spin_lock(&ksm_mmlist_lock); mm_slot = get_mm_slot(mm); - if (!list_empty(&mm_slot->rmap_list)) { - spin_unlock(&ksm_mmlist_lock); - remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next); - spin_lock(&ksm_mmlist_lock); - } - - if (ksm_scan.mm_slot == mm_slot) { - ksm_scan.mm_slot = list_entry( - mm_slot->mm_list.next, struct mm_slot, mm_list); - ksm_scan.address = 0; - ksm_scan.rmap_item = list_entry( - &ksm_scan.mm_slot->rmap_list, struct rmap_item, link); - if (ksm_scan.mm_slot == &ksm_mm_head) - ksm_scan.seqnr++; + if (mm_slot && ksm_scan.mm_slot != mm_slot) { + if (list_empty(&mm_slot->rmap_list)) { + hlist_del(&mm_slot->link); + list_del(&mm_slot->mm_list); + easy_to_free = 1; + } else { + list_move(&mm_slot->mm_list, + &ksm_scan.mm_slot->mm_list); + } } - - hlist_del(&mm_slot->link); - list_del(&mm_slot->mm_list); spin_unlock(&ksm_mmlist_lock); - free_mm_slot(mm_slot); - clear_bit(MMF_VM_MERGEABLE, &mm->flags); - mutex_unlock(&ksm_thread_mutex); + if (easy_to_free) { + free_mm_slot(mm_slot); + clear_bit(MMF_VM_MERGEABLE, &mm->flags); + mmdrop(mm); + } else if (mm_slot) { + tlb_finish_mmu(*tlbp, 0, end); + down_write(&mm->mmap_sem); + up_write(&mm->mmap_sem); + *tlbp = tlb_gather_mmu(mm, 1); + } } #define KSM_ATTR_RO(_name) \ diff --git a/mm/memory.c b/mm/memory.c index 1a435b81876..f47ffe97101 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2648,8 +2648,9 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, entry = maybe_mkwrite(pte_mkdirty(entry), vma); page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (!pte_none(*page_table)) + if (!pte_none(*page_table) || ksm_test_exit(mm)) goto release; + inc_mm_counter(mm, anon_rss); page_add_new_anon_rmap(page, vma, address); set_pte_at(mm, address, page_table, entry); @@ -2791,7 +2792,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, * handle that later. */ /* Only go through if we didn't race with anybody else... */ - if (likely(pte_same(*page_table, orig_pte))) { + if (likely(pte_same(*page_table, orig_pte) && !ksm_test_exit(mm))) { flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (flags & FAULT_FLAG_WRITE) diff --git a/mm/mmap.c b/mm/mmap.c index 376492ed08f..e02f1aa66a1 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -2111,6 +2112,14 @@ void exit_mmap(struct mm_struct *mm) /* Use -1 here to ensure all VMAs in the mm are unmapped */ end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); + + /* + * For KSM to handle OOM without deadlock when it's breaking COW in a + * likely victim of the OOM killer, we must serialize with ksm_exit() + * after freeing mm's pages but before freeing its page tables. 
+ */ + ksm_exit(mm, &tlb, end); + free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(tlb, 0, end); -- cgit v1.2.3-70-g09d2 From 1c2fb7a4c2ca7a958b02bc1e615d0254990bba8d Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 21 Sep 2009 17:02:22 -0700 Subject: ksm: fix deadlock with munlock in exit_mmap Rawhide users have reported hang at startup when cryptsetup is run: the same problem can be simply reproduced by running a program int main() { mlockall(MCL_CURRENT | MCL_FUTURE); return 0; } The problem is that exit_mmap() applies munlock_vma_pages_all() to clean up VM_LOCKED areas, and its current implementation (stupidly) tries to fault in absent pages, for example where PROT_NONE prevented them being faulted in when mlocking. Whereas the "ksm: fix oom deadlock" patch, knowing there's a race by which KSM might try to fault in pages after exit_mmap() had finally zapped the range, backs out of such faults doing nothing when its ksm_test_exit() notices mm_users 0. So revert that part of "ksm: fix oom deadlock" which moved the ksm_exit() call from before exit_mmap() to the middle of exit_mmap(); and remove those ksm_test_exit() checks from the page fault paths, so allowing the munlocking to proceed without interference. ksm_exit, if there are rmap_items still chained on this mm slot, takes mmap_sem write side: so preventing KSM from working on an mm while exit_mmap runs. And KSM will bail out as soon as it notices that mm_users is already zero, thanks to its internal ksm_test_exit checks. So that when a task is killed by OOM killer or the user, KSM will not indefinitely prevent it from running exit_mmap to release its memory. This does break a part of what "ksm: fix oom deadlock" was trying to achieve. When unmerging KSM (echo 2 >/sys/kernel/mm/ksm), and even when ksmd itself has to cancel a KSM page, it is possible that the first OOM-kill victim would be the KSM process being faulted: then its memory won't be freed until a second victim has been selected (freeing memory for the unmerging fault to complete). But the OOM killer is already liable to kill a second victim once the intended victim's p->mm goes to NULL: so there's not much point in rejecting this KSM patch before fixing that OOM behaviour. It is very much more important to allow KSM users to boot up, than to haggle over an unlikely and poorly supported OOM case. We also intend to fix munlocking to not fault pages: at which point this patch _could_ be reverted; though that would be controversial, so we hope to find a better solution. Signed-off-by: Andrea Arcangeli Acked-by: Justin M. 
Forbes Acked-for-now-by: Hugh Dickins Cc: Izik Eidus Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 11 ++++------- kernel/fork.c | 1 + mm/ksm.c | 5 +---- mm/memory.c | 4 ++-- mm/mmap.c | 7 ------- 5 files changed, 8 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 2d64ff30c0d..0e26de6adb5 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -18,8 +18,7 @@ struct mmu_gather; int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); int __ksm_enter(struct mm_struct *mm); -void __ksm_exit(struct mm_struct *mm, - struct mmu_gather **tlbp, unsigned long end); +void __ksm_exit(struct mm_struct *mm); static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { @@ -41,11 +40,10 @@ static inline bool ksm_test_exit(struct mm_struct *mm) return atomic_read(&mm->mm_users) == 0; } -static inline void ksm_exit(struct mm_struct *mm, - struct mmu_gather **tlbp, unsigned long end) +static inline void ksm_exit(struct mm_struct *mm) { if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) - __ksm_exit(mm, tlbp, end); + __ksm_exit(mm); } /* @@ -86,8 +84,7 @@ static inline bool ksm_test_exit(struct mm_struct *mm) return 0; } -static inline void ksm_exit(struct mm_struct *mm, - struct mmu_gather **tlbp, unsigned long end) +static inline void ksm_exit(struct mm_struct *mm) { } diff --git a/kernel/fork.c b/kernel/fork.c index 42f20f565b1..73a442b7be6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -501,6 +501,7 @@ void mmput(struct mm_struct *mm) if (atomic_dec_and_test(&mm->mm_users)) { exit_aio(mm); + ksm_exit(mm); exit_mmap(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { diff --git a/mm/ksm.c b/mm/ksm.c index 722e3f2a8dc..92034eb47eb 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1416,8 +1416,7 @@ int __ksm_enter(struct mm_struct *mm) return 0; } -void __ksm_exit(struct mm_struct *mm, - struct mmu_gather **tlbp, unsigned long end) +void __ksm_exit(struct mm_struct *mm) { struct mm_slot *mm_slot; int easy_to_free = 0; @@ -1450,10 +1449,8 @@ void __ksm_exit(struct mm_struct *mm, clear_bit(MMF_VM_MERGEABLE, &mm->flags); mmdrop(mm); } else if (mm_slot) { - tlb_finish_mmu(*tlbp, 0, end); down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); - *tlbp = tlb_gather_mmu(mm, 1); } } diff --git a/mm/memory.c b/mm/memory.c index f47ffe97101..05feaa11d87 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2648,7 +2648,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, entry = maybe_mkwrite(pte_mkdirty(entry), vma); page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (!pte_none(*page_table) || ksm_test_exit(mm)) + if (!pte_none(*page_table)) goto release; inc_mm_counter(mm, anon_rss); @@ -2792,7 +2792,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, * handle that later. */ /* Only go through if we didn't race with anybody else... 
*/ - if (likely(pte_same(*page_table, orig_pte) && !ksm_test_exit(mm))) { + if (likely(pte_same(*page_table, orig_pte))) { flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (flags & FAULT_FLAG_WRITE) diff --git a/mm/mmap.c b/mm/mmap.c index e02f1aa66a1..ffd6c6c9bcf 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2113,13 +2113,6 @@ void exit_mmap(struct mm_struct *mm) end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); - /* - * For KSM to handle OOM without deadlock when it's breaking COW in a - * likely victim of the OOM killer, we must serialize with ksm_exit() - * after freeing mm's pages but before freeing its page tables. - */ - ksm_exit(mm, &tlb, end); - free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(tlb, 0, end); -- cgit v1.2.3-70-g09d2 From a913e182ab9484308e870af37a14d372742d53b0 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:02:26 -0700 Subject: ksm: clean up obsolete references A few cleanups, given the munlock fix: the comment on ksm_test_exit() no longer applies, and it can be made private to ksm.c; there's no more reference to mmu_gather or tlb.h, and mmap.c doesn't need ksm.h. Signed-off-by: Hugh Dickins Acked-by: Izik Eidus Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 20 -------------------- mm/ksm.c | 14 +++++++++++++- mm/mmap.c | 1 - 3 files changed, 13 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 0e26de6adb5..a485c14ecd5 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -12,8 +12,6 @@ #include #include -struct mmu_gather; - #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); @@ -27,19 +25,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) return 0; } -/* - * For KSM to handle OOM without deadlock when it's breaking COW in a - * likely victim of the OOM killer, exit_mmap() has to serialize with - * ksm_exit() after freeing mm's pages but before freeing its page tables. - * That leaves a window in which KSM might refault pages which have just - * been finally unmapped: guard against that with ksm_test_exit(), and - * use it after getting mmap_sem in ksm.c, to check if mm is exiting. - */ -static inline bool ksm_test_exit(struct mm_struct *mm) -{ - return atomic_read(&mm->mm_users) == 0; -} - static inline void ksm_exit(struct mm_struct *mm) { if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) @@ -79,11 +64,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) return 0; } -static inline bool ksm_test_exit(struct mm_struct *mm) -{ - return 0; -} - static inline void ksm_exit(struct mm_struct *mm) { } diff --git a/mm/ksm.c b/mm/ksm.c index 3bd54ce9eb3..e11e7a5ac84 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -32,7 +32,6 @@ #include #include -#include #include /* @@ -284,6 +283,19 @@ static inline int in_stable_tree(struct rmap_item *rmap_item) return rmap_item->address & STABLE_FLAG; } +/* + * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's + * page tables after it has passed through ksm_exit() - which, if necessary, + * takes mmap_sem briefly to serialize against them. ksm_exit() does not set + * a special flag: they can just back out as soon as mm_users goes to zero. 
+ * ksm_test_exit() is used throughout to make this test for exit: in some + * places for correctness, in some places just to avoid unnecessary work. + */ +static inline bool ksm_test_exit(struct mm_struct *mm) +{ + return atomic_read(&mm->mm_users) == 0; +} + /* * We use break_ksm to break COW on a ksm page: it's a stripped down * diff --git a/mm/mmap.c b/mm/mmap.c index 22dff49d579..6eed98c0054 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3-70-g09d2 From 35451beecbd7c86ce3249d543594517a5fe9a0cd Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:02:27 -0700 Subject: ksm: unmerge is an origin of OOMs Just as the swapoff system call allocates many pages of RAM to various processes, perhaps triggering OOM, so "echo 2 >/sys/kernel/mm/ksm/run" (unmerge) is liable to allocate many pages of RAM to various processes, perhaps triggering OOM; and each is normally run from a modest admin process (swapoff or shell), easily repeated until it succeeds. So treat unmerge_and_remove_all_rmap_items() in the same way that we treat try_to_unuse(): generalize PF_SWAPOFF to PF_OOM_ORIGIN, and bracket both with that, to ask the OOM killer to kill them first, to prevent them from spawning more and more OOM kills. Signed-off-by: Hugh Dickins Acked-by: Izik Eidus Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 2 +- mm/ksm.c | 2 ++ mm/oom_kill.c | 2 +- mm/swapfile.c | 4 ++-- 4 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 8f3e63cb33a..899d7304d59 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1720,7 +1720,7 @@ extern cputime_t task_gtime(struct task_struct *p); #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ #define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_SWAPOFF 0x00080000 /* I am in swapoff */ +#define PF_OOM_ORIGIN 0x00080000 /* Allocating much memory to others */ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ diff --git a/mm/ksm.c b/mm/ksm.c index e11e7a5ac84..37cc3732509 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1557,7 +1557,9 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, if (ksm_run != flags) { ksm_run = flags; if (flags & KSM_RUN_UNMERGE) { + current->flags |= PF_OOM_ORIGIN; err = unmerge_and_remove_all_rmap_items(); + current->flags &= ~PF_OOM_ORIGIN; if (err) { ksm_run = KSM_RUN_STOP; count = err; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index a7b2460e922..da4c342f264 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -79,7 +79,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) /* * swapoff can easily use up all memory, so kill those first. 
*/ - if (p->flags & PF_SWAPOFF) + if (p->flags & PF_OOM_ORIGIN) return ULONG_MAX; /* diff --git a/mm/swapfile.c b/mm/swapfile.c index 74f1102e874..f1bf19daadc 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1575,9 +1575,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) p->flags &= ~SWP_WRITEOK; spin_unlock(&swap_lock); - current->flags |= PF_SWAPOFF; + current->flags |= PF_OOM_ORIGIN; err = try_to_unuse(type); - current->flags &= ~PF_SWAPOFF; + current->flags &= ~PF_OOM_ORIGIN; if (err) { /* re-insert swap space back into swap_list */ -- cgit v1.2.3-70-g09d2 From 38a398572fa2d8124f7479e40db581b5b72719c9 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 21 Sep 2009 17:02:39 -0700 Subject: page-allocator: remove dead function free_cold_page() The function free_cold_page() has no callers so delete it. Signed-off-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 1 - mm/page_alloc.c | 5 ----- 2 files changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 7c777a0da17..c32bfa8e7f1 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -326,7 +326,6 @@ void free_pages_exact(void *virt, size_t size); extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); extern void free_hot_page(struct page *page); -extern void free_cold_page(struct page *page); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr),0) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 20759803a64..913a8ebd3a8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1079,11 +1079,6 @@ void free_hot_page(struct page *page) free_hot_cold_page(page, 0); } -void free_cold_page(struct page *page) -{ - free_hot_cold_page(page, 1); -} - /* * split_page takes a non-compound higher-order page, and splits it into * n (1< Date: Mon, 21 Sep 2009 17:02:41 -0700 Subject: tracing, page-allocator: add trace events for page allocation and page freeing This patch adds trace events for the allocation and freeing of pages, including the freeing of pagevecs. Using the events, you can see which struct pages and pfns are being allocated and freed, and in many cases what the call site was. The page alloc tracepoints can be used as an indicator as to whether the workload was heavily dependent on the page allocator or not. You can make a guess based on vmstat but you can't get a per-process breakdown. Depending on the call path, the call_site for page allocation may be __get_free_pages() instead of a useful callsite. Instead of passing down a return address similar to slab debugging, the user should enable the stacktrace and sym-addr options to get a proper stack trace. The pagevec free tracepoint has a different use case. It can be used to get an idea of how many pages are being dumped off the LRU and whether it is kswapd doing the work or a process doing direct reclaim.
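[ Illustration, not part of the patch: the note about call_site is easiest to see from the shape of the wrapper itself. The sketch below is a simplified version of __get_free_pages() (the real one also rejects __GFP_HIGHMEM); because the traced allocation happens inside alloc_pages(), the event attributes such allocations to the wrapper, and only the stacktrace/sym-addr trace options reveal the real caller. ]

unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/* trace_mm_page_alloc() fires somewhere below this call */
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}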
Signed-off-by: Mel Gorman Acked-by: Rik van Riel Reviewed-by: Ingo Molnar Cc: Larry Woodman Cc: Peter Zijlstra Cc: Li Ming Chun Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/kmem.h | 74 +++++++++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 7 ++++- 2 files changed, 80 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 1493c541f9c..0d358a0d45c 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -225,6 +225,80 @@ TRACE_EVENT(kmem_cache_free, TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) ); + +TRACE_EVENT(mm_page_free_direct, + + TP_PROTO(struct page *page, unsigned int order), + + TP_ARGS(page, order), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + ), + + TP_printk("page=%p pfn=%lu order=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order) +); + +TRACE_EVENT(mm_pagevec_free, + + TP_PROTO(struct page *page, int cold), + + TP_ARGS(page, cold), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( int, cold ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->cold = cold; + ), + + TP_printk("page=%p pfn=%lu order=0 cold=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->cold) +); + +TRACE_EVENT(mm_page_alloc, + + TP_PROTO(struct page *page, unsigned int order, + gfp_t gfp_flags, int migratetype), + + TP_ARGS(page, order, gfp_flags, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + __field( gfp_t, gfp_flags ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->gfp_flags = gfp_flags; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype, + show_gfp_flags(__entry->gfp_flags)) +); + #endif /* _TRACE_KMEM_H */ /* This part must be outside protection */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 913a8ebd3a8..80f954d82d7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1076,6 +1076,7 @@ static void free_hot_cold_page(struct page *page, int cold) void free_hot_page(struct page *page) { + trace_mm_page_free_direct(page, 0); free_hot_cold_page(page, 0); } @@ -1920,6 +1921,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); + trace_mm_page_alloc(page, order, gfp_mask, migratetype); return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); @@ -1954,13 +1956,16 @@ void __pagevec_free(struct pagevec *pvec) { int i = pagevec_count(pvec); - while (--i >= 0) + while (--i >= 0) { + trace_mm_pagevec_free(pvec->pages[i], pvec->cold); free_hot_cold_page(pvec->pages[i], pvec->cold); + } } void __free_pages(struct page *page, unsigned int order) { if (put_page_testzero(page)) { + trace_mm_page_free_direct(page, order); if (order == 0) free_hot_page(page); else -- cgit v1.2.3-70-g09d2 From e0fff1bd12469c45dab088e353d8882761387bb6 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 21 Sep 2009 17:02:42 -0700 Subject: tracing, page-allocator: add trace events for anti-fragmentation falling back to other migratetypes Fragmentation avoidance depends on being able to use free pages from lists of the appropriate 
migrate type. In the event this is not possible, __rmqueue_fallback() selects a different list and in some circumstances changes the migratetype of the pageblock. Simplistically, the more often this event occurs, the more likely it is that fragmentation will be a problem later, for hugepage allocation at least, but there are other considerations, such as the order of the page being split to satisfy the allocation. This patch adds a trace event for __rmqueue_fallback() that reports what page is being used for the fallback, the orders of relevant pages, the desired migratetype and the migratetype of the lists being used, whether the pageblock changed type and whether this event is important with respect to fragmentation avoidance or not. This information can be used to help analyse fragmentation avoidance and help decide whether min_free_kbytes should be increased or not. Signed-off-by: Mel Gorman Acked-by: Rik van Riel Reviewed-by: Ingo Molnar Cc: Larry Woodman Cc: Peter Zijlstra Cc: Li Ming Chun Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/kmem.h | 38 ++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 4 ++++ 2 files changed, 42 insertions(+) (limited to 'include') diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 0d358a0d45c..aae16ee1760 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -299,6 +299,44 @@ TRACE_EVENT(mm_page_alloc, show_gfp_flags(__entry->gfp_flags)) ); +TRACE_EVENT(mm_page_alloc_extfrag, + + TP_PROTO(struct page *page, + int alloc_order, int fallback_order, + int alloc_migratetype, int fallback_migratetype), + + TP_ARGS(page, + alloc_order, fallback_order, + alloc_migratetype, fallback_migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( int, alloc_order ) + __field( int, fallback_order ) + __field( int, alloc_migratetype ) + __field( int, fallback_migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->alloc_order = alloc_order; + __entry->fallback_order = fallback_order; + __entry->alloc_migratetype = alloc_migratetype; + __entry->fallback_migratetype = fallback_migratetype; + ), + + TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->alloc_order, + __entry->fallback_order, + pageblock_order, + __entry->alloc_migratetype, + __entry->fallback_migratetype, + __entry->fallback_order < pageblock_order, + __entry->alloc_migratetype == __entry->fallback_migratetype) +); + #endif /* _TRACE_KMEM_H */ /* This part must be outside protection */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 80f954d82d7..77f517c18b3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -853,6 +853,10 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) start_migratetype); expand(zone, page, order, current_order, area, migratetype); + + trace_mm_page_alloc_extfrag(page, order, current_order, + start_migratetype, migratetype); + return page; } } -- cgit v1.2.3-70-g09d2 From 0d3d062a6e289e065bd0aa537a6806a1806bf8aa Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 21 Sep 2009 17:02:44 -0700 Subject: tracing, page-allocator: add trace event for page traffic related to the buddy lists The page allocation trace event reports that a page was successfully allocated but it does not specify where it came from.
When analysing performance, it can be important to distinguish between pages coming from the per-cpu allocator and pages coming from the buddy lists, as the latter requires the zone lock to be taken and more data structures to be examined. This patch adds a trace event for __rmqueue reporting when a page is being allocated from the buddy lists. It distinguishes between being called to refill the per-cpu lists and being called for a high-order allocation. Similarly, this patch adds an event to catch when the PCP lists are being drained a little and pages are going back to the buddy lists. This is trickier to draw conclusions from, but high activity on those events could explain why there were a large number of cache misses on a page-allocator-intensive workload. The coalescing and splitting of buddies involves a lot of writing of page metadata and cache line bounces, not to mention the acquisition of an interrupt-safe lock necessary to enter this path. [akpm@linux-foundation.org: fix build] Signed-off-by: Mel Gorman Acked-by: Rik van Riel Reviewed-by: Ingo Molnar Cc: Larry Woodman Cc: Peter Zijlstra Cc: Li Ming Chun Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/kmem.h | 51 +++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 3 +++ 2 files changed, 54 insertions(+) (limited to 'include') diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index aae16ee1760..eaf46bdd18a 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -299,6 +299,57 @@ TRACE_EVENT(mm_page_alloc, show_gfp_flags(__entry->gfp_flags)) ); +TRACE_EVENT(mm_page_alloc_zone_locked, + + TP_PROTO(struct page *page, unsigned int order, int migratetype), + + TP_ARGS(page, order, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype, + __entry->order == 0) +); + +TRACE_EVENT(mm_page_pcpu_drain, + + TP_PROTO(struct page *page, int order, int migratetype), + + TP_ARGS(page, order, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( int, order ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%d migratetype=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype) +); + TRACE_EVENT(mm_page_alloc_extfrag, TP_PROTO(struct page *page, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 77f517c18b3..4c847cc57ca 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -535,6 +536,7 @@ static void free_pages_bulk(struct zone *zone, int count, page = list_entry(list->prev, struct page, lru); /* have to delete it as __free_one_page list manipulates */ list_del(&page->lru); + trace_mm_page_pcpu_drain(page, order, page_private(page)); __free_one_page(page, zone, order, page_private(page)); } spin_unlock(&zone->lock); @@ -890,6 +892,7 @@ retry_reserve: } } + trace_mm_page_alloc_zone_locked(page, order, migratetype); return page; } -- cgit v1.2.3-70-g09d2 From bba78819548a59a52e60f0b259997bbd011164ae Mon Sep 17
00:00:00 2001 From: Sage Weil Date: Mon, 21 Sep 2009 17:02:56 -0700 Subject: mm: remove broken 'kzalloc' mempool The kzalloc mempool zeros items when they are initially allocated, but does not rezero used items that are returned to the pool. Consequently mempool_alloc()s may return non-zeroed memory. Since there are/were only two in-tree users for mempool_create_kzalloc_pool(), and 'fixing' this in a way that will re-zero used (but not new) items before first use is non-trivial, just remove it. Signed-off-by: Sage Weil Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mempool.h | 10 ++-------- mm/mempool.c | 7 ------- 2 files changed, 2 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 9be484d1128..7c08052e332 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -47,22 +47,16 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) } /* - * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree - * the amount of memory specified by pool_data + * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the + * amount of memory specified by pool_data */ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); -void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data); void mempool_kfree(void *element, void *pool_data); static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) { return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, (void *) size); } -static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size) -{ - return mempool_create(min_nr, mempool_kzalloc, mempool_kfree, - (void *) size); -} /* * A mempool_alloc_t and mempool_free_t for a simple page allocator that diff --git a/mm/mempool.c b/mm/mempool.c index 32e75d40050..1a3bc3d4d55 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -308,13 +308,6 @@ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) } EXPORT_SYMBOL(mempool_kmalloc); -void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) -{ - size_t size = (size_t)pool_data; - return kzalloc(size, gfp_mask); -} -EXPORT_SYMBOL(mempool_kzalloc); - void mempool_kfree(void *element, void *pool_data) { kfree(element); -- cgit v1.2.3-70-g09d2 From 401a8e1c1670085b8177330ca47d4f7c4ac88761 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 21 Sep 2009 17:02:58 -0700 Subject: mm: introduce page_lru_base_type() Instead of abusing page_is_file_cache() for LRU list index arithmetic, add another helper with a more appropriate name and convert the non-boolean users of page_is_file_cache() accordingly. This new helper gives the LRU base type a page is supposed to live on, inactive anon or inactive file. [hugh.dickins@tiscali.co.uk: convert del_page_from_lru() also] Signed-off-by: Johannes Weiner Reviewed-by: Rik van Riel Cc: Minchan Kim Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 23 +++++++++++++++++++---- mm/swap.c | 4 ++-- mm/vmscan.c | 6 +++--- 3 files changed, 24 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 7fbb9726755..99977ff45b8 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -39,21 +39,36 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l) mem_cgroup_del_lru_list(page, l); } +/** + * page_lru_base_type - which LRU list type should a page be on? 
+ * @page: the page to test + * + * Used for LRU list index arithmetic. + * + * Returns the base LRU type - file or anon - @page should be on. + */ +static inline enum lru_list page_lru_base_type(struct page *page) +{ + if (page_is_file_cache(page)) + return LRU_INACTIVE_FILE; + return LRU_INACTIVE_ANON; +} + static inline void del_page_from_lru(struct zone *zone, struct page *page) { - enum lru_list l = LRU_BASE; + enum lru_list l; list_del(&page->lru); if (PageUnevictable(page)) { __ClearPageUnevictable(page); l = LRU_UNEVICTABLE; } else { + l = page_lru_base_type(page); if (PageActive(page)) { __ClearPageActive(page); l += LRU_ACTIVE; } - l += page_is_file_cache(page); } __dec_zone_state(zone, NR_LRU_BASE + l); mem_cgroup_del_lru_list(page, l); @@ -68,14 +83,14 @@ del_page_from_lru(struct zone *zone, struct page *page) */ static inline enum lru_list page_lru(struct page *page) { - enum lru_list lru = LRU_BASE; + enum lru_list lru; if (PageUnevictable(page)) lru = LRU_UNEVICTABLE; else { + lru = page_lru_base_type(page); if (PageActive(page)) lru += LRU_ACTIVE; - lru += page_is_file_cache(page); } return lru; diff --git a/mm/swap.c b/mm/swap.c index cb29ae5d33a..168d53e6e58 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -118,7 +118,7 @@ static void pagevec_move_tail(struct pagevec *pvec) spin_lock(&zone->lru_lock); } if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - int lru = page_is_file_cache(page); + int lru = page_lru_base_type(page); list_move_tail(&page->lru, &zone->lru[lru].list); pgmoved++; } @@ -181,7 +181,7 @@ void activate_page(struct page *page) spin_lock_irq(&zone->lru_lock); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { int file = page_is_file_cache(page); - int lru = LRU_BASE + file; + int lru = page_lru_base_type(page); del_page_from_lru_list(zone, page, lru); SetPageActive(page); diff --git a/mm/vmscan.c b/mm/vmscan.c index cad5d528a6f..30e56ee833f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -531,7 +531,7 @@ redo: * unevictable page on [in]active list. * We know how to handle that. */ - lru = active + page_is_file_cache(page); + lru = active + page_lru_base_type(page); lru_cache_add_lru(page, lru); } else { /* @@ -986,7 +986,7 @@ static unsigned long clear_active_flags(struct list_head *page_list, struct page *page; list_for_each_entry(page, page_list, lru) { - lru = page_is_file_cache(page); + lru = page_lru_base_type(page); if (PageActive(page)) { lru += LRU_ACTIVE; ClearPageActive(page); @@ -2652,7 +2652,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone) retry: ClearPageUnevictable(page); if (page_evictable(page, NULL)) { - enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page); + enum lru_list l = page_lru_base_type(page); __dec_zone_state(zone, NR_UNEVICTABLE); list_move(&page->lru, &zone->lru[l].list); -- cgit v1.2.3-70-g09d2 From 6c0b13519d1c755d874e82c8fb8a6dcef0ee402c Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 21 Sep 2009 17:02:59 -0700 Subject: mm: return boolean from page_is_file_cache() page_is_file_cache() has been used for both boolean checks and LRU arithmetic, which was always a bit weird. Now that page_lru_base_type() exists for LRU arithmetic, make page_is_file_cache() a real predicate function and adjust the boolean-using callsites to drop those pesky double negations. 
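[ Illustration, assembled from the hunks of this and the preceding patch rather than added by either: the two helpers now divide the work cleanly - a boolean predicate for "is it file-backed?" and a separate helper for the LRU index arithmetic. ]

/* boolean predicate: is this page file-backed page cache? */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}

/* LRU arithmetic: which base list should the page start from? */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}

	/* callers then adjust for the active variant, e.g. in page_lru(): */
	enum lru_list lru = page_lru_base_type(page);
	if (PageActive(page))
		lru += LRU_ACTIVE;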
Signed-off-by: Johannes Weiner Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 8 ++------ mm/migrate.c | 6 +++--- mm/swap.c | 2 +- mm/vmscan.c | 2 +- 4 files changed, 7 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 99977ff45b8..8835b877b8d 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -5,7 +5,7 @@ * page_is_file_cache - should the page be on a file LRU or anon LRU? * @page: the page to test * - * Returns LRU_FILE if @page is page cache page backed by a regular filesystem, + * Returns 1 if @page is page cache page backed by a regular filesystem, * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. * Used by functions that manipulate the LRU lists, to sort a page * onto the right LRU list. @@ -16,11 +16,7 @@ */ static inline int page_is_file_cache(struct page *page) { - if (PageSwapBacked(page)) - return 0; - - /* The page is page cache backed by a normal filesystem. */ - return LRU_FILE; + return !PageSwapBacked(page); } static inline void diff --git a/mm/migrate.c b/mm/migrate.c index b535a2c1656..e97e513fe89 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -68,7 +68,7 @@ int putback_lru_pages(struct list_head *l) list_for_each_entry_safe(page, page2, l, lru) { list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + - !!page_is_file_cache(page)); + page_is_file_cache(page)); putback_lru_page(page); count++; } @@ -701,7 +701,7 @@ unlock: */ list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + - !!page_is_file_cache(page)); + page_is_file_cache(page)); putback_lru_page(page); } @@ -751,7 +751,7 @@ int migrate_pages(struct list_head *from, local_irq_save(flags); list_for_each_entry(page, from, lru) __inc_zone_page_state(page, NR_ISOLATED_ANON + - !!page_is_file_cache(page)); + page_is_file_cache(page)); local_irq_restore(flags); if (!swapwrite) diff --git a/mm/swap.c b/mm/swap.c index 168d53e6e58..4a8a59e671f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -189,7 +189,7 @@ void activate_page(struct page *page) add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); - update_page_reclaim_stat(zone, page, !!file, 1); + update_page_reclaim_stat(zone, page, file, 1); } spin_unlock_irq(&zone->lru_lock); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 30e56ee833f..172119caebc 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -821,7 +821,7 @@ int __isolate_lru_page(struct page *page, int mode, int file) if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode)) return ret; - if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file)) + if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file) return ret; /* -- cgit v1.2.3-70-g09d2 From edcf4748cd56adcdf0856cc99ef108a4ea3ac7fe Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 21 Sep 2009 17:02:59 -0700 Subject: mm: return boolean from page_has_private() Make page_has_private() return a true boolean value and remove the double negations from the two callsites using it for arithmetic. 
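[ Illustration, assembled from the hunks below rather than added by the patch: the predicate itself now hides the !!, so call sites can use it directly in arithmetic. ]

static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);	/* strictly 0 or 1 */
}

	/* e.g. in migrate_page_move_mapping(): */
	expected_count = 2 + page_has_private(page);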
Signed-off-by: Johannes Weiner Cc: Christoph Lameter Reviewed-by: Christoph Lameter Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 13 ++++++++----- mm/migrate.c | 2 +- mm/vmscan.c | 2 +- 3 files changed, 10 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index d07c0bb2203..13de789f0a5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -402,8 +402,8 @@ static inline void __ClearPageTail(struct page *page) */ #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) -#endif /* !__GENERATING_BOUNDS_H */ - +#define PAGE_FLAGS_PRIVATE \ + (1 << PG_private | 1 << PG_private_2) /** * page_has_private - Determine if page has private stuff * @page: The page to be checked @@ -411,8 +411,11 @@ static inline void __ClearPageTail(struct page *page) * Determine if a page has private stuff, indicating that release routines * should be invoked upon it. */ -#define page_has_private(page) \ - ((page)->flags & ((1 << PG_private) | \ - (1 << PG_private_2))) +static inline int page_has_private(struct page *page) +{ + return !!(page->flags & PAGE_FLAGS_PRIVATE); +} + +#endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ diff --git a/mm/migrate.c b/mm/migrate.c index e97e513fe89..16052e80aaa 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -272,7 +272,7 @@ static int migrate_page_move_mapping(struct address_space *mapping, pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page)); - expected_count = 2 + !!page_has_private(page); + expected_count = 2 + page_has_private(page); if (page_count(page) != expected_count || (struct page *)radix_tree_deref_slot(pslot) != page) { spin_unlock_irq(&mapping->tree_lock); diff --git a/mm/vmscan.c b/mm/vmscan.c index 172119caebc..f5b5f029288 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -286,7 +286,7 @@ static inline int page_mapping_inuse(struct page *page) static inline int is_page_cache_freeable(struct page *page) { - return page_count(page) - !!page_has_private(page) == 2; + return page_count(page) - page_has_private(page) == 2; } static int may_write_to_queue(struct backing_dev_info *bdi) -- cgit v1.2.3-70-g09d2 From 4481374ce88ba8f460c8b89f2572027bd27057d0 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 21 Sep 2009 17:03:05 -0700 Subject: mm: replace various uses of num_physpages by totalram_pages Sizing of memory allocations shouldn't depend on the number of physical pages found in a system, as that generally includes (perhaps a huge amount of) non-RAM pages. The amount of what actually is usable as storage should instead be used as a basis here. Some of the calculations (i.e. those not intending to use high memory) should likely even use (totalram_pages - totalhigh_pages). Signed-off-by: Jan Beulich Acked-by: Rusty Russell Acked-by: Ingo Molnar Cc: Dave Airlie Cc: Kyle McMartin Cc: Jeremy Fitzhardinge Cc: Pekka Enberg Cc: Hugh Dickins Cc: "David S. 
Miller" Cc: Patrick McHardy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/microcode_core.c | 4 ++-- drivers/char/agp/backend.c | 4 ++-- drivers/parisc/ccio-dma.c | 4 ++-- drivers/parisc/sba_iommu.c | 4 ++-- drivers/xen/balloon.c | 4 ---- fs/ntfs/malloc.h | 2 +- include/linux/mm.h | 1 + init/main.c | 4 ++-- mm/slab.c | 2 +- mm/swap.c | 2 +- mm/vmalloc.c | 4 ++-- net/core/sock.c | 4 ++-- net/dccp/proto.c | 6 +++--- net/decnet/dn_route.c | 2 +- net/ipv4/route.c | 2 +- net/ipv4/tcp.c | 4 ++-- net/netfilter/nf_conntrack_core.c | 4 ++-- net/netfilter/x_tables.c | 2 +- net/netfilter/xt_hashlimit.c | 8 ++++---- net/netlink/af_netlink.c | 6 +++--- net/sctp/protocol.c | 6 +++--- 21 files changed, 38 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 0db7969b0dd..378e9a8f1bf 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -210,8 +210,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, { ssize_t ret = -EINVAL; - if ((len >> PAGE_SHIFT) > num_physpages) { - pr_err("microcode: too much data (max %ld pages)\n", num_physpages); + if ((len >> PAGE_SHIFT) > totalram_pages) { + pr_err("microcode: too much data (max %ld pages)\n", totalram_pages); return ret; } diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index ad87753f6de..a56ca080e10 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -114,9 +114,9 @@ static int agp_find_max(void) long memory, index, result; #if PAGE_SHIFT < 20 - memory = num_physpages >> (20 - PAGE_SHIFT); + memory = totalram_pages >> (20 - PAGE_SHIFT); #else - memory = num_physpages << (PAGE_SHIFT - 20); + memory = totalram_pages << (PAGE_SHIFT - 20); #endif index = 1; diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index a45b0c0d574..a6b4a5a53d4 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1266,7 +1266,7 @@ ccio_ioc_init(struct ioc *ioc) ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). */ - iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver)); + iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver)); /* limit IOVA space size to 1MB-1GB */ @@ -1305,7 +1305,7 @@ ccio_ioc_init(struct ioc *ioc) DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n", __func__, ioc->ioc_regs, - (unsigned long) num_physpages >> (20 - PAGE_SHIFT), + (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), iova_space_size>>20, iov_order + PAGE_SHIFT); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 123d8fe3427..57a6d19eba4 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1390,7 +1390,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ** for DMA hints - ergo only 30 bits max. 
*/ - iova_space_size = (u32) (num_physpages/global_ioc_cnt); + iova_space_size = (u32) (totalram_pages/global_ioc_cnt); /* limit IOVA space size to 1MB-1GB */ if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { @@ -1415,7 +1415,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", __func__, ioc->ioc_hpa, - (unsigned long) num_physpages >> (20 - PAGE_SHIFT), + (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), iova_space_size>>20, iov_order + PAGE_SHIFT); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index f5bbd9e8341..1b7123eb5d7 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -96,11 +96,7 @@ static struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; -/* VM /proc information for memory */ -extern unsigned long totalram_pages; - #ifdef CONFIG_HIGHMEM -extern unsigned long totalhigh_pages; #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h index cd0be3f5c3c..a44b14cbcee 100644 --- a/fs/ntfs/malloc.h +++ b/fs/ntfs/malloc.h @@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); /* return (void *)__get_free_page(gfp_mask); */ } - if (likely(size >> PAGE_SHIFT < num_physpages)) + if (likely((size >> PAGE_SHIFT) < totalram_pages)) return __vmalloc(size, gfp_mask, PAGE_KERNEL); return NULL; } diff --git a/include/linux/mm.h b/include/linux/mm.h index d808cf832c4..19ff81c49ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -25,6 +25,7 @@ extern unsigned long max_mapnr; #endif extern unsigned long num_physpages; +extern unsigned long totalram_pages; extern void * high_memory; extern int page_cluster; diff --git a/init/main.c b/init/main.c index 34971becbd3..2c48c315316 100644 --- a/init/main.c +++ b/init/main.c @@ -668,12 +668,12 @@ asmlinkage void __init start_kernel(void) #endif thread_info_cache_init(); cred_init(); - fork_init(num_physpages); + fork_init(totalram_pages); proc_caches_init(); buffer_init(); key_init(); security_init(); - vfs_caches_init(num_physpages); + vfs_caches_init(totalram_pages); radix_tree_init(); signals_init(); /* rootfs populating might need page-writeback */ diff --git a/mm/slab.c b/mm/slab.c index 7b5d4deacfc..7dfa481c96b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1384,7 +1384,7 @@ void __init kmem_cache_init(void) * Fragmentation resistance on low memory - only use bigger * page orders on machines with more than 32MB of memory. 
*/ - if (num_physpages > (32 << 20) >> PAGE_SHIFT) + if (totalram_pages > (32 << 20) >> PAGE_SHIFT) slab_break_gfp_order = BREAK_GFP_ORDER_HI; /* Bootstrap is tricky, because several objects are allocated diff --git a/mm/swap.c b/mm/swap.c index 4a8a59e671f..308e57d8d7e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -496,7 +496,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag); */ void __init swap_setup(void) { - unsigned long megs = num_physpages >> (20 - PAGE_SHIFT); + unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); #ifdef CONFIG_SWAP bdi_init(swapper_space.backing_dev_info); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9216b2555d0..5535da1d696 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1386,7 +1386,7 @@ void *vmap(struct page **pages, unsigned int count, might_sleep(); - if (count > num_physpages) + if (count > totalram_pages) return NULL; area = get_vm_area_caller((count << PAGE_SHIFT), flags, @@ -1493,7 +1493,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, unsigned long real_size = size; size = PAGE_ALIGN(size); - if (!size || (size >> PAGE_SHIFT) > num_physpages) + if (!size || (size >> PAGE_SHIFT) > totalram_pages) return NULL; area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, diff --git a/net/core/sock.c b/net/core/sock.c index 30d5446512f..524712a7b15 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1206,12 +1206,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps); void __init sk_init(void) { - if (num_physpages <= 4096) { + if (totalram_pages <= 4096) { sysctl_wmem_max = 32767; sysctl_rmem_max = 32767; sysctl_wmem_default = 32767; sysctl_rmem_default = 32767; - } else if (num_physpages >= 131072) { + } else if (totalram_pages >= 131072) { sysctl_wmem_max = 131071; sysctl_rmem_max = 131071; } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 923db06c7e5..bc4467082a0 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -1049,10 +1049,10 @@ static int __init dccp_init(void) * * The methodology is similar to that of the buffer cache. */ - if (num_physpages >= (128 * 1024)) - goal = num_physpages >> (21 - PAGE_SHIFT); + if (totalram_pages >= (128 * 1024)) + goal = totalram_pages >> (21 - PAGE_SHIFT); else - goal = num_physpages >> (23 - PAGE_SHIFT); + goal = totalram_pages >> (23 - PAGE_SHIFT); if (thash_entries) goal = (thash_entries * diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 9383d3e5a1a..57662cabaf9 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1750,7 +1750,7 @@ void __init dn_route_init(void) dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ; add_timer(&dn_route_timer); - goal = num_physpages >> (26 - PAGE_SHIFT); + goal = totalram_pages >> (26 - PAGE_SHIFT); for(order = 0; (1UL << order) < goal; order++) /* NOTHING */; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 91867d3e632..df934731453 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3414,7 +3414,7 @@ int __init ip_rt_init(void) alloc_large_system_hash("IP route cache", sizeof(struct rt_hash_bucket), rhash_entries, - (num_physpages >= 128 * 1024) ? + (totalram_pages >= 128 * 1024) ? 15 : 17, 0, &rt_hash_log, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 19a0612b8a2..21387ebabf0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2862,7 +2862,7 @@ void __init tcp_init(void) alloc_large_system_hash("TCP established", sizeof(struct inet_ehash_bucket), thash_entries, - (num_physpages >= 128 * 1024) ? + (totalram_pages >= 128 * 1024) ? 
13 : 15, 0, &tcp_hashinfo.ehash_size, @@ -2879,7 +2879,7 @@ void __init tcp_init(void) alloc_large_system_hash("TCP bind", sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_size, - (num_physpages >= 128 * 1024) ? + (totalram_pages >= 128 * 1024) ? 13 : 15, 0, &tcp_hashinfo.bhash_size, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index b37109817a9..7c9ec3dee96 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void) * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ if (!nf_conntrack_htable_size) { nf_conntrack_htable_size - = (((num_physpages << PAGE_SHIFT) / 16384) + = (((totalram_pages << PAGE_SHIFT) / 16384) / sizeof(struct hlist_head)); - if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) + if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) nf_conntrack_htable_size = 16384; if (nf_conntrack_htable_size < 32) nf_conntrack_htable_size = 32; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index a6ac83a9334..f01955cce31 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) int cpu; /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ - if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages) + if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) return NULL; newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL); diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 219dcdbe388..dd16e404424 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family) if (minfo->cfg.size) size = minfo->cfg.size; else { - size = ((num_physpages << PAGE_SHIFT) / 16384) / + size = ((totalram_pages << PAGE_SHIFT) / 16384) / sizeof(struct list_head); - if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) + if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) size = 8192; if (size < 16) size = 16; @@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) if (minfo->cfg.size) { size = minfo->cfg.size; } else { - size = (num_physpages << PAGE_SHIFT) / 16384 / + size = (totalram_pages << PAGE_SHIFT) / 16384 / sizeof(struct list_head); - if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE) + if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE) size = 8192; if (size < 16) size = 16; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index c5aab6a368c..55180b99562 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2091,10 +2091,10 @@ static int __init netlink_proto_init(void) if (!nl_table) goto panic; - if (num_physpages >= (128 * 1024)) - limit = num_physpages >> (21 - PAGE_SHIFT); + if (totalram_pages >= (128 * 1024)) + limit = totalram_pages >> (21 - PAGE_SHIFT); else - limit = num_physpages >> (23 - PAGE_SHIFT); + limit = totalram_pages >> (23 - PAGE_SHIFT); order = get_bitmask_order(limit) - 1 + PAGE_SHIFT; limit = (1UL << order) / sizeof(struct hlist_head); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index c557f1fb1c6..612dc878e05 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1184,10 +1184,10 @@ SCTP_STATIC __init int sctp_init(void) /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. 
*/ - if (num_physpages >= (128 * 1024)) - goal = num_physpages >> (22 - PAGE_SHIFT); + if (totalram_pages >= (128 * 1024)) + goal = totalram_pages >> (22 - PAGE_SHIFT); else - goal = num_physpages >> (24 - PAGE_SHIFT); + goal = totalram_pages >> (24 - PAGE_SHIFT); for (order = 0; (1UL << order) < goal; order++) ; -- cgit v1.2.3-70-g09d2 From 2c85f51d222ccdd8c401d77a36b723a89156810d Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 21 Sep 2009 17:03:07 -0700 Subject: mm: also use alloc_large_system_hash() for the PID hash table This is being done by allowing boot time allocations to specify that they may want a sub-page sized amount of memory. Overall this seems more consistent with the other hash table allocations, and allows making two supposedly mm-only variables really mm-only (nr_{kernel,all}_pages). Signed-off-by: Jan Beulich Cc: Ingo Molnar Cc: "Eric W. Biederman" Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/bootmem.h | 5 ++--- kernel/pid.c | 15 ++++----------- mm/page_alloc.c | 13 ++++++++++--- 3 files changed, 16 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index bc3ab707369..dd97fb8408a 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -132,9 +132,6 @@ static inline void *alloc_remap(int nid, unsigned long size) } #endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */ -extern unsigned long __meminitdata nr_kernel_pages; -extern unsigned long __meminitdata nr_all_pages; - extern void *alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries, @@ -145,6 +142,8 @@ extern void *alloc_large_system_hash(const char *tablename, unsigned long limit); #define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min + * shift passed via *_hash_shift */ /* Only NUMA needs hash distribution. 64bit NUMA architectures have * sufficient vmalloc space. 
diff --git a/kernel/pid.c b/kernel/pid.c index 31310b5d3f5..d3f722d20f9 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -40,7 +40,7 @@ #define pid_hashfn(nr, ns) \ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) static struct hlist_head *pid_hash; -static int pidhash_shift; +static unsigned int pidhash_shift = 4; struct pid init_struct_pid = INIT_STRUCT_PID; int pid_max = PID_MAX_DEFAULT; @@ -499,19 +499,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) void __init pidhash_init(void) { int i, pidhash_size; - unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT); - pidhash_shift = max(4, fls(megabytes * 4)); - pidhash_shift = min(12, pidhash_shift); + pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18, + HASH_EARLY | HASH_SMALL, + &pidhash_shift, NULL, 4096); pidhash_size = 1 << pidhash_shift; - printk("PID hash table entries: %d (order: %d, %Zd bytes)\n", - pidhash_size, pidhash_shift, - pidhash_size * sizeof(struct hlist_head)); - - pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); - if (!pid_hash) - panic("Could not alloc pidhash!\n"); for (i = 0; i < pidhash_size; i++) INIT_HLIST_HEAD(&pid_hash[i]); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 33b1a4762a7..770f011e1c1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -124,8 +124,8 @@ static char * const zone_names[MAX_NR_ZONES] = { int min_free_kbytes = 1024; -unsigned long __meminitdata nr_kernel_pages; -unsigned long __meminitdata nr_all_pages; +static unsigned long __meminitdata nr_kernel_pages; +static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; #ifdef CONFIG_ARCH_POPULATES_NODE_MAP @@ -4821,7 +4821,14 @@ void *__init alloc_large_system_hash(const char *tablename, numentries <<= (PAGE_SHIFT - scale); /* Make sure we've got at least a 0-order allocation.. 
*/ - if (unlikely((numentries * bucketsize) < PAGE_SIZE)) + if (unlikely(flags & HASH_SMALL)) { + /* Makes no sense without HASH_EARLY */ + WARN_ON(!(flags & HASH_EARLY)); + if (!(numentries >> *_hash_shift)) { + numentries = 1UL << *_hash_shift; + BUG_ON(!numentries); + } + } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) numentries = PAGE_SIZE / bucketsize; } numentries = roundup_pow_of_two(numentries); -- cgit v1.2.3-70-g09d2 From 1a8670a29b5277cbe601f74ab63d2c5211fb3005 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 21 Sep 2009 17:03:09 -0700 Subject: oom: move oom_killer_enable()/oom_killer_disable to where they belong Signed-off-by: Alexey Dobriyan Acked-by: David Rientjes Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 12 ------------ include/linux/oom.h | 11 +++++++++++ kernel/power/process.c | 1 + 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c32bfa8e7f1..f53e9b868c2 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -335,18 +335,6 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(void); void drain_local_pages(void *dummy); -extern bool oom_killer_disabled; - -static inline void oom_killer_disable(void) -{ - oom_killer_disabled = true; -} - -static inline void oom_killer_enable(void) -{ - oom_killer_disabled = false; -} - extern gfp_t gfp_allowed_mask; static inline void set_gfp_allowed_mask(gfp_t mask) diff --git a/include/linux/oom.h b/include/linux/oom.h index a7979baf1e3..6aac5fe4f6f 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -30,5 +30,16 @@ extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); extern int register_oom_notifier(struct notifier_block *nb); extern int unregister_oom_notifier(struct notifier_block *nb); +extern bool oom_killer_disabled; + +static inline void oom_killer_disable(void) +{ + oom_killer_disabled = true; +} + +static inline void oom_killer_enable(void) +{ + oom_killer_disabled = false; +} #endif /* __KERNEL__*/ #endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/kernel/power/process.c b/kernel/power/process.c index da2072d7381..cc2e55373b6 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -9,6 +9,7 @@ #undef DEBUG #include +#include #include #include #include -- cgit v1.2.3-70-g09d2 From f86296317434b21585e229f6c49a33cb9ebab4d3 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Mon, 21 Sep 2009 17:03:11 -0700 Subject: mm: do batched scans for mem_cgroup For mem_cgroup, shrink_zone() may call shrink_list() with nr_to_scan=1, in which case shrink_list() _still_ calls isolate_pages() with the much larger SWAP_CLUSTER_MAX. It effectively scales up the inactive list scan rate by up to 32 times. For example, with 16k inactive pages and DEF_PRIORITY=12, (16k >> 12)=4. So when shrink_zone() expects to scan 4 pages in the active/inactive list, the active list will be scanned 4 pages, while the inactive list will be (over) scanned SWAP_CLUSTER_MAX=32 pages in effect. And that could break the balance between the two lists. 
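As an aside for readers unfamiliar with the batching helper this patch extends to memcg reclaim, here is a minimal stand-alone sketch of the idea (illustrative only, not the kernel's exact code): small per-priority scan requests are accumulated in a saved counter and only handed to the scanner once they reach the cluster size, so a 4-page request is no longer silently inflated into a 32-page isolate.

/* Illustrative sketch of the nr_saved_scan batching described above;
 * not the kernel implementation. */
static unsigned long scan_try_batch(unsigned long nr_requested,
                                    unsigned long *nr_saved,
                                    unsigned long cluster)
{
        *nr_saved += nr_requested;      /* remember the small request     */
        if (*nr_saved < cluster)
                return 0;               /* not enough yet: scan nothing   */
        nr_requested = *nr_saved;       /* release the accumulated batch  */
        *nr_saved = 0;
        return nr_requested;
}

With the 16k-page example above, each call contributes 4 pages to the saved count, so only every eighth call releases a full SWAP_CLUSTER_MAX batch to the scanner, rather than every 4-page request triggering a 32-page scan.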
It can further impact the scan of anon active list, due to the anon active/inactive ratio rebalance logic in balance_pgdat()/shrink_zone(): inactive anon list over scanned => inactive_anon_is_low() == TRUE => shrink_active_list() => active anon list over scanned So the end result may be - anon inactive => over scanned - anon active => over scanned (maybe not as much) - file inactive => over scanned - file active => under scanned (relatively) The accesses to nr_saved_scan are not lock protected and so not 100% accurate, however we can tolerate small errors and the resulted small imbalanced scan rates between zones. Cc: Rik van Riel Reviewed-by: KOSAKI Motohiro Acked-by: Balbir Singh Reviewed-by: Minchan Kim Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 6 +++++- mm/page_alloc.c | 2 +- mm/vmscan.c | 20 +++++++++++--------- 3 files changed, 17 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9c50309b30a..c188ea624c7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -273,6 +273,11 @@ struct zone_reclaim_stat { */ unsigned long recent_rotated[2]; unsigned long recent_scanned[2]; + + /* + * accumulated for batching + */ + unsigned long nr_saved_scan[NR_LRU_LISTS]; }; struct zone { @@ -327,7 +332,6 @@ struct zone { spinlock_t lru_lock; struct zone_lru { struct list_head list; - unsigned long nr_saved_scan; /* accumulated for batching */ } lru[NR_LRU_LISTS]; struct zone_reclaim_stat reclaim_stat; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 770f011e1c1..84d9da1e8f4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3809,7 +3809,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, zone_pcp_init(zone); for_each_lru(l) { INIT_LIST_HEAD(&zone->lru[l].list); - zone->lru[l].nr_saved_scan = 0; + zone->reclaim_stat.nr_saved_scan[l] = 0; } zone->reclaim_stat.recent_rotated[0] = 0; zone->reclaim_stat.recent_rotated[1] = 0; diff --git a/mm/vmscan.c b/mm/vmscan.c index 5432c230c4c..0e7f5e4a22d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1586,6 +1586,7 @@ static void shrink_zone(int priority, struct zone *zone, enum lru_list l; unsigned long nr_reclaimed = sc->nr_reclaimed; unsigned long swap_cluster_max = sc->swap_cluster_max; + struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); int noswap = 0; /* If we have no swap space, do not bother scanning anon pages. 
*/ @@ -1605,12 +1606,9 @@ static void shrink_zone(int priority, struct zone *zone, scan >>= priority; scan = (scan * percent[file]) / 100; } - if (scanning_global_lru(sc)) - nr[l] = nr_scan_try_batch(scan, - &zone->lru[l].nr_saved_scan, - swap_cluster_max); - else - nr[l] = scan; + nr[l] = nr_scan_try_batch(scan, + &reclaim_stat->nr_saved_scan[l], + swap_cluster_max); } while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || @@ -2220,6 +2218,7 @@ static void shrink_all_zones(unsigned long nr_pages, int prio, { struct zone *zone; unsigned long nr_reclaimed = 0; + struct zone_reclaim_stat *reclaim_stat; for_each_populated_zone(zone) { enum lru_list l; @@ -2236,11 +2235,14 @@ static void shrink_all_zones(unsigned long nr_pages, int prio, l == LRU_ACTIVE_FILE)) continue; - zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1; - if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) { + reclaim_stat = get_reclaim_stat(zone, sc); + reclaim_stat->nr_saved_scan[l] += + (lru_pages >> prio) + 1; + if (reclaim_stat->nr_saved_scan[l] + >= nr_pages || pass > 3) { unsigned long nr_to_scan; - zone->lru[l].nr_saved_scan = 0; + reclaim_stat->nr_saved_scan[l] = 0; nr_to_scan = min(nr_pages, lru_pages); nr_reclaimed += shrink_list(l, nr_to_scan, zone, sc, prio); -- cgit v1.2.3-70-g09d2 From 28b83c5193e7ab951e402252278f2cc79dc4d298 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Mon, 21 Sep 2009 17:03:13 -0700 Subject: oom: move oom_adj value from task_struct to signal_struct Currently, the OOM logic call flow is: __out_of_memory() select_bad_process() for each task badness() calculate badness of one task oom_kill_process() search child oom_kill_task() kill target task and mm shared tasks with it For example, process-A has two threads, thread-A and thread-B, it has very fat memory, and each thread has the following oom_adj and oom_score: thread-A: oom_adj = OOM_DISABLE, oom_score = 0 thread-B: oom_adj = 0, oom_score = very-high Then select_bad_process() selects thread-B, but oom_kill_task() refuses to kill the task because thread-A has OOM_DISABLE. Thus __out_of_memory() calls select_bad_process() again, but select_bad_process() selects the same task, and the kernel falls into a livelock. The fact is that select_bad_process() must select a killable task, otherwise the OOM logic livelocks. The root cause is that oom_adj shouldn't be a per-thread value; it should be a per-process value, because the OOM killer kills a process, not a thread. Thus this patch moves oomkilladj (now more appropriately named oom_adj) from struct task_struct to struct signal_struct, which naturally prevents select_bad_process() from choosing the wrong task.
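A compressed illustration of the pre-patch livelock, written as pseudo-C rather than the real functions and assuming the two-thread example above:

/* Pseudo-C sketch of the pre-patch behaviour; illustrative only. */
struct task_struct *victim;

for (;;) {                              /* __out_of_memory() retry loop   */
        victim = select_bad_process();  /* picks thread-B (highest score) */
        if (oom_kill_task(victim))      /* refused: thread-A shares the   */
                continue;               /* mm and has OOM_DISABLE set     */
        break;                          /* never reached -> livelock      */
}

With oom_adj in signal_struct, every thread of a process reports the same value, so badness() and select_bad_process() can skip OOM_DISABLE processes up front and an unkillable victim is never chosen.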
Signed-off-by: KOSAKI Motohiro Cc: Paul Menage Cc: David Rientjes Cc: KAMEZAWA Hiroyuki Cc: Rik van Riel Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 24 ++++++++++++++++++++---- include/linux/sched.h | 3 ++- kernel/fork.c | 2 ++ mm/oom_kill.c | 34 +++++++++++++++------------------- 4 files changed, 39 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/fs/proc/base.c b/fs/proc/base.c index 6f742f6658a..81cfff82875 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -999,11 +999,17 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf, struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); char buffer[PROC_NUMBUF]; size_t len; - int oom_adjust; + int oom_adjust = OOM_DISABLE; + unsigned long flags; if (!task) return -ESRCH; - oom_adjust = task->oomkilladj; + + if (lock_task_sighand(task, &flags)) { + oom_adjust = task->signal->oom_adj; + unlock_task_sighand(task, &flags); + } + put_task_struct(task); len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); @@ -1017,6 +1023,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, struct task_struct *task; char buffer[PROC_NUMBUF], *end; int oom_adjust; + unsigned long flags; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) @@ -1032,11 +1039,20 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, task = get_proc_task(file->f_path.dentry->d_inode); if (!task) return -ESRCH; - if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { + if (!lock_task_sighand(task, &flags)) { + put_task_struct(task); + return -ESRCH; + } + + if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) { + unlock_task_sighand(task, &flags); put_task_struct(task); return -EACCES; } - task->oomkilladj = oom_adjust; + + task->signal->oom_adj = oom_adjust; + + unlock_task_sighand(task, &flags); put_task_struct(task); if (end - buffer == 0) return -EIO; diff --git a/include/linux/sched.h b/include/linux/sched.h index 899d7304d59..17e9a8e9a51 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -639,6 +639,8 @@ struct signal_struct { unsigned audit_tty; struct tty_audit_buf *tty_audit_buf; #endif + + int oom_adj; /* OOM kill score adjustment (bit shift) */ }; /* Context switch must be unlocked if interrupts are to be enabled */ @@ -1221,7 +1223,6 @@ struct task_struct { * a short time */ unsigned char fpu_counter; - s8 oomkilladj; /* OOM kill score adjustment (bit shift). */ #ifdef CONFIG_BLK_DEV_IO_TRACE unsigned int btrace_seq; #endif diff --git a/kernel/fork.c b/kernel/fork.c index 73a442b7be6..1020977b57c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -880,6 +880,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) tty_audit_fork(sig); + sig->oom_adj = current->signal->oom_adj; + return 0; } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index da4c342f264..630b77fe862 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -58,6 +58,10 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) unsigned long points, cpu_time, run_time; struct mm_struct *mm; struct task_struct *child; + int oom_adj = p->signal->oom_adj; + + if (oom_adj == OOM_DISABLE) + return 0; task_lock(p); mm = p->mm; @@ -148,15 +152,15 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) points /= 8; /* - * Adjust the score by oomkilladj. + * Adjust the score by oom_adj. 
*/ - if (p->oomkilladj) { - if (p->oomkilladj > 0) { + if (oom_adj) { + if (oom_adj > 0) { if (!points) points = 1; - points <<= p->oomkilladj; + points <<= oom_adj; } else - points >>= -(p->oomkilladj); + points >>= -(oom_adj); } #ifdef DEBUG @@ -251,7 +255,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints, *ppoints = ULONG_MAX; } - if (p->oomkilladj == OOM_DISABLE) + if (p->signal->oom_adj == OOM_DISABLE) continue; points = badness(p, uptime.tv_sec); @@ -304,7 +308,7 @@ static void dump_tasks(const struct mem_cgroup *mem) } printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n", p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm, - get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj, + get_mm_rss(mm), (int)task_cpu(p), p->signal->oom_adj, p->comm); task_unlock(p); } while_each_thread(g, p); @@ -359,18 +363,9 @@ static int oom_kill_task(struct task_struct *p) * change to NULL at any time since we do not hold task_lock(p). * However, this is of no concern to us. */ - - if (mm == NULL) + if (!mm || p->signal->oom_adj == OOM_DISABLE) return 1; - /* - * Don't kill the process if any threads are set to OOM_DISABLE - */ - do_each_thread(g, q) { - if (q->mm == mm && q->oomkilladj == OOM_DISABLE) - return 1; - } while_each_thread(g, q); - __oom_kill_task(p, 1); /* @@ -394,8 +389,9 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, if (printk_ratelimit()) { printk(KERN_WARNING "%s invoked oom-killer: " - "gfp_mask=0x%x, order=%d, oomkilladj=%d\n", - current->comm, gfp_mask, order, current->oomkilladj); + "gfp_mask=0x%x, order=%d, oom_adj=%d\n", + current->comm, gfp_mask, order, + current->signal->oom_adj); task_lock(current); cpuset_print_task_mems_allowed(current); task_unlock(current); -- cgit v1.2.3-70-g09d2 From 5f8dcc21211a3d4e3a7a5ca366b469fb88117f61 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 21 Sep 2009 17:03:19 -0700 Subject: page-allocator: split per-cpu list into one-list-per-migrate-type The following two patches remove searching in the page allocator fast-path by maintaining multiple free-lists in the per-cpu structure. At the time the search was introduced, increasing the per-cpu structures would waste a lot of memory as per-cpu structures were statically allocated at compile-time. This is no longer the case. The patches are as follows. They are based on mmotm-2009-08-27. Patch 1 adds multiple lists to struct per_cpu_pages, one per migratetype that can be stored on the PCP lists. Patch 2 notes that the pcpu drain path check empty lists multiple times. The patch reduces the number of checks by maintaining a count of free lists encountered. Lists containing pages will then free multiple pages in batch The patches were tested with kernbench, netperf udp/tcp, hackbench and sysbench. The netperf tests were not bound to any CPU in particular and were run such that the results should be 99% confidence that the reported results are within 1% of the estimated mean. sysbench was run with a postgres background and read-only tests. Similar to netperf, it was run multiple times so that it's 99% confidence results are within 1%. 
The patches were tested on x86, x86-64 and ppc64 as x86: Intel Pentium D 3GHz with 8G RAM (no-brand machine) kernbench - No significant difference, variance well within noise netperf-udp - 1.34% to 2.28% gain netperf-tcp - 0.45% to 1.22% gain hackbench - Small variances, very close to noise sysbench - Very small gains x86-64: AMD Phenom 9950 1.3GHz with 8G RAM (no-brand machine) kernbench - No significant difference, variance well within noise netperf-udp - 1.83% to 10.42% gains netperf-tcp - No conclusive until buffer >= PAGE_SIZE 4096 +15.83% 8192 + 0.34% (not significant) 16384 + 1% hackbench - Small gains, very close to noise sysbench - 0.79% to 1.6% gain ppc64: PPC970MP 2.5GHz with 10GB RAM (it's a terrasoft powerstation) kernbench - No significant difference, variance well within noise netperf-udp - 2-3% gain for almost all buffer sizes tested netperf-tcp - losses on small buffers, gains on larger buffers possibly indicates some bad caching effect. hackbench - No significant difference sysbench - 2-4% gain This patch: Currently the per-cpu page allocator searches the PCP list for pages of the correct migrate-type to reduce the possibility of pages being inappropriate placed from a fragmentation perspective. This search is potentially expensive in a fast-path and undesirable. Splitting the per-cpu list into multiple lists increases the size of a per-cpu structure and this was potentially a major problem at the time the search was introduced. These problem has been mitigated as now only the necessary number of structures is allocated for the running system. This patch replaces a list search in the per-cpu allocator with one list per migrate type. The potential snag with this approach is when bulk freeing pages. We round-robin free pages based on migrate type which has little bearing on the cache hotness of the page and potentially checks empty lists repeatedly in the event the majority of PCP pages are of one type. Signed-off-by: Mel Gorman Acked-by: Nick Piggin Cc: Christoph Lameter Cc: Minchan Kim Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 5 ++- mm/page_alloc.c | 106 +++++++++++++++++++++++++++---------------------- 2 files changed, 63 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c188ea624c7..652ef01be58 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -38,6 +38,7 @@ #define MIGRATE_UNMOVABLE 0 #define MIGRATE_RECLAIMABLE 1 #define MIGRATE_MOVABLE 2 +#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */ #define MIGRATE_RESERVE 3 #define MIGRATE_ISOLATE 4 /* can't allocate from here */ #define MIGRATE_TYPES 5 @@ -169,7 +170,9 @@ struct per_cpu_pages { int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ - struct list_head list; /* the list of pages */ + + /* Lists of pages, one per migrate type stored on the pcp-lists */ + struct list_head lists[MIGRATE_PCPTYPES]; }; struct per_cpu_pageset { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 84d9da1e8f4..1b1c39e6a9b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -511,7 +511,7 @@ static inline int free_pages_check(struct page *page) } /* - * Frees a list of pages. + * Frees a number of pages from the PCP lists * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. 
* @@ -521,23 +521,36 @@ static inline int free_pages_check(struct page *page) * And clear the zone's pages_scanned counter, to hold off the "all pages are * pinned" detection logic. */ -static void free_pages_bulk(struct zone *zone, int count, - struct list_head *list, int order) +static void free_pcppages_bulk(struct zone *zone, int count, + struct per_cpu_pages *pcp) { + int migratetype = 0; + spin_lock(&zone->lock); zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); zone->pages_scanned = 0; - __mod_zone_page_state(zone, NR_FREE_PAGES, count << order); + __mod_zone_page_state(zone, NR_FREE_PAGES, count); while (count--) { struct page *page; + struct list_head *list; + + /* + * Remove pages from lists in a round-robin fashion. This spinning + * around potentially empty lists is bloody awful, alternatives that + * don't suck are welcome + */ + do { + if (++migratetype == MIGRATE_PCPTYPES) + migratetype = 0; + list = &pcp->lists[migratetype]; + } while (list_empty(list)); - VM_BUG_ON(list_empty(list)); page = list_entry(list->prev, struct page, lru); /* have to delete it as __free_one_page list manipulates */ list_del(&page->lru); - trace_mm_page_pcpu_drain(page, order, page_private(page)); - __free_one_page(page, zone, order, page_private(page)); + trace_mm_page_pcpu_drain(page, 0, migratetype); + __free_one_page(page, zone, 0, migratetype); } spin_unlock(&zone->lock); } @@ -953,7 +966,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) to_drain = pcp->batch; else to_drain = pcp->count; - free_pages_bulk(zone, to_drain, &pcp->list, 0); + free_pcppages_bulk(zone, to_drain, pcp); pcp->count -= to_drain; local_irq_restore(flags); } @@ -979,7 +992,7 @@ static void drain_pages(unsigned int cpu) pcp = &pset->pcp; local_irq_save(flags); - free_pages_bulk(zone, pcp->count, &pcp->list, 0); + free_pcppages_bulk(zone, pcp->count, pcp); pcp->count = 0; local_irq_restore(flags); } @@ -1045,6 +1058,7 @@ static void free_hot_cold_page(struct page *page, int cold) struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; unsigned long flags; + int migratetype; int wasMlocked = __TestClearPageMlocked(page); kmemcheck_free_shadow(page, 0); @@ -1062,21 +1076,39 @@ static void free_hot_cold_page(struct page *page, int cold) kernel_map_pages(page, 1, 0); pcp = &zone_pcp(zone, get_cpu())->pcp; - set_page_private(page, get_pageblock_migratetype(page)); + migratetype = get_pageblock_migratetype(page); + set_page_private(page, migratetype); local_irq_save(flags); if (unlikely(wasMlocked)) free_page_mlock(page); __count_vm_event(PGFREE); + /* + * We only track unmovable, reclaimable and movable on pcp lists. + * Free ISOLATE pages back to the allocator because they are being + * offlined but treat RESERVE as movable pages so we can get those + * areas back if necessary. 
Otherwise, we may have to free + * excessively into the page allocator + */ + if (migratetype >= MIGRATE_PCPTYPES) { + if (unlikely(migratetype == MIGRATE_ISOLATE)) { + free_one_page(zone, page, 0, migratetype); + goto out; + } + migratetype = MIGRATE_MOVABLE; + } + if (cold) - list_add_tail(&page->lru, &pcp->list); + list_add_tail(&page->lru, &pcp->lists[migratetype]); else - list_add(&page->lru, &pcp->list); + list_add(&page->lru, &pcp->lists[migratetype]); pcp->count++; if (pcp->count >= pcp->high) { - free_pages_bulk(zone, pcp->batch, &pcp->list, 0); + free_pcppages_bulk(zone, pcp->batch, pcp); pcp->count -= pcp->batch; } + +out: local_irq_restore(flags); put_cpu(); } @@ -1134,46 +1166,24 @@ again: cpu = get_cpu(); if (likely(order == 0)) { struct per_cpu_pages *pcp; + struct list_head *list; pcp = &zone_pcp(zone, cpu)->pcp; + list = &pcp->lists[migratetype]; local_irq_save(flags); - if (!pcp->count) { - pcp->count = rmqueue_bulk(zone, 0, - pcp->batch, &pcp->list, - migratetype, cold); - if (unlikely(!pcp->count)) - goto failed; - } - - /* Find a page of the appropriate migrate type */ - if (cold) { - list_for_each_entry_reverse(page, &pcp->list, lru) - if (page_private(page) == migratetype) - break; - } else { - list_for_each_entry(page, &pcp->list, lru) - if (page_private(page) == migratetype) - break; - } - - /* Allocate more to the pcp list if necessary */ - if (unlikely(&page->lru == &pcp->list)) { - int get_one_page = 0; - + if (list_empty(list)) { pcp->count += rmqueue_bulk(zone, 0, - pcp->batch, &pcp->list, + pcp->batch, list, migratetype, cold); - list_for_each_entry(page, &pcp->list, lru) { - if (get_pageblock_migratetype(page) != - MIGRATE_ISOLATE) { - get_one_page = 1; - break; - } - } - if (!get_one_page) + if (unlikely(list_empty(list))) goto failed; } + if (cold) + page = list_entry(list->prev, struct page, lru); + else + page = list_entry(list->next, struct page, lru); + list_del(&page->lru); pcp->count--; } else { @@ -3024,6 +3034,7 @@ static int zone_batchsize(struct zone *zone) static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) { struct per_cpu_pages *pcp; + int migratetype; memset(p, 0, sizeof(*p)); @@ -3031,7 +3042,8 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) pcp->count = 0; pcp->high = 6 * batch; pcp->batch = max(1UL, 1 * batch); - INIT_LIST_HEAD(&pcp->list); + for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) + INIT_LIST_HEAD(&pcp->lists[migratetype]); } /* @@ -3223,7 +3235,7 @@ static int __zone_pcp_update(void *data) pcp = &pset->pcp; local_irq_save(flags); - free_pages_bulk(zone, pcp->count, &pcp->list, 0); + free_pcppages_bulk(zone, pcp->count, pcp); setup_pageset(pset, batch); local_irq_restore(flags); } -- cgit v1.2.3-70-g09d2 From f3e8fccd06d27773186a0094371daf2d84c79469 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:03:25 -0700 Subject: mm: add get_dump_page In preparation for the next patch, add a simple get_dump_page(addr) interface for the CONFIG_ELF_CORE dumpers to use, instead of calling get_user_pages() directly. They're not interested in errors: they just want to use holes as much as possible, to save space and make sure that the data is aligned where the headers said it would be. Oh, and don't use that horrid DUMP_SEEK(off) macro! 
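The calling pattern the dumpers end up with is roughly the following (a simplified sketch of the binfmt_elf hunk below, with the coredump size-limit accounting trimmed):

/* Simplified sketch of the new dumper loop; see the real diff below. */
for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
        struct page *page = get_dump_page(addr);
        int stop;

        if (page) {                     /* real data: copy it out         */
                void *kaddr = kmap(page);
                stop = !dump_write(file, kaddr, PAGE_SIZE);
                kunmap(page);
                page_cache_release(page);
        } else {                        /* hole or zero page: leave a gap */
                stop = !dump_seek(file, PAGE_SIZE);
        }
        if (stop)
                goto end_coredump;
}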
Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Nick Piggin Cc: Mel Gorman Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 44 +++++++++++++--------------------------- fs/binfmt_elf_fdpic.c | 56 +++++++++++++++++---------------------------------- include/linux/mm.h | 1 + mm/memory.c | 33 +++++++++++++++++++++++++++++- 4 files changed, 66 insertions(+), 68 deletions(-) (limited to 'include') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7c1e65d5487..442d94fe255 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1280,9 +1280,6 @@ static int writenote(struct memelfnote *men, struct file *file, #define DUMP_WRITE(addr, nr) \ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ goto end_coredump; -#define DUMP_SEEK(off) \ - if (!dump_seek(file, (off))) \ - goto end_coredump; static void fill_elf_header(struct elfhdr *elf, int segs, u16 machine, u32 flags, u8 osabi) @@ -2016,7 +2013,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un goto end_coredump; /* Align to page */ - DUMP_SEEK(dataoff - foffset); + if (!dump_seek(file, dataoff - foffset)) + goto end_coredump; for (vma = first_vma(current, gate_vma); vma != NULL; vma = next_vma(vma, gate_vma)) { @@ -2027,33 +2025,19 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { struct page *page; - struct vm_area_struct *tmp_vma; - - if (get_user_pages(current, current->mm, addr, 1, 0, 1, - &page, &tmp_vma) <= 0) { - DUMP_SEEK(PAGE_SIZE); - } else { - if (page == ZERO_PAGE(0)) { - if (!dump_seek(file, PAGE_SIZE)) { - page_cache_release(page); - goto end_coredump; - } - } else { - void *kaddr; - flush_cache_page(tmp_vma, addr, - page_to_pfn(page)); - kaddr = kmap(page); - if ((size += PAGE_SIZE) > limit || - !dump_write(file, kaddr, - PAGE_SIZE)) { - kunmap(page); - page_cache_release(page); - goto end_coredump; - } - kunmap(page); - } + int stop; + + page = get_dump_page(addr); + if (page) { + void *kaddr = kmap(page); + stop = ((size += PAGE_SIZE) > limit) || + !dump_write(file, kaddr, PAGE_SIZE); + kunmap(page); page_cache_release(page); - } + } else + stop = !dump_seek(file, PAGE_SIZE); + if (stop) + goto end_coredump; } } diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 20fbeced472..76285471073 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1325,9 +1325,6 @@ static int writenote(struct memelfnote *men, struct file *file) #define DUMP_WRITE(addr, nr) \ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ goto end_coredump; -#define DUMP_SEEK(off) \ - if (!dump_seek(file, (off))) \ - goto end_coredump; static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs) { @@ -1518,6 +1515,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size, unsigned long *limit, unsigned long mm_flags) { struct vm_area_struct *vma; + int err = 0; for (vma = current->mm->mmap; vma; vma = vma->vm_next) { unsigned long addr; @@ -1525,43 +1523,26 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size, if (!maydump(vma, mm_flags)) continue; - for (addr = vma->vm_start; - addr < vma->vm_end; - addr += PAGE_SIZE - ) { - struct vm_area_struct *vma; - struct page *page; - - if (get_user_pages(current, current->mm, addr, 1, 0, 1, - &page, &vma) <= 0) { - DUMP_SEEK(file->f_pos + PAGE_SIZE); - } - else if (page == ZERO_PAGE(0)) { - 
page_cache_release(page); - DUMP_SEEK(file->f_pos + PAGE_SIZE); - } - else { - void *kaddr; - - flush_cache_page(vma, addr, page_to_pfn(page)); - kaddr = kmap(page); - if ((*size += PAGE_SIZE) > *limit || - !dump_write(file, kaddr, PAGE_SIZE) - ) { - kunmap(page); - page_cache_release(page); - return -EIO; - } + for (addr = vma->vm_start; addr < vma->vm_end; + addr += PAGE_SIZE) { + struct page *page = get_dump_page(addr); + if (page) { + void *kaddr = kmap(page); + *size += PAGE_SIZE; + if (*size > *limit) + err = -EFBIG; + else if (!dump_write(file, kaddr, PAGE_SIZE)) + err = -EIO; kunmap(page); page_cache_release(page); - } + } else if (!dump_seek(file, file->f_pos + PAGE_SIZE)) + err = -EFBIG; + if (err) + goto out; } } - - return 0; - -end_coredump: - return -EFBIG; +out: + return err; } #endif @@ -1802,7 +1783,8 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, goto end_coredump; } - DUMP_SEEK(dataoff); + if (!dump_seek(file, dataoff)) + goto end_coredump; if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0) goto end_coredump; diff --git a/include/linux/mm.h b/include/linux/mm.h index 19ff81c49ba..e41795bba95 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -817,6 +817,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, struct page **pages, struct vm_area_struct **vmas); int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); +struct page *get_dump_page(unsigned long addr); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); diff --git a/mm/memory.c b/mm/memory.c index 4b5200f5f35..a8430ff1383 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1423,9 +1423,40 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas); } - EXPORT_SYMBOL(get_user_pages); +/** + * get_dump_page() - pin user page in memory while writing it to core dump + * @addr: user address + * + * Returns struct page pointer of user page pinned for dump, + * to be freed afterwards by page_cache_release() or put_page(). + * + * Returns NULL on any kind of failure - a hole must then be inserted into + * the corefile, to preserve alignment with its headers; and also returns + * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - + * allowing a hole to be left in the corefile to save diskspace. + * + * Called without mmap_sem, but after all other threads have been killed. + */ +#ifdef CONFIG_ELF_CORE +struct page *get_dump_page(unsigned long addr) +{ + struct vm_area_struct *vma; + struct page *page; + + if (__get_user_pages(current, current->mm, addr, 1, + GUP_FLAGS_FORCE, &page, &vma) < 1) + return NULL; + if (page == ZERO_PAGE(0)) { + page_cache_release(page); + return NULL; + } + flush_cache_page(vma, addr, page_to_pfn(page)); + return page; +} +#endif /* CONFIG_ELF_CORE */ + pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { -- cgit v1.2.3-70-g09d2 From 8e4b9a60718970bbc02dfd3abd0b956ab65af231 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:03:26 -0700 Subject: mm: FOLL_DUMP replace FOLL_ANON The "FOLL_ANON optimization" and its use_zero_page() test have caused confusion and bugs: why does it test VM_SHARED? for the very good but unsatisfying reason that VMware crashed without. As we look to maybe reinstating anonymous use of the ZERO_PAGE, we need to sort this out. 
Easily done: it's silly for __get_user_pages() and follow_page() to be guessing whether it's safe to assume that they're being used for a coredump (which can take a shortcut snapshot where other uses must handle a fault) - just tell them with GUP_FLAGS_DUMP and FOLL_DUMP. get_dump_page() doesn't even want a ZERO_PAGE: an error suits fine. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Acked-by: Mel Gorman Reviewed-by: Minchan Kim Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 +- mm/internal.h | 1 + mm/memory.c | 43 ++++++++++++------------------------------- 3 files changed, 14 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index e41795bba95..45ee5b5a343 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1231,7 +1231,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address, #define FOLL_WRITE 0x01 /* check pte is writable */ #define FOLL_TOUCH 0x02 /* mark page accessed */ #define FOLL_GET 0x04 /* do get_page on page */ -#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */ +#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); diff --git a/mm/internal.h b/mm/internal.h index 166765cd58d..d41475078b2 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -252,6 +252,7 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, #define GUP_FLAGS_WRITE 0x01 #define GUP_FLAGS_FORCE 0x02 +#define GUP_FLAGS_DUMP 0x04 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int flags, diff --git a/mm/memory.c b/mm/memory.c index a8430ff1383..532a55bce6a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1174,41 +1174,22 @@ no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return page; - /* Fall through to ZERO_PAGE handling */ + no_page_table: /* * When core dumping an enormous anonymous area that nobody - * has touched so far, we don't want to allocate page tables. + * has touched so far, we don't want to allocate unnecessary pages or + * page tables. Return error instead of NULL to skip handle_mm_fault, + * then get_dump_page() will return NULL to leave a hole in the dump. + * But we can only make this optimization where a hole would surely + * be zero-filled if handle_mm_fault() actually did handle it. */ - if (flags & FOLL_ANON) { - page = ZERO_PAGE(0); - if (flags & FOLL_GET) - get_page(page); - BUG_ON(flags & FOLL_WRITE); - } + if ((flags & FOLL_DUMP) && + (!vma->vm_ops || !vma->vm_ops->fault)) + return ERR_PTR(-EFAULT); return page; } -/* Can we do the FOLL_ANON optimization? */ -static inline int use_zero_page(struct vm_area_struct *vma) -{ - /* - * We don't want to optimize FOLL_ANON for make_pages_present() - * when it tries to page in a VM_LOCKED region. As to VM_SHARED, - * we want to get the page from the page tables to make sure - * that we serialize and update with any other user of that - * mapping. - */ - if (vma->vm_flags & (VM_LOCKED | VM_SHARED)) - return 0; - /* - * And if we have a fault routine, it's not an anonymous region. 
- */ - return !vma->vm_ops || !vma->vm_ops->fault; -} - - - int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, int flags, struct page **pages, struct vm_area_struct **vmas) @@ -1288,8 +1269,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, foll_flags = FOLL_TOUCH; if (pages) foll_flags |= FOLL_GET; - if (!write && use_zero_page(vma)) - foll_flags |= FOLL_ANON; + if (flags & GUP_FLAGS_DUMP) + foll_flags |= FOLL_DUMP; do { struct page *page; @@ -1446,7 +1427,7 @@ struct page *get_dump_page(unsigned long addr) struct page *page; if (__get_user_pages(current, current->mm, addr, 1, - GUP_FLAGS_FORCE, &page, &vma) < 1) + GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1) return NULL; if (page == ZERO_PAGE(0)) { page_cache_release(page); -- cgit v1.2.3-70-g09d2 From 2a15efc953b26ad57d7d38b9e6782d57e53b4ab2 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:03:27 -0700 Subject: mm: follow_hugetlb_page flags follow_hugetlb_page() shouldn't be guessing about the coredump case either: pass the foll_flags down to it, instead of just the write bit. Remove that obscure huge_zeropage_ok() test. The decision is easy, though unlike the non-huge case - here vm_ops->fault is always set. But we know that a fault would serve up zeroes, unless there's already a hugetlbfs pagecache page to back the range. (Alternatively, since hugetlb pages aren't swapped out under pressure, you could save more dump space by arguing that a page not yet faulted into this process cannot be relevant to the dump; but that would be more surprising.) Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Nick Piggin Cc: Mel Gorman Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 4 +++- mm/hugetlb.c | 62 +++++++++++++++++++++++++++++-------------------- mm/memory.c | 14 ++++++----- 3 files changed, 48 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 16cdb75a543..e7f0fabfa1c 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -24,7 +24,9 @@ int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user * int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); -int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int); +int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, + struct page **, struct vm_area_struct **, + unsigned long *, int *, int, unsigned int flags); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *); void __unmap_hugepage_range(struct vm_area_struct *, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c001f846f17..6b41f70bbc7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2016,6 +2016,23 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h, return find_lock_page(mapping, idx); } +/* Return whether there is a pagecache page to back given address within VMA */ +static bool hugetlbfs_backed(struct hstate *h, + struct vm_area_struct *vma, unsigned long address) +{ + struct address_space *mapping; + pgoff_t idx; + struct 
page *page; + + mapping = vma->vm_file->f_mapping; + idx = vma_hugecache_offset(h, vma, address); + + page = find_get_page(mapping, idx); + if (page) + put_page(page); + return page != NULL; +} + static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int flags) { @@ -2211,54 +2228,52 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address, return NULL; } -static int huge_zeropage_ok(pte_t *ptep, int write, int shared) -{ - if (!ptep || write || shared) - return 0; - else - return huge_pte_none(huge_ptep_get(ptep)); -} - int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, int *length, int i, - int write) + unsigned int flags) { unsigned long pfn_offset; unsigned long vaddr = *position; int remainder = *length; struct hstate *h = hstate_vma(vma); - int zeropage_ok = 0; - int shared = vma->vm_flags & VM_SHARED; spin_lock(&mm->page_table_lock); while (vaddr < vma->vm_end && remainder) { pte_t *pte; + int absent; struct page *page; /* * Some archs (sparc64, sh*) have multiple pte_ts to - * each hugepage. We have to make * sure we get the + * each hugepage. We have to make sure we get the * first, for the page indexing below to work. */ pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); - if (huge_zeropage_ok(pte, write, shared)) - zeropage_ok = 1; + absent = !pte || huge_pte_none(huge_ptep_get(pte)); + + /* + * When coredumping, it suits get_dump_page if we just return + * an error if there's a hole and no huge pagecache to back it. + */ + if (absent && + ((flags & FOLL_DUMP) && !hugetlbfs_backed(h, vma, vaddr))) { + remainder = 0; + break; + } - if (!pte || - (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) || - (write && !pte_write(huge_ptep_get(pte)))) { + if (absent || + ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { int ret; spin_unlock(&mm->page_table_lock); - ret = hugetlb_fault(mm, vma, vaddr, write); + ret = hugetlb_fault(mm, vma, vaddr, + (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0); spin_lock(&mm->page_table_lock); if (!(ret & VM_FAULT_ERROR)) continue; remainder = 0; - if (!i) - i = -EFAULT; break; } @@ -2266,10 +2281,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, page = pte_page(huge_ptep_get(pte)); same_page: if (pages) { - if (zeropage_ok) - pages[i] = ZERO_PAGE(0); - else - pages[i] = mem_map_offset(page, pfn_offset); + pages[i] = mem_map_offset(page, pfn_offset); get_page(pages[i]); } @@ -2293,7 +2305,7 @@ same_page: *length = remainder; *position = vaddr; - return i; + return i ? i : -EFAULT; } void hugetlb_change_protection(struct vm_area_struct *vma, diff --git a/mm/memory.c b/mm/memory.c index 532a55bce6a..6359a4f80c4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1260,17 +1260,19 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, !(vm_flags & vma->vm_flags)) return i ? 
: -EFAULT; - if (is_vm_hugetlb_page(vma)) { - i = follow_hugetlb_page(mm, vma, pages, vmas, - &start, &nr_pages, i, write); - continue; - } - foll_flags = FOLL_TOUCH; if (pages) foll_flags |= FOLL_GET; if (flags & GUP_FLAGS_DUMP) foll_flags |= FOLL_DUMP; + if (write) + foll_flags |= FOLL_WRITE; + + if (is_vm_hugetlb_page(vma)) { + i = follow_hugetlb_page(mm, vma, pages, vmas, + &start, &nr_pages, i, foll_flags); + continue; + } do { struct page *page; -- cgit v1.2.3-70-g09d2 From 58fa879e1e640a1856f736b418984ebeccee1c95 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:03:31 -0700 Subject: mm: FOLL flags for GUP flags __get_user_pages() has been taking its own GUP flags, then processing them into FOLL flags for follow_page(). Though oddly named, the FOLL flags are more widely used, so pass them to __get_user_pages() now. Sorry, VM flags, VM_FAULT flags and FAULT_FLAGs are still distinct. (The patch to __get_user_pages() looks peculiar, with both gup_flags and foll_flags: the gup_flags remain constant; but as before there's an exceptional case, out of scope of the patch, in which foll_flags per page have FOLL_WRITE masked off.) Signed-off-by: Hugh Dickins Cc: Rik van Riel Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Nick Piggin Cc: Mel Gorman Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + mm/internal.h | 6 +----- mm/memory.c | 44 +++++++++++++++++++------------------------- mm/mlock.c | 4 ++-- mm/nommu.c | 16 ++++++++-------- 5 files changed, 31 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 45ee5b5a343..5409eced7aa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1232,6 +1232,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address, #define FOLL_TOUCH 0x02 /* mark page accessed */ #define FOLL_GET 0x04 /* do get_page on page */ #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ +#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); diff --git a/mm/internal.h b/mm/internal.h index d41475078b2..75596574911 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -250,12 +250,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, } #endif /* CONFIG_SPARSEMEM */ -#define GUP_FLAGS_WRITE 0x01 -#define GUP_FLAGS_FORCE 0x02 -#define GUP_FLAGS_DUMP 0x04 - int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, int len, int flags, + unsigned long start, int len, unsigned int foll_flags, struct page **pages, struct vm_area_struct **vmas); #define ZONE_RECLAIM_NOSCAN -2 diff --git a/mm/memory.c b/mm/memory.c index c8b5b9435a9..5c694f2b9c1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1209,27 +1209,29 @@ no_page_table: } int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, int nr_pages, int flags, + unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { int i; - unsigned int vm_flags = 0; - int write = !!(flags & GUP_FLAGS_WRITE); - int force = !!(flags & GUP_FLAGS_FORCE); + unsigned long vm_flags; if (nr_pages <= 0) return 0; + + VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); + /* * Require read or write permissions. - * If 'force' is set, we only require the "MAY" flags. + * If FOLL_FORCE is set, we only require the "MAY" flags. */ - vm_flags = write ? 
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); - vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); + vm_flags = (gup_flags & FOLL_WRITE) ? + (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); + vm_flags &= (gup_flags & FOLL_FORCE) ? + (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); i = 0; do { struct vm_area_struct *vma; - unsigned int foll_flags; vma = find_extend_vma(mm, start); if (!vma && in_gate_area(tsk, start)) { @@ -1241,7 +1243,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, pte_t *pte; /* user gate pages are read-only */ - if (write) + if (gup_flags & FOLL_WRITE) return i ? : -EFAULT; if (pg > TASK_SIZE) pgd = pgd_offset_k(pg); @@ -1278,22 +1280,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, !(vm_flags & vma->vm_flags)) return i ? : -EFAULT; - foll_flags = FOLL_TOUCH; - if (pages) - foll_flags |= FOLL_GET; - if (flags & GUP_FLAGS_DUMP) - foll_flags |= FOLL_DUMP; - if (write) - foll_flags |= FOLL_WRITE; - if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, - &start, &nr_pages, i, foll_flags); + &start, &nr_pages, i, gup_flags); continue; } do { struct page *page; + unsigned int foll_flags = gup_flags; /* * If we have a pending SIGKILL, don't keep faulting @@ -1302,9 +1297,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, if (unlikely(fatal_signal_pending(current))) return i ? i : -ERESTARTSYS; - if (write) - foll_flags |= FOLL_WRITE; - cond_resched(); while (!(page = follow_page(vma, start, foll_flags))) { int ret; @@ -1415,12 +1407,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) { - int flags = 0; + int flags = FOLL_TOUCH; + if (pages) + flags |= FOLL_GET; if (write) - flags |= GUP_FLAGS_WRITE; + flags |= FOLL_WRITE; if (force) - flags |= GUP_FLAGS_FORCE; + flags |= FOLL_FORCE; return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas); } @@ -1447,7 +1441,7 @@ struct page *get_dump_page(unsigned long addr) struct page *page; if (__get_user_pages(current, current->mm, addr, 1, - GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1) + FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1) return NULL; if (page == ZERO_PAGE(0)) { page_cache_release(page); diff --git a/mm/mlock.c b/mm/mlock.c index e13918d4fc4..22041aa9f5c 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -166,9 +166,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, VM_BUG_ON(end > vma->vm_end); VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); - gup_flags = 0; + gup_flags = FOLL_TOUCH | FOLL_GET; if (vma->vm_flags & VM_WRITE) - gup_flags = GUP_FLAGS_WRITE; + gup_flags |= FOLL_WRITE; while (nr_pages > 0) { int i; diff --git a/mm/nommu.c b/mm/nommu.c index 386443e9d2c..2d02ca17ce1 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -168,20 +168,20 @@ unsigned int kobjsize(const void *objp) } int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, int nr_pages, int flags, + unsigned long start, int nr_pages, int foll_flags, struct page **pages, struct vm_area_struct **vmas) { struct vm_area_struct *vma; unsigned long vm_flags; int i; - int write = !!(flags & GUP_FLAGS_WRITE); - int force = !!(flags & GUP_FLAGS_FORCE); /* calculate required read or write permissions. - * - if 'force' is set, we only require the "MAY" flags. + * If FOLL_FORCE is set, we only require the "MAY" flags. */ - vm_flags = write ? 
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); - vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); + vm_flags = (foll_flags & FOLL_WRITE) ? + (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); + vm_flags &= (foll_flags & FOLL_FORCE) ? + (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); for (i = 0; i < nr_pages; i++) { vma = find_vma(mm, start); @@ -223,9 +223,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int flags = 0; if (write) - flags |= GUP_FLAGS_WRITE; + flags |= FOLL_WRITE; if (force) - flags |= GUP_FLAGS_FORCE; + flags |= FOLL_FORCE; return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas); } -- cgit v1.2.3-70-g09d2 From 3f96b79ad96263cc0ece7bb340cddf9b2ddfb1b3 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Sep 2009 17:03:37 -0700 Subject: tmpfs: depend on shmem CONFIG_SHMEM off gives you (ramfs masquerading as) tmpfs, even when CONFIG_TMPFS is off: that's a little anomalous, and I'd intended to make more sense of it by removing CONFIG_TMPFS altogether, always enabling its code when CONFIG_SHMEM; but so many defconfigs have CONFIG_SHMEM on CONFIG_TMPFS off that we'd better leave that as is. But there is no point in asking for CONFIG_TMPFS if CONFIG_SHMEM is off: make TMPFS depend on SHMEM, which also prevents TMPFS_POSIX_ACL shmem_acl.o being pointlessly built into the kernel when SHMEM is off. And a selfish change, to prevent the world from being rebuilt when I switch between CONFIG_SHMEM on and off: the only CONFIG_SHMEM in the header files is mm.h shmem_lock() - give that a shmem.c stub instead. Signed-off-by: Hugh Dickins Acked-by: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Kconfig | 1 + include/linux/mm.h | 11 +---------- mm/shmem.c | 5 +++++ 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/fs/Kconfig b/fs/Kconfig index 455aa207e67..d4bf8caad8d 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -109,6 +109,7 @@ source "fs/sysfs/Kconfig" config TMPFS bool "Virtual memory file system support (former shm fs)" + depends on SHMEM help Tmpfs is a file system which keeps all files in virtual memory. 
diff --git a/include/linux/mm.h b/include/linux/mm.h index 5409eced7aa..5946e2ff9fe 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -702,17 +702,8 @@ extern void pagefault_out_of_memory(void); extern void show_free_areas(void); -#ifdef CONFIG_SHMEM -extern int shmem_lock(struct file *file, int lock, struct user_struct *user); -#else -static inline int shmem_lock(struct file *file, int lock, - struct user_struct *user) -{ - return 0; -} -#endif +int shmem_lock(struct file *file, int lock, struct user_struct *user); struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); - int shmem_zero_setup(struct vm_area_struct *); #ifndef CONFIG_MMU diff --git a/mm/shmem.c b/mm/shmem.c index 25ba75d0258..b4b56fd1e77 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2593,6 +2593,11 @@ int shmem_unuse(swp_entry_t entry, struct page *page) return 0; } +int shmem_lock(struct file *file, int lock, struct user_struct *user) +{ + return 0; +} + #define shmem_vm_ops generic_file_vm_ops #define shmem_file_operations ramfs_file_operations #define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev) -- cgit v1.2.3-70-g09d2 From 6bfde05bf5c9682e255c6a2c669dc80f91af6296 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Mon, 21 Sep 2009 17:03:43 -0700 Subject: hugetlbfs: allow the creation of files suitable for MAP_PRIVATE on the vfs internal mount This patchset adds a flag to mmap that allows the user to request that an anonymous mapping be backed with huge pages. This mapping will borrow functionality from the huge page shm code to create a file on the kernel internal mount and use it to approximate an anonymous mapping. The MAP_HUGETLB flag is a modifier to MAP_ANONYMOUS and will not work without both flags being preset. A new flag is necessary because there is no other way to hook into huge pages without creating a file on a hugetlbfs mount which wouldn't be MAP_ANONYMOUS. To userspace, this mapping will behave just like an anonymous mapping because the file is not accessible outside of the kernel. This patchset is meant to simplify the programming model. Presently there is a large chunk of boiler platecode, contained in libhugetlbfs, required to create private, hugepage backed mappings. This patch set would allow use of hugepages without linking to libhugetlbfs or having hugetblfs mounted. Unification of the VM code would provide these same benefits, but it has been resisted each time that it has been suggested for several reasons: it would break PAGE_SIZE assumptions across the kernel, it makes page-table abstractions really expensive, and it does not provide any benefit on architectures that do not support huge pages, incurring fast path penalties without providing any benefit on these architectures. This patch: There are two means of creating mappings backed by huge pages: 1. mmap() a file created on hugetlbfs 2. Use shm which creates a file on an internal mount which essentially maps it MAP_SHARED The internal mount is only used for shared mappings but there is very little that stops it being used for private mappings. This patch extends hugetlbfs_file_setup() to deal with the creation of files that will be mapped MAP_PRIVATE on the internal hugetlbfs mount. This extended API is used in a subsequent patch to implement the MAP_HUGETLB mmap() flag. 
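For context, what the series is building toward looks, from userspace, roughly like the sketch below. This is illustrative only: it relies on the MAP_HUGETLB flag added by the follow-up patch, a mman.h that exposes that flag, a 2MB default huge page size, and huge pages having been reserved beforehand.

/* Userspace sketch of the pseudo-anonymous huge page mapping that the
 * follow-up MAP_HUGETLB patch enables; illustrative only. */
#include <stddef.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2 * 1024 * 1024;   /* assumes one 2MB huge page      */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        if (p == MAP_FAILED)            /* e.g. no huge pages reserved    */
                return 1;

        ((char *)p)[0] = 1;             /* behaves like anonymous memory  */
        munmap(p, len);
        return 0;
}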
Signed-off-by: Eric Munson Acked-by: David Rientjes Cc: Mel Gorman Cc: Adam Litke Cc: David Gibson Cc: Lee Schermerhorn Cc: Nick Piggin Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 21 +++++++++++++++++---- include/linux/hugetlb.h | 12 ++++++++++-- ipc/shm.c | 2 +- 3 files changed, 28 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a93b885311d..06b7c2623f9 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -507,6 +507,13 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; INIT_LIST_HEAD(&inode->i_mapping->private_list); info = HUGETLBFS_I(inode); + /* + * The policy is initialized here even if we are creating a + * private inode because initialization simply creates an + * an empty rb tree and calls spin_lock_init(), later when we + * call mpol_free_shared_policy() it will just return because + * the rb tree will still be empty. + */ mpol_shared_policy_init(&info->policy, NULL); switch (mode & S_IFMT) { default: @@ -931,13 +938,19 @@ static struct file_system_type hugetlbfs_fs_type = { static struct vfsmount *hugetlbfs_vfsmount; -static int can_do_hugetlb_shm(void) +static int can_do_hugetlb_shm(int creat_flags) { - return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); + if (creat_flags != HUGETLB_SHMFS_INODE) + return 0; + if (capable(CAP_IPC_LOCK)) + return 1; + if (in_group_p(sysctl_hugetlb_shm_group)) + return 1; + return 0; } struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, - struct user_struct **user) + struct user_struct **user, int creat_flags) { int error = -ENOMEM; struct file *file; @@ -949,7 +962,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, if (!hugetlbfs_vfsmount) return ERR_PTR(-ENOENT); - if (!can_do_hugetlb_shm()) { + if (!can_do_hugetlb_shm(creat_flags)) { *user = current_user(); if (user_shm_lock(size, *user)) { WARN_ONCE(1, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e7f0fabfa1c..f6505ad8665 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -112,6 +112,14 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) #endif /* !CONFIG_HUGETLB_PAGE */ +enum { + /* + * The file will be used as an shm file so shmfs accounting rules + * apply + */ + HUGETLB_SHMFS_INODE = 1, +}; + #ifdef CONFIG_HUGETLBFS struct hugetlbfs_config { uid_t uid; @@ -150,7 +158,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) extern const struct file_operations hugetlbfs_file_operations; extern struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, int acct, - struct user_struct **user); + struct user_struct **user, int creat_flags); int hugetlb_get_quota(struct address_space *mapping, long delta); void hugetlb_put_quota(struct address_space *mapping, long delta); @@ -172,7 +180,7 @@ static inline void set_file_hugepages(struct file *file) #define is_file_hugepages(file) 0 #define set_file_hugepages(file) BUG() -#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS) +#define hugetlb_file_setup(name,size,acct,user,creat) ERR_PTR(-ENOSYS) #endif /* !CONFIG_HUGETLBFS */ diff --git a/ipc/shm.c b/ipc/shm.c index 30162a59621..9eb1488b543 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -370,7 +370,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params 
*params) if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, size, acctflag, - &shp->mlock_user); + &shp->mlock_user, HUGETLB_SHMFS_INODE); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even -- cgit v1.2.3-70-g09d2 From 90f72aa58bbf076b68e289fbd71eb829bc505923 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 21 Sep 2009 17:03:45 -0700 Subject: mm: add MAP_HUGETLB for mmaping pseudo-anonymous huge page regions Add a flag for mmap that will be used to request a huge page region that will look like anonymous memory to user space. This is accomplished by using a file on the internal vfsmount. MAP_HUGETLB is a modifier of MAP_ANONYMOUS and so must be specified with it. The region will behave the same as a MAP_ANONYMOUS region using small pages. The patch also adds the MAP_STACK flag, which was previously defined only on some architectures but not on others. Since MAP_STACK is meant to be a hint only, architectures can define it without assigning a specific meaning to it. Signed-off-by: Arnd Bergmann Cc: Eric B Munson Cc: Hugh Dickins Cc: David Rientjes Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/mman.h | 2 ++ arch/arm/include/asm/mman.h | 2 ++ arch/avr32/include/asm/mman.h | 2 ++ arch/cris/include/asm/mman.h | 2 ++ arch/frv/include/asm/mman.h | 2 ++ arch/h8300/include/asm/mman.h | 2 ++ arch/ia64/include/asm/mman.h | 2 ++ arch/m32r/include/asm/mman.h | 2 ++ arch/m68k/include/asm/mman.h | 2 ++ arch/mips/include/asm/mman.h | 2 ++ arch/mn10300/include/asm/mman.h | 2 ++ arch/parisc/include/asm/mman.h | 2 ++ arch/powerpc/include/asm/mman.h | 2 ++ arch/s390/include/asm/mman.h | 2 ++ arch/sparc/include/asm/mman.h | 2 ++ arch/xtensa/include/asm/mman.h | 2 ++ include/asm-generic/mman.h | 1 + 17 files changed, 33 insertions(+) (limited to 'include') diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h index c77c55756a7..99c56d47879 100644 --- a/arch/alpha/include/asm/mman.h +++ b/arch/alpha/include/asm/mman.h @@ -28,6 +28,8 @@ #define MAP_NORESERVE 0x10000 /* don't check for reservations */ #define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x40000 /* do not block on IO */ +#define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x100000 /* create a huge page mapping */ #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_SYNC 2 /* synchronous memory sync */ diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h index fc26976d8e3..6464d471bc7 100644 --- a/arch/arm/include/asm/mman.h +++ b/arch/arm/include/asm/mman.h @@ -10,6 +10,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/avr32/include/asm/mman.h b/arch/avr32/include/asm/mman.h index 9a92b15f6a6..38cea1b597c 100644 --- a/arch/avr32/include/asm/mman.h +++ b/arch/avr32/include/asm/mman.h @@ -10,6 +10,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ 
+#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/cris/include/asm/mman.h b/arch/cris/include/asm/mman.h index b7f0afba3ce..de6b903b22c 100644 --- a/arch/cris/include/asm/mman.h +++ b/arch/cris/include/asm/mman.h @@ -12,6 +12,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/frv/include/asm/mman.h b/arch/frv/include/asm/mman.h index 58c1d11e2ac..1939343322b 100644 --- a/arch/frv/include/asm/mman.h +++ b/arch/frv/include/asm/mman.h @@ -10,6 +10,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/h8300/include/asm/mman.h b/arch/h8300/include/asm/mman.h index cf35f0a6f12..eacacd04032 100644 --- a/arch/h8300/include/asm/mman.h +++ b/arch/h8300/include/asm/mman.h @@ -10,6 +10,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h index 48cf8b98a0b..cf55884e7f3 100644 --- a/arch/ia64/include/asm/mman.h +++ b/arch/ia64/include/asm/mman.h @@ -18,6 +18,8 @@ #define MAP_NORESERVE 0x04000 /* don't check for reservations */ #define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/asm/mman.h index 04a5f40aa40..d191089808f 100644 --- a/arch/m32r/include/asm/mman.h +++ b/arch/m32r/include/asm/mman.h @@ -10,6 +10,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git 
a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h index 9f5c4c4b3c7..c421fef55f5 100644 --- a/arch/m68k/include/asm/mman.h +++ b/arch/m68k/include/asm/mman.h @@ -10,6 +10,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h index f15554d1518..a2250f390a2 100644 --- a/arch/mips/include/asm/mman.h +++ b/arch/mips/include/asm/mman.h @@ -46,6 +46,8 @@ #define MAP_LOCKED 0x8000 /* pages are locked */ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ +#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x80000 /* create a huge page mapping */ /* * Flags for msync diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h index d04fac1da5a..94611c356bb 100644 --- a/arch/mn10300/include/asm/mman.h +++ b/arch/mn10300/include/asm/mman.h @@ -21,6 +21,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h index a12d9d43f50..9749c8afe83 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -22,6 +22,8 @@ #define MAP_GROWSDOWN 0x8000 /* stack-like segment */ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ +#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MS_SYNC 1 /* synchronous memory sync */ #define MS_ASYNC 2 /* sync memory asynchronously */ diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 7b1c49811a2..d4a7f645c5d 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -25,6 +25,8 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #ifdef __KERNEL__ #ifdef CONFIG_PPC64 diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index f63fe7b431e..22714ca181a 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h @@ -18,6 +18,8 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page 
mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h index 988192e8e95..c3029ad6619 100644 --- a/arch/sparc/include/asm/mman.h +++ b/arch/sparc/include/asm/mman.h @@ -20,6 +20,8 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/asm/mman.h index 6e55b4d1f9c..fca4db425f6 100644 --- a/arch/xtensa/include/asm/mman.h +++ b/arch/xtensa/include/asm/mman.h @@ -53,6 +53,8 @@ #define MAP_LOCKED 0x8000 /* pages are locked */ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x20000 /* do not block on IO */ +#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x80000 /* create a huge page mapping */ /* * Flags for msync diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h index 7cab4de2bca..32c8bd6a196 100644 --- a/include/asm-generic/mman.h +++ b/include/asm-generic/mman.h @@ -11,6 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -- cgit v1.2.3-70-g09d2 From 4e52780d41a741fb4861ae1df2413dd816ec11b1 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Mon, 21 Sep 2009 17:03:47 -0700 Subject: hugetlb: add MAP_HUGETLB for mmaping pseudo-anonymous huge page regions Add a flag for mmap that will be used to request a huge page region that will look like anonymous memory to userspace. This is accomplished by using a file on the internal vfsmount. MAP_HUGETLB is a modifier of MAP_ANONYMOUS and so must be specified with it. The region will behave the same as a MAP_ANONYMOUS region using small pages. 
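As a userspace illustration, not part of the patch: once the flag is exported to the mman headers, a pseudo-anonymous huge page mapping could be requested roughly as below. This sketch assumes MAP_HUGETLB is visible to userspace, that huge pages have been reserved via /proc/sys/vm/nr_hugepages, and that the default huge page size is 2 MB.

#include <stdio.h>
#include <sys/mman.h>

#define LENGTH (2UL * 1024 * 1024)	/* one default-sized huge page (assumed 2 MB) */

int main(void)
{
	char *p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	p[0] = 1;		/* fault in the huge page */
	munmap(p, LENGTH);
	return 0;
}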
[akpm@linux-foundation.org: fix arch definitions of MAP_HUGETLB] Signed-off-by: Eric B Munson Acked-by: David Rientjes Cc: Mel Gorman Cc: Adam Litke Cc: David Gibson Cc: Lee Schermerhorn Cc: Nick Piggin Cc: Hugh Dickins Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 7 +++++++ mm/mmap.c | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f6505ad8665..176e7ee73ef 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -112,12 +112,19 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) #endif /* !CONFIG_HUGETLB_PAGE */ +#define HUGETLB_ANON_FILE "anon_hugepage" + enum { /* * The file will be used as an shm file so shmfs accounting rules * apply */ HUGETLB_SHMFS_INODE = 1, + /* + * The file is being created on the internal vfs mount and shmfs + * accounting rules do not apply + */ + HUGETLB_ANONHUGE_INODE = 2, }; #ifdef CONFIG_HUGETLBFS diff --git a/mm/mmap.c b/mm/mmap.c index 1aeef6625e6..21d4029a07b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -949,6 +949,24 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, if (mm->map_count > sysctl_max_map_count) return -ENOMEM; + if (flags & MAP_HUGETLB) { + struct user_struct *user = NULL; + if (file) + return -EINVAL; + + /* + * VM_NORESERVE is used because the reservations will be + * taken when vm_ops->mmap() is called + * A dummy user value is used because we are not locking + * memory so no accounting is necessary + */ + len = ALIGN(len, huge_page_size(&default_hstate)); + file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, + &user, HUGETLB_ANONHUGE_INODE); + if (IS_ERR(file)) + return PTR_ERR(file); + } + /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. */ -- cgit v1.2.3-70-g09d2 From 3d2d827f5ca5e32816194119d5c980c7e04474a6 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Sep 2009 17:03:51 -0700 Subject: mm: move use_mm/unuse_mm from aio.c to mm/ Anyone who wants to do copy to/from user from a kernel thread, needs use_mm (like what fs/aio has). Move that into mm/, to make reusing and exporting easier down the line, and make aio use it. Next intended user, besides aio, will be vhost-net. Acked-by: Andrea Arcangeli Signed-off-by: Michael S. Tsirkin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/aio.c | 47 +------------------------------------- include/linux/mmu_context.h | 9 ++++++++ mm/Makefile | 2 +- mm/mmu_context.c | 55 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 66 insertions(+), 47 deletions(-) create mode 100644 include/linux/mmu_context.h create mode 100644 mm/mmu_context.c (limited to 'include') diff --git a/fs/aio.c b/fs/aio.c index d065b2c3273..fc21c23b238 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -34,7 +35,6 @@ #include #include -#include #if DEBUG > 1 #define dprintk printk @@ -594,51 +594,6 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) return ret; } -/* - * use_mm - * Makes the calling kernel thread take on the specified - * mm context. - * Called by the retry thread execute retries within the - * iocb issuer's mm context, so that copy_from/to_user - * operations work seamlessly for aio. 
- * (Note: this routine is intended to be called only - * from a kernel thread context) - */ -static void use_mm(struct mm_struct *mm) -{ - struct mm_struct *active_mm; - struct task_struct *tsk = current; - - task_lock(tsk); - active_mm = tsk->active_mm; - atomic_inc(&mm->mm_count); - tsk->mm = mm; - tsk->active_mm = mm; - switch_mm(active_mm, mm, tsk); - task_unlock(tsk); - - mmdrop(active_mm); -} - -/* - * unuse_mm - * Reverses the effect of use_mm, i.e. releases the - * specified mm context which was earlier taken on - * by the calling kernel thread - * (Note: this routine is intended to be called only - * from a kernel thread context) - */ -static void unuse_mm(struct mm_struct *mm) -{ - struct task_struct *tsk = current; - - task_lock(tsk); - tsk->mm = NULL; - /* active_mm is still 'mm' */ - enter_lazy_tlb(mm, tsk); - task_unlock(tsk); -} - /* * Queue up a kiocb to be retried. Assumes that the kiocb * has already been marked as kicked, and places it on diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h new file mode 100644 index 00000000000..70fffeba749 --- /dev/null +++ b/include/linux/mmu_context.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_MMU_CONTEXT_H +#define _LINUX_MMU_CONTEXT_H + +struct mm_struct; + +void use_mm(struct mm_struct *mm); +void unuse_mm(struct mm_struct *mm); + +#endif diff --git a/mm/Makefile b/mm/Makefile index a63bf59a0c7..728a9fde49d 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page_alloc.o page-writeback.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ - page_isolation.o mm_init.o $(mmu-y) + page_isolation.o mm_init.o mmu_context.o $(mmu-y) obj-y += init-mm.o obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o diff --git a/mm/mmu_context.c b/mm/mmu_context.c new file mode 100644 index 00000000000..fd473b51c90 --- /dev/null +++ b/mm/mmu_context.c @@ -0,0 +1,55 @@ +/* Copyright (C) 2009 Red Hat, Inc. + * + * See ../COPYING for licensing terms. + */ + +#include +#include +#include + +#include + +/* + * use_mm + * Makes the calling kernel thread take on the specified + * mm context. + * Called by the retry thread execute retries within the + * iocb issuer's mm context, so that copy_from/to_user + * operations work seamlessly for aio. + * (Note: this routine is intended to be called only + * from a kernel thread context) + */ +void use_mm(struct mm_struct *mm) +{ + struct mm_struct *active_mm; + struct task_struct *tsk = current; + + task_lock(tsk); + active_mm = tsk->active_mm; + atomic_inc(&mm->mm_count); + tsk->mm = mm; + tsk->active_mm = mm; + switch_mm(active_mm, mm, tsk); + task_unlock(tsk); + + mmdrop(active_mm); +} + +/* + * unuse_mm + * Reverses the effect of use_mm, i.e. releases the + * specified mm context which was earlier taken on + * by the calling kernel thread + * (Note: this routine is intended to be called only + * from a kernel thread context) + */ +void unuse_mm(struct mm_struct *mm) +{ + struct task_struct *tsk = current; + + task_lock(tsk); + tsk->mm = NULL; + /* active_mm is still 'mm' */ + enter_lazy_tlb(mm, tsk); + task_unlock(tsk); +} -- cgit v1.2.3-70-g09d2 From 69d25870f20c4b2563304f2b79c5300dd60a067e Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 21 Sep 2009 17:04:08 -0700 Subject: cpuidle: fix the menu governor to boost IO performance Fix the menu idle governor which balances power savings, energy efficiency and performance impact. 
The reason for a reworked governor is that there have been serious performance issues reported with the existing code on Nehalem server systems. To show this I'm sure Andrew wants to see benchmark results: (benchmark is "fio", "no cstates" is using "idle=poll") no cstates current linux new algorithm 1 disk 107 Mb/s 85 Mb/s 105 Mb/s 2 disks 215 Mb/s 123 Mb/s 209 Mb/s 12 disks 590 Mb/s 320 Mb/s 585 Mb/s In various power benchmark measurements, no degredation was found by our measurement&diagnostics team. Obviously a small percentage more power was used in the "fio" benchmark, due to the much higher performance. While it would be a novel idea to describe the new algorithm in this commit message, I cheaped out and described it in comments in the code instead. [changes since first post: spelling fixes from akpm, review feedback, folded menu-tng into menu.c] Signed-off-by: Arjan van de Ven Cc: Venkatesh Pallipadi Cc: Len Brown Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Yanmin Zhang Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/cpuidle/governors/menu.c | 251 +++++++++++++++++++++++++++++++++------ include/linux/sched.h | 4 + kernel/sched.c | 13 ++ 3 files changed, 229 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index f1df59f59a3..9f3d77532ab 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -2,8 +2,12 @@ * menu.c - the menu idle governor * * Copyright (C) 2006-2007 Adam Belay + * Copyright (C) 2009 Intel Corporation + * Author: + * Arjan van de Ven * - * This code is licenced under the GPL. + * This code is licenced under the GPL version 2 as described + * in the COPYING file that acompanies the Linux Kernel. */ #include @@ -13,20 +17,153 @@ #include #include #include +#include -#define BREAK_FUZZ 4 /* 4 us */ -#define PRED_HISTORY_PCT 50 +#define BUCKETS 12 +#define RESOLUTION 1024 +#define DECAY 4 +#define MAX_INTERESTING 50000 + +/* + * Concepts and ideas behind the menu governor + * + * For the menu governor, there are 3 decision factors for picking a C + * state: + * 1) Energy break even point + * 2) Performance impact + * 3) Latency tolerance (from pmqos infrastructure) + * These these three factors are treated independently. + * + * Energy break even point + * ----------------------- + * C state entry and exit have an energy cost, and a certain amount of time in + * the C state is required to actually break even on this cost. CPUIDLE + * provides us this duration in the "target_residency" field. So all that we + * need is a good prediction of how long we'll be idle. Like the traditional + * menu governor, we start with the actual known "next timer event" time. + * + * Since there are other source of wakeups (interrupts for example) than + * the next timer event, this estimation is rather optimistic. To get a + * more realistic estimate, a correction factor is applied to the estimate, + * that is based on historic behavior. For example, if in the past the actual + * duration always was 50% of the next timer tick, the correction factor will + * be 0.5. + * + * menu uses a running average for this correction factor, however it uses a + * set of factors, not just a single factor. 
This stems from the realization + * that the ratio is dependent on the order of magnitude of the expected + * duration; if we expect 500 milliseconds of idle time the likelihood of + * getting an interrupt very early is much higher than if we expect 50 micro + * seconds of idle time. A second independent factor that has big impact on + * the actual factor is if there is (disk) IO outstanding or not. + * (as a special twist, we consider every sleep longer than 50 milliseconds + * as perfect; there are no power gains for sleeping longer than this) + * + * For these two reasons we keep an array of 12 independent factors, that gets + * indexed based on the magnitude of the expected duration as well as the + * "is IO outstanding" property. + * + * Limiting Performance Impact + * --------------------------- + * C states, especially those with large exit latencies, can have a real + * noticable impact on workloads, which is not acceptable for most sysadmins, + * and in addition, less performance has a power price of its own. + * + * As a general rule of thumb, menu assumes that the following heuristic + * holds: + * The busier the system, the less impact of C states is acceptable + * + * This rule-of-thumb is implemented using a performance-multiplier: + * If the exit latency times the performance multiplier is longer than + * the predicted duration, the C state is not considered a candidate + * for selection due to a too high performance impact. So the higher + * this multiplier is, the longer we need to be idle to pick a deep C + * state, and thus the less likely a busy CPU will hit such a deep + * C state. + * + * Two factors are used in determing this multiplier: + * a value of 10 is added for each point of "per cpu load average" we have. + * a value of 5 points is added for each process that is waiting for + * IO on this CPU. + * (these values are experimentally determined) + * + * The load average factor gives a longer term (few seconds) input to the + * decision, while the iowait value gives a cpu local instantanious input. + * The iowait factor may look low, but realize that this is also already + * represented in the system load average. + * + */ struct menu_device { int last_state_idx; unsigned int expected_us; - unsigned int predicted_us; - unsigned int current_predicted_us; - unsigned int last_measured_us; - unsigned int elapsed_us; + u64 predicted_us; + unsigned int measured_us; + unsigned int exit_us; + unsigned int bucket; + u64 correction_factor[BUCKETS]; }; + +#define LOAD_INT(x) ((x) >> FSHIFT) +#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) + +static int get_loadavg(void) +{ + unsigned long this = this_cpu_load(); + + + return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10; +} + +static inline int which_bucket(unsigned int duration) +{ + int bucket = 0; + + /* + * We keep two groups of stats; one with no + * IO pending, one without. + * This allows us to calculate + * E(duration)|iowait + */ + if (nr_iowait_cpu()) + bucket = BUCKETS/2; + + if (duration < 10) + return bucket; + if (duration < 100) + return bucket + 1; + if (duration < 1000) + return bucket + 2; + if (duration < 10000) + return bucket + 3; + if (duration < 100000) + return bucket + 4; + return bucket + 5; +} + +/* + * Return a multiplier for the exit latency that is intended + * to take performance requirements into account. + * The more performance critical we estimate the system + * to be, the higher this multiplier, and thus the higher + * the barrier to go to an expensive C state. 
+ */ +static inline int performance_multiplier(void) +{ + int mult = 1; + + /* for higher loadavg, we are more reluctant */ + + mult += 2 * get_loadavg(); + + /* for IO wait tasks (per cpu!) we add 5x each */ + mult += 10 * nr_iowait_cpu(); + + return mult; +} + static DEFINE_PER_CPU(struct menu_device, menu_devices); /** @@ -38,37 +175,59 @@ static int menu_select(struct cpuidle_device *dev) struct menu_device *data = &__get_cpu_var(menu_devices); int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); int i; + int multiplier; + + data->last_state_idx = 0; + data->exit_us = 0; /* Special case when user has set very strict latency requirement */ - if (unlikely(latency_req == 0)) { - data->last_state_idx = 0; + if (unlikely(latency_req == 0)) return 0; - } - /* determine the expected residency time */ + /* determine the expected residency time, round up */ data->expected_us = - (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; + DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000); + + + data->bucket = which_bucket(data->expected_us); + + multiplier = performance_multiplier(); + + /* + * if the correction factor is 0 (eg first time init or cpu hotplug + * etc), we actually want to start out with a unity factor. + */ + if (data->correction_factor[data->bucket] == 0) + data->correction_factor[data->bucket] = RESOLUTION * DECAY; + + /* Make sure to round up for half microseconds */ + data->predicted_us = DIV_ROUND_CLOSEST( + data->expected_us * data->correction_factor[data->bucket], + RESOLUTION * DECAY); + + /* + * We want to default to C1 (hlt), not to busy polling + * unless the timer is happening really really soon. + */ + if (data->expected_us > 5) + data->last_state_idx = CPUIDLE_DRIVER_STATE_START; - /* Recalculate predicted_us based on prediction_history_pct */ - data->predicted_us *= PRED_HISTORY_PCT; - data->predicted_us += (100 - PRED_HISTORY_PCT) * - data->current_predicted_us; - data->predicted_us /= 100; /* find the deepest idle state that satisfies our constraints */ - for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) { + for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { struct cpuidle_state *s = &dev->states[i]; - if (s->target_residency > data->expected_us) - break; if (s->target_residency > data->predicted_us) break; if (s->exit_latency > latency_req) break; + if (s->exit_latency * multiplier > data->predicted_us) + break; + data->exit_us = s->exit_latency; + data->last_state_idx = i; } - data->last_state_idx = i - 1; - return i - 1; + return data->last_state_idx; } /** @@ -85,35 +244,49 @@ static void menu_reflect(struct cpuidle_device *dev) unsigned int last_idle_us = cpuidle_get_last_residency(dev); struct cpuidle_state *target = &dev->states[last_idx]; unsigned int measured_us; + u64 new_factor; /* * Ugh, this idle state doesn't support residency measurements, so we * are basically lost in the dark. As a compromise, assume we slept - * for one full standard timer tick. However, be aware that this - * could potentially result in a suboptimal state transition. + * for the whole expected time. */ if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) - last_idle_us = USEC_PER_SEC / HZ; + last_idle_us = data->expected_us; + + + measured_us = last_idle_us; /* - * measured_us and elapsed_us are the cumulative idle time, since the - * last time we were woken out of idle by an interrupt. 
+ * We correct for the exit latency; we are assuming here that the + * exit latency happens after the event that we're interested in. */ - if (data->elapsed_us <= data->elapsed_us + last_idle_us) - measured_us = data->elapsed_us + last_idle_us; + if (measured_us > data->exit_us) + measured_us -= data->exit_us; + + + /* update our correction ratio */ + + new_factor = data->correction_factor[data->bucket] + * (DECAY - 1) / DECAY; + + if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING) + new_factor += RESOLUTION * measured_us / data->expected_us; else - measured_us = -1; + /* + * we were idle so long that we count it as a perfect + * prediction + */ + new_factor += RESOLUTION; - /* Predict time until next break event */ - data->current_predicted_us = max(measured_us, data->last_measured_us); + /* + * We don't want 0 as factor; we always want at least + * a tiny bit of estimated time. + */ + if (new_factor == 0) + new_factor = 1; - if (last_idle_us + BREAK_FUZZ < - data->expected_us - target->exit_latency) { - data->last_measured_us = measured_us; - data->elapsed_us = 0; - } else { - data->elapsed_us = measured_us; - } + data->correction_factor[data->bucket] = new_factor; } /** diff --git a/include/linux/sched.h b/include/linux/sched.h index 17e9a8e9a51..97b10da0a3e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -140,6 +140,10 @@ extern int nr_processes(void); extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(void); +extern unsigned long this_cpu_load(void); + + extern void calc_global_load(void); extern u64 cpu_nr_migrations(int cpu); diff --git a/kernel/sched.c b/kernel/sched.c index 91843ba7f23..0ac9053c21d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2904,6 +2904,19 @@ unsigned long nr_iowait(void) return sum; } +unsigned long nr_iowait_cpu(void) +{ + struct rq *this = this_rq(); + return atomic_read(&this->nr_iowait); +} + +unsigned long this_cpu_load(void) +{ + struct rq *this = this_rq(); + return this->cpu_load[0]; +} + + /* Variables and functions for calc_load */ static atomic_long_t calc_load_tasks; static unsigned long calc_load_update; -- cgit v1.2.3-70-g09d2 From e6de3988aa52debb25a427d085061f3bf1181d54 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 21 Sep 2009 17:04:30 -0700 Subject: flex_array: add flex_array_clear function Add a new function to the flex_array API: int flex_array_clear(struct flex_array *fa, unsigned int element_nr) This function will zero the element at element_nr in the flex_array. Although this is equivalent to using flex_array_put() and passing a pointer to zero'd memory, flex_array_clear() does not require such a pointer to memory that would most likely need to be allocated on the caller's stack which could be significantly large depending on element_size. 
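A minimal sketch of how a caller might use the new call; it is not taken from an in-tree user, and struct my_item, the array size and the element index are made up for illustration.

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_item {		/* hypothetical element type */
	u64 key;
	u64 value;
};

static int example_put_then_clear(struct my_item *item)
{
	struct flex_array *fa;
	int err;

	fa = flex_array_alloc(sizeof(struct my_item), 128, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	err = flex_array_put(fa, 5, item, GFP_KERNEL);	/* store element 5 */
	if (!err)
		err = flex_array_clear(fa, 5);	/* zero it again, no temporary buffer needed */

	flex_array_free(fa);
	return err;
}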
Signed-off-by: David Rientjes Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/flex_array.h | 1 + lib/flex_array.c | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'include') diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h index 45ff1849151..3887b21f883 100644 --- a/include/linux/flex_array.h +++ b/include/linux/flex_array.h @@ -44,6 +44,7 @@ void flex_array_free(struct flex_array *fa); void flex_array_free_parts(struct flex_array *fa); int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, gfp_t flags); +int flex_array_clear(struct flex_array *fa, unsigned int element_nr); void *flex_array_get(struct flex_array *fa, unsigned int element_nr); #endif /* _FLEX_ARRAY_H */ diff --git a/lib/flex_array.c b/lib/flex_array.c index 7baed2fc3bc..b68f99be408 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -206,6 +206,32 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, return 0; } +/** + * flex_array_clear - clear element in array at @element_nr + * @element_nr: index of the position to clear. + * + * Locking must be provided by the caller. + */ +int flex_array_clear(struct flex_array *fa, unsigned int element_nr) +{ + int part_nr = fa_element_to_part_nr(fa, element_nr); + struct flex_array_part *part; + void *dst; + + if (element_nr >= fa->total_nr_elements) + return -ENOSPC; + if (elements_fit_in_base(fa)) + part = (struct flex_array_part *)&fa->parts[0]; + else { + part = fa->parts[part_nr]; + if (!part) + return -EINVAL; + } + dst = &part->elements[index_inside_part(fa, element_nr)]; + memset(dst, 0, fa->element_size); + return 0; +} + /** * flex_array_prealloc - guarantee that array space exists * @start: index of first array element for which space is allocated -- cgit v1.2.3-70-g09d2 From 19da3dd157f8db6fe727ff268dab4791d55a6371 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 21 Sep 2009 17:04:31 -0700 Subject: flex_array: poison free elements Newly initialized flex_array's and/or flex_array_part's are now poisoned with a new poison value, FLEX_ARRAY_FREE. Its value is similar to POISON_FREE used in the various slab allocators, but is different to distinguish between flex array's poisoned kmem and slab allocator poisoned kmem. This will allow us to identify flex_array_part's that only contain free elements (and free them with an addition to the flex_array API). This could also be extended in the future to identify `get' uses on elements that have not been `put'. If __GFP_ZERO is passed for a part's gfp mask, the poisoning is avoided. These elements are considered to be in-use since they have been initialized.
Signed-off-by: David Rientjes Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/poison.h | 3 +++ lib/flex_array.c | 15 +++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/poison.h b/include/linux/poison.h index 6729f7dcd60..7fc194aef8c 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -65,6 +65,9 @@ #define MUTEX_DEBUG_INIT 0x11 #define MUTEX_DEBUG_FREE 0x22 +/********** lib/flex_array.c **********/ +#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ + /********** security/ **********/ #define KEY_DESTROY 0xbd diff --git a/lib/flex_array.c b/lib/flex_array.c index b68f99be408..e22d0e9776a 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -113,6 +113,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, return NULL; ret->element_size = element_size; ret->total_nr_elements = total; + if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) + memset(ret->parts[0], FLEX_ARRAY_FREE, bytes_left_in_base()); return ret; } @@ -159,15 +161,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags) { struct flex_array_part *part = fa->parts[part_nr]; if (!part) { - /* - * This leaves the part pages uninitialized - * and with potentially random data, just - * as if the user had kmalloc()'d the whole. - * __GFP_ZERO can be used to zero it. - */ - part = kmalloc(FLEX_ARRAY_PART_SIZE, flags); + part = kmalloc(sizeof(struct flex_array_part), flags); if (!part) return NULL; + if (!(flags & __GFP_ZERO)) + memset(part, FLEX_ARRAY_FREE, + sizeof(struct flex_array_part)); fa->parts[part_nr] = part; } return part; @@ -228,7 +227,7 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr) return -EINVAL; } dst = &part->elements[index_inside_part(fa, element_nr)]; - memset(dst, 0, fa->element_size); + memset(dst, FLEX_ARRAY_FREE, fa->element_size); return 0; } -- cgit v1.2.3-70-g09d2 From 4af5a2f770cc8575840ccb1514ec76ecb592985c Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 21 Sep 2009 17:04:31 -0700 Subject: flex_array: add flex_array_shrink function Add a new function to the flex_array API: int flex_array_shrink(struct flex_array *fa) This function will free all unused second-level pages. Since elements are now poisoned if they are not allocated with __GFP_ZERO, it's possible to identify parts that consist solely of unused elements. flex_array_shrink() returns the number of pages freed. 
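A hedged sketch of the intended usage pattern, continuing the flex_array example above; nr_used and the pr_debug message are illustrative rather than taken from an in-tree caller.

#include <linux/flex_array.h>
#include <linux/kernel.h>

/* Clear every element that was in use, then let the library drop any
 * second-level pages that now contain only FLEX_ARRAY_FREE poison. */
static void example_drain(struct flex_array *fa, unsigned int nr_used)
{
	unsigned int i;
	int freed;

	for (i = 0; i < nr_used; i++)
		flex_array_clear(fa, i);

	freed = flex_array_shrink(fa);
	pr_debug("flex_array_shrink() reclaimed %d page(s)\n", freed);
}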
Signed-off-by: David Rientjes Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/flex_array.h | 1 + lib/flex_array.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) (limited to 'include') diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h index 3887b21f883..f12401e485f 100644 --- a/include/linux/flex_array.h +++ b/include/linux/flex_array.h @@ -46,5 +46,6 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, gfp_t flags); int flex_array_clear(struct flex_array *fa, unsigned int element_nr); void *flex_array_get(struct flex_array *fa, unsigned int element_nr); +int flex_array_shrink(struct flex_array *fa); #endif /* _FLEX_ARRAY_H */ diff --git a/lib/flex_array.c b/lib/flex_array.c index e22d0e9776a..1b03bb55341 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -291,3 +291,43 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr) } return &part->elements[index_inside_part(fa, element_nr)]; } + +static int part_is_free(struct flex_array_part *part) +{ + int i; + + for (i = 0; i < sizeof(struct flex_array_part); i++) + if (part->elements[i] != FLEX_ARRAY_FREE) + return 0; + return 1; +} + +/** + * flex_array_shrink - free unused second-level pages + * + * Frees all second-level pages that consist solely of unused + * elements. Returns the number of pages freed. + * + * Locking must be provided by the caller. + */ +int flex_array_shrink(struct flex_array *fa) +{ + struct flex_array_part *part; + int max_part = nr_base_part_ptrs(); + int part_nr; + int ret = 0; + + if (elements_fit_in_base(fa)) + return ret; + for (part_nr = 0; part_nr < max_part; part_nr++) { + part = fa->parts[part_nr]; + if (!part) + continue; + if (part_is_free(part)) { + fa->parts[part_nr] = NULL; + kfree(part); + ret++; + } + } + return ret; +} -- cgit v1.2.3-70-g09d2 From 45b588d6e5cc172704bac0c998ce54873b149b22 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 21 Sep 2009 17:04:33 -0700 Subject: flex_array: introduce DEFINE_FLEX_ARRAY FLEX_ARRAY_INIT(element_size, total_nr_elements) cannot determine if either parameter is valid, so flex arrays which are statically allocated with this interface can easily become corrupted or reference beyond its allocated memory. This removes FLEX_ARRAY_INIT() as a struct flex_array initializer since no initializer may perform the required checking. Instead, the array is now defined with a new interface: DEFINE_FLEX_ARRAY(name, element_size, total_nr_elements) This may be prefixed with `static' for file scope. This interface includes compile-time checking of the parameters to ensure they are valid. Since the validity of both element_size and total_nr_elements depend on FLEX_ARRAY_BASE_SIZE and FLEX_ARRAY_PART_SIZE, the kernel build will fail if either of these predefined values changes such that the array parameters are no longer valid. Since BUILD_BUG_ON() requires compile time constants, several of the static inline functions that were once local to lib/flex_array.c had to be moved to include/linux/flex_array.h. 
Signed-off-by: David Rientjes Acked-by: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/flex_array.h | 30 ++++++++++++++++++++++++++---- lib/flex_array.c | 36 ++++++++++-------------------------- 2 files changed, 36 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h index f12401e485f..1d747f72298 100644 --- a/include/linux/flex_array.h +++ b/include/linux/flex_array.h @@ -31,10 +31,32 @@ struct flex_array { }; }; -#define FLEX_ARRAY_INIT(size, total) { { {\ - .element_size = (size), \ - .total_nr_elements = (total), \ -} } } +/* Number of bytes left in base struct flex_array, excluding metadata */ +#define FLEX_ARRAY_BASE_BYTES_LEFT \ + (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) + +/* Number of pointers in base to struct flex_array_part pages */ +#define FLEX_ARRAY_NR_BASE_PTRS \ + (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) + +/* Number of elements of size that fit in struct flex_array_part */ +#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ + (FLEX_ARRAY_PART_SIZE / size) + +/* + * Defines a statically allocated flex array and ensures its parameters are + * valid. + */ +#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ + struct flex_array __arrayname = { { { \ + .element_size = (__element_size), \ + .total_nr_elements = (__total), \ + } } }; \ + static inline void __arrayname##_invalid_parameter(void) \ + { \ + BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ + FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ + } struct flex_array *flex_array_alloc(int element_size, unsigned int total, gfp_t flags); diff --git a/lib/flex_array.c b/lib/flex_array.c index 1b03bb55341..b62ce6cffd0 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -28,23 +28,6 @@ struct flex_array_part { char elements[FLEX_ARRAY_PART_SIZE]; }; -static inline int __elements_per_part(int element_size) -{ - return FLEX_ARRAY_PART_SIZE / element_size; -} - -static inline int bytes_left_in_base(void) -{ - int element_offset = offsetof(struct flex_array, parts); - int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset; - return bytes_left; -} - -static inline int nr_base_part_ptrs(void) -{ - return bytes_left_in_base() / sizeof(struct flex_array_part *); -} - /* * If a user requests an allocation which is small * enough, we may simply use the space in the @@ -54,7 +37,7 @@ static inline int nr_base_part_ptrs(void) static inline int elements_fit_in_base(struct flex_array *fa) { int data_size = fa->element_size * fa->total_nr_elements; - if (data_size <= bytes_left_in_base()) + if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT) return 1; return 0; } @@ -103,7 +86,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, gfp_t flags) { struct flex_array *ret; - int max_size = nr_base_part_ptrs() * __elements_per_part(element_size); + int max_size = FLEX_ARRAY_NR_BASE_PTRS * + FLEX_ARRAY_ELEMENTS_PER_PART(element_size); /* max_size will end up 0 if element_size > PAGE_SIZE */ if (total > max_size) @@ -114,14 +98,15 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, ret->element_size = element_size; ret->total_nr_elements = total; if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) - memset(ret->parts[0], FLEX_ARRAY_FREE, bytes_left_in_base()); + memset(ret->parts[0], FLEX_ARRAY_FREE, + FLEX_ARRAY_BASE_BYTES_LEFT); return ret; } static int fa_element_to_part_nr(struct flex_array *fa, unsigned int element_nr) { - return 
element_nr / __elements_per_part(fa->element_size); + return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size); } /** @@ -133,11 +118,10 @@ static int fa_element_to_part_nr(struct flex_array *fa, void flex_array_free_parts(struct flex_array *fa) { int part_nr; - int max_part = nr_base_part_ptrs(); if (elements_fit_in_base(fa)) return; - for (part_nr = 0; part_nr < max_part; part_nr++) + for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) kfree(fa->parts[part_nr]); } @@ -152,7 +136,8 @@ static unsigned int index_inside_part(struct flex_array *fa, { unsigned int part_offset; - part_offset = element_nr % __elements_per_part(fa->element_size); + part_offset = element_nr % + FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size); return part_offset * fa->element_size; } @@ -313,13 +298,12 @@ static int part_is_free(struct flex_array_part *part) int flex_array_shrink(struct flex_array *fa) { struct flex_array_part *part; - int max_part = nr_base_part_ptrs(); int part_nr; int ret = 0; if (elements_fit_in_base(fa)) return ret; - for (part_nr = 0; part_nr < max_part; part_nr++) { + for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) { part = fa->parts[part_nr]; if (!part) continue; -- cgit v1.2.3-70-g09d2 From 0ec48915e8bbb37dea3df85c41e4c3498b95664b Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 21 Sep 2009 17:04:42 -0700 Subject: lis3: fix typo Bit 0x80 in CTRL_REG3 is an ACTIVE_LOW rather than an ACTIVE_HIGH function, I got that wrong during my last change. Signed-off-by: Daniel Mack Acked-by: Pavel Machek Cc: Eric Piel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/lis3lv02d.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index ad651f4e45a..113778b5df3 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h @@ -32,7 +32,7 @@ struct lis3lv02d_platform_data { #define LIS3_IRQ2_DATA_READY (4 << 3) #define LIS3_IRQ2_CLICK (7 << 3) #define LIS3_IRQ_OPEN_DRAIN (1 << 6) -#define LIS3_IRQ_ACTIVE_HIGH (1 << 7) +#define LIS3_IRQ_ACTIVE_LOW (1 << 7) unsigned char irq_cfg; }; -- cgit v1.2.3-70-g09d2 From 8873c33483e62988ed886230aab71ef4c678f710 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 21 Sep 2009 17:04:43 -0700 Subject: lis3: add free-fall/wakeup function via platform_data This offers a way for platforms to define flags and thresholds for the free-fall/wakeup functions of the lis302d chips. 
More registers needed to be seperated as they are specific to the Signed-off-by: Daniel Mack Acked-by: Pavel Machek Cc: Eric Piel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/hwmon/lis3lv02d.c | 9 +++++++++ drivers/hwmon/lis3lv02d.h | 24 ++++++++++++++++++------ include/linux/lis3lv02d.h | 9 +++++++++ 3 files changed, 36 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index 271338bdb6b..cf5afb9a10a 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c @@ -454,6 +454,15 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) (p->click_thresh_y << 4)); } + if (p->wakeup_flags && (dev->whoami == LIS_SINGLE_ID)) { + dev->write(dev, FF_WU_CFG_1, p->wakeup_flags); + dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f); + /* default to 2.5ms for now */ + dev->write(dev, FF_WU_DURATION_1, 1); + /* enable high pass filter for both free-fall units */ + dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2); + } + if (p->irq_cfg) dev->write(dev, CTRL_REG3, p->irq_cfg); } diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h index e320e2f511f..3e1ff46f72d 100644 --- a/drivers/hwmon/lis3lv02d.h +++ b/drivers/hwmon/lis3lv02d.h @@ -58,15 +58,17 @@ enum lis3_reg { OUTZ_L = 0x2C, OUTZ_H = 0x2D, OUTZ = 0x2D, - FF_WU_CFG = 0x30, - FF_WU_SRC = 0x31, - FF_WU_ACK = 0x32, - FF_WU_THS_L = 0x34, - FF_WU_THS_H = 0x35, - FF_WU_DURATION = 0x36, }; enum lis302d_reg { + FF_WU_CFG_1 = 0x30, + FF_WU_SRC_1 = 0x31, + FF_WU_THS_1 = 0x32, + FF_WU_DURATION_1 = 0x33, + FF_WU_CFG_2 = 0x34, + FF_WU_SRC_2 = 0x35, + FF_WU_THS_2 = 0x36, + FF_WU_DURATION_2 = 0x37, CLICK_CFG = 0x38, CLICK_SRC = 0x39, CLICK_THSY_X = 0x3B, @@ -77,6 +79,12 @@ enum lis302d_reg { }; enum lis3lv02d_reg { + FF_WU_CFG = 0x30, + FF_WU_SRC = 0x31, + FF_WU_ACK = 0x32, + FF_WU_THS_L = 0x34, + FF_WU_THS_H = 0x35, + FF_WU_DURATION = 0x36, DD_CFG = 0x38, DD_SRC = 0x39, DD_ACK = 0x3A, @@ -107,6 +115,10 @@ enum lis3lv02d_ctrl2 { CTRL2_FS = 0x80, /* Full Scale selection */ }; +enum lis302d_ctrl2 { + HP_FF_WU2 = 0x08, + HP_FF_WU1 = 0x04, +}; enum lis3lv02d_ctrl3 { CTRL3_CFS0 = 0x01, diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index 113778b5df3..3cc2f2c53e4 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h @@ -34,6 +34,15 @@ struct lis3lv02d_platform_data { #define LIS3_IRQ_OPEN_DRAIN (1 << 6) #define LIS3_IRQ_ACTIVE_LOW (1 << 7) unsigned char irq_cfg; + +#define LIS3_WAKEUP_X_LO (1 << 0) +#define LIS3_WAKEUP_X_HI (1 << 1) +#define LIS3_WAKEUP_Y_LO (1 << 2) +#define LIS3_WAKEUP_Y_HI (1 << 3) +#define LIS3_WAKEUP_Z_LO (1 << 4) +#define LIS3_WAKEUP_Z_HI (1 << 5) + unsigned char wakeup_flags; + unsigned char wakeup_thresh; }; #endif /* __LIS3LV02D_H_ */ -- cgit v1.2.3-70-g09d2 From abd6633c67925f90775bb74755f9c547e30f1f20 Mon Sep 17 00:00:00 2001 From: David Härdeman Date: Mon, 21 Sep 2009 17:04:52 -0700 Subject: pnp: add a shutdown method to pnp drivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The shutdown method is used by the winbond cir driver to setup the hardware for wake-from-S5. 
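A hedged sketch of a PNP driver wiring up the new hook: the driver name, the PNP id "ABC0123" and the empty callback bodies are hypothetical, and the .name/.id_table fields are assumed to follow the usual pnp_driver layout; only the .shutdown field is what this patch adds.

static const struct pnp_device_id example_cir_ids[] = {
	{ "ABC0123", 0 },	/* hypothetical PNP id */
	{ }
};

static int example_cir_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
	return 0;
}

static void example_cir_remove(struct pnp_dev *dev)
{
}

static void example_cir_shutdown(struct pnp_dev *dev)
{
	/* arm the hardware for wake-from-S5 here */
}

static struct pnp_driver example_cir_driver = {
	.name		= "example_cir",
	.id_table	= example_cir_ids,
	.probe		= example_cir_probe,
	.remove		= example_cir_remove,
	.shutdown	= example_cir_shutdown,	/* new callback added by this patch */
};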
Signed-off-by: Bjorn Helgaas Signed-off-by: David Härdeman Cc: Dmitry Torokhov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/driver.c | 10 ++++++++++ include/linux/pnp.h | 1 + 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index 527ee764c93..cd11b113494 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -135,6 +135,15 @@ static int pnp_device_remove(struct device *dev) return 0; } +static void pnp_device_shutdown(struct device *dev) +{ + struct pnp_dev *pnp_dev = to_pnp_dev(dev); + struct pnp_driver *drv = pnp_dev->driver; + + if (drv && drv->shutdown) + drv->shutdown(pnp_dev); +} + static int pnp_bus_match(struct device *dev, struct device_driver *drv) { struct pnp_dev *pnp_dev = to_pnp_dev(dev); @@ -203,6 +212,7 @@ struct bus_type pnp_bus_type = { .match = pnp_bus_match, .probe = pnp_device_probe, .remove = pnp_device_remove, + .shutdown = pnp_device_shutdown, .suspend = pnp_bus_suspend, .resume = pnp_bus_resume, .dev_attrs = pnp_interface_attrs, diff --git a/include/linux/pnp.h b/include/linux/pnp.h index b063c7328ba..fddfafaed02 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -360,6 +360,7 @@ struct pnp_driver { unsigned int flags; int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id); void (*remove) (struct pnp_dev *dev); + void (*shutdown) (struct pnp_dev *dev); int (*suspend) (struct pnp_dev *dev, pm_message_t state); int (*resume) (struct pnp_dev *dev); struct device_driver driver; -- cgit v1.2.3-70-g09d2 From 6ef297f86b62f187c59475784208f75c2ed8ccd8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 22 Sep 2009 14:29:36 +0100 Subject: ARM: 5720/1: Move MMCI header to amba include dir This moves the mmci platform data definition struct away from arch/arm/include/asm/mach/mmc.h into the more proper place among the other primecells in include/linux/amba/mmci.h and at the same time renames it to "mmci.h", and also the struct in this file confusingly named mmc_platform_data has been renamed mmci_platform_data for clarity. 
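For illustration, a board file after the rename includes <linux/amba/mmci.h> instead of <asm/mach/mmc.h> and fills in mmci_platform_data; the status callback and the always-present card behaviour below are made up, and the field names match the new header shown further down.

#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>

static unsigned int board_mmc_status(struct device *dev)
{
	return 1;	/* sketch: pretend a card is always present */
}

static struct mmci_platform_data board_mmc0_plat_data = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.status		= board_mmc_status,
	.gpio_wp	= -1,
	.gpio_cd	= -1,
};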
Cc: Catalin Marinas Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/mach/mmc.h | 18 ------------------ arch/arm/mach-integrator/integrator_cp.c | 4 ++-- arch/arm/mach-realview/core.c | 6 +++--- arch/arm/mach-realview/core.h | 4 ++-- arch/arm/mach-realview/realview_eb.c | 2 +- arch/arm/mach-realview/realview_pb1176.c | 2 +- arch/arm/mach-realview/realview_pb11mp.c | 2 +- arch/arm/mach-realview/realview_pba8.c | 2 +- arch/arm/mach-realview/realview_pbx.c | 2 +- arch/arm/mach-u300/mmc.c | 4 ++-- arch/arm/mach-versatile/core.c | 4 ++-- arch/arm/mach-versatile/versatile_pb.c | 4 ++-- drivers/mmc/host/mmci.c | 4 ++-- drivers/mmc/host/mmci.h | 2 +- include/linux/amba/mmci.h | 18 ++++++++++++++++++ 15 files changed, 39 insertions(+), 39 deletions(-) delete mode 100644 arch/arm/include/asm/mach/mmc.h create mode 100644 include/linux/amba/mmci.h (limited to 'include') diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h deleted file mode 100644 index 27bec555ee1..00000000000 --- a/arch/arm/include/asm/mach/mmc.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * arch/arm/include/asm/mach/mmc.h - */ -#ifndef ASMARM_MACH_MMC_H -#define ASMARM_MACH_MMC_H - -#include - -struct mmc_platform_data { - unsigned int ocr_mask; /* available voltages */ - u32 (*translate_vdd)(struct device *, unsigned int); - unsigned int (*status)(struct device *); - int gpio_wp; - int gpio_cd; - unsigned long capabilities; -}; - -#endif diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index 2a318eba1b0..3f35293d457 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -35,7 +36,6 @@ #include #include #include -#include #include #include @@ -400,7 +400,7 @@ static unsigned int mmc_status(struct device *dev) return status & 8; } -static struct mmc_platform_data mmc_data = { +static struct mmci_platform_data mmc_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = mmc_status, .gpio_wp = -1, diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 5a5e1f72461..a2083b60e3f 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -44,7 +45,6 @@ #include #include #include -#include #include @@ -237,14 +237,14 @@ static unsigned int realview_mmc_status(struct device *dev) return readl(REALVIEW_SYSMCI) & mask; } -struct mmc_platform_data realview_mmc0_plat_data = { +struct mmci_platform_data realview_mmc0_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = realview_mmc_status, .gpio_wp = 17, .gpio_cd = 16, }; -struct mmc_platform_data realview_mmc1_plat_data = { +struct mmci_platform_data realview_mmc1_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = realview_mmc_status, .gpio_wp = 19, diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h index 59a337ba4be..46cd6acb4d4 100644 --- a/arch/arm/mach-realview/core.h +++ b/arch/arm/mach-realview/core.h @@ -47,8 +47,8 @@ static struct amba_device name##_device = { \ extern struct platform_device realview_flash_device; extern struct platform_device realview_cf_device; extern struct platform_device realview_i2c_device; -extern struct mmc_platform_data realview_mmc0_plat_data; -extern struct mmc_platform_data realview_mmc1_plat_data; +extern struct mmci_platform_data realview_mmc0_plat_data; +extern 
struct mmci_platform_data realview_mmc1_plat_data; extern struct clcd_board clcd_plat_data; extern void __iomem *gic_cpu_base_addr; extern void __iomem *timer0_va_base; diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index c0795ea3b3a..1d65e64ae57 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -37,7 +38,6 @@ #include #include -#include #include #include diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index 395dc18070b..2817fe09931 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -37,7 +38,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index c0c9e35e6e3..94680fcf726 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -38,7 +39,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c index 4fc64e146c3..941beb2b970 100644 --- a/arch/arm/mach-realview/realview_pba8.c +++ b/arch/arm/mach-realview/realview_pba8.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,6 @@ #include #include -#include #include #include diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index cf68b542606..7e4bc6cdca5 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,6 @@ #include #include -#include #include #include diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c index 82af247760e..7b6b016786b 100644 --- a/arch/arm/mach-u300/mmc.c +++ b/arch/arm/mach-u300/mmc.c @@ -19,8 +19,8 @@ #include #include #include +#include -#include #include "mmc.h" #include "padmux.h" @@ -28,7 +28,7 @@ struct mmci_card_event { struct input_dev *mmc_input; int mmc_inserted; struct work_struct workq; - struct mmc_platform_data mmc0_plat_data; + struct mmci_platform_data mmc0_plat_data; }; static unsigned int mmc_status(struct device *dev) diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 975eae41ee6..e13be7c444c 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -47,7 +48,6 @@ #include #include #include -#include #include "core.h" #include "clock.h" @@ -369,7 +369,7 @@ unsigned int mmc_status(struct device *dev) return readl(VERSATILE_SYSMCI) & mask; } -static struct mmc_platform_data mmc0_plat_data = { +static struct mmci_platform_data mmc0_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = mmc_status, .gpio_wp = -1, diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c index 9af8d8154df..239cd30fc4f 100644 --- a/arch/arm/mach-versatile/versatile_pb.c +++ b/arch/arm/mach-versatile/versatile_pb.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -31,7 +32,6 @@ #include #include -#include #include "core.h" @@ -41,7 +41,7 @@ #define IRQ_MMCI1A 
IRQ_SIC_MMCI1A #endif -static struct mmc_platform_data mmc1_plat_data = { +static struct mmci_platform_data mmc1_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = mmc_status, .gpio_wp = -1, diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index bf7c05b29e2..79205e565c0 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -22,12 +22,12 @@ #include #include #include +#include #include #include #include #include -#include #include "mmci.h" @@ -537,7 +537,7 @@ static void mmci_check_status(unsigned long data) static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) { - struct mmc_platform_data *plat = dev->dev.platform_data; + struct mmci_platform_data *plat = dev->dev.platform_data; struct mmci_host *host; struct mmc_host *mmc; int ret; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 839f264c972..a7f9a51a0a3 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -161,7 +161,7 @@ struct mmci_host { unsigned int mclk; unsigned int cclk; u32 pwr; - struct mmc_platform_data *plat; + struct mmci_platform_data *plat; u8 hw_designer; u8 hw_revision:4; diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h new file mode 100644 index 00000000000..6b4241748dd --- /dev/null +++ b/include/linux/amba/mmci.h @@ -0,0 +1,18 @@ +/* + * include/linux/amba/mmci.h + */ +#ifndef AMBA_MMCI_H +#define AMBA_MMCI_H + +#include + +struct mmci_platform_data { + unsigned int ocr_mask; /* available voltages */ + u32 (*translate_vdd)(struct device *, unsigned int); + unsigned int (*status)(struct device *); + int gpio_wp; + int gpio_cd; + unsigned long capabilities; +}; + +#endif -- cgit v1.2.3-70-g09d2 From 8cd09a5984c08dafade74c9c1ab4311af2bf2d24 Mon Sep 17 00:00:00 2001 From: Li Hong Date: Tue, 22 Sep 2009 18:00:44 +0800 Subject: tracing: Fix a comment and a trivial format issue in tracepoint.h Fix the tracepoint documentation path in tracepoints headers and a misaligned tabulation. Signed-off-by: Li Hong Cc: Steven Rostedt Cc: Ingo Molnar LKML-Reference: <3a3680030909220300h7cf18849q4d4702b9d4feaa67@mail.gmail.com> Signed-off-by: Frederic Weisbecker --- include/linux/tracepoint.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 63a3f7a8058..2aac8a83e89 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -4,7 +4,7 @@ /* * Kernel Tracepoint API. * - * See Documentation/tracepoint.txt. + * See Documentation/trace/tracepoints.txt. * * (C) Copyright 2008 Mathieu Desnoyers * @@ -36,7 +36,7 @@ struct tracepoint { #ifndef DECLARE_TRACE #define TP_PROTO(args...) args -#define TP_ARGS(args...) args +#define TP_ARGS(args...) args #ifdef CONFIG_TRACEPOINTS -- cgit v1.2.3-70-g09d2 From 9961079348d43dddb1f21118c17f054f63bbaa05 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 22 Sep 2009 12:40:33 +1000 Subject: tracing/workqueue: Use %pf in workqueue trace events Using %pf instead of %pF suppresses printing of the function offset which will always be 0 in the case of worklet functions.
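For reference, a rough sketch of how the two vsprintf extensions differ for a worklet pointer; the printed strings are hypothetical examples, not output from this patch:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative only: print the same worklet pointer both ways. */
static void show_worklet(struct work_struct *work)
{
	pr_info("func=%pF\n", work->func);	/* e.g. "func=flush_to_ldisc+0x0/0x210" */
	pr_info("func=%pf\n", work->func);	/* e.g. "func=flush_to_ldisc" */
}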
Signed-off-by: Anton Blanchard Cc: KOSAKI Motohiro Cc: Li Zefan Cc: Zhaolei Cc: Lai Jiangshan Cc: Ingo Molnar Cc: Steven Rostedt LKML-Reference: <20090922024033.GB31801@kryten> Signed-off-by: Frederic Weisbecker --- include/trace/events/workqueue.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index fcfd9a1e4b9..e4612dbd7ba 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -26,7 +26,7 @@ TRACE_EVENT(workqueue_insertion, __entry->func = work->func; ), - TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, + TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, __entry->thread_pid, __entry->func) ); @@ -48,7 +48,7 @@ TRACE_EVENT(workqueue_execution, __entry->func = work->func; ), - TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, + TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, __entry->thread_pid, __entry->func) ); -- cgit v1.2.3-70-g09d2 From ec4756238239f1a331d9fb95bad8b281dad56855 Mon Sep 17 00:00:00 2001 From: Steve Glendinning Date: Tue, 22 Sep 2009 04:00:27 +0000 Subject: smsc95xx: fix transmission where ZLP is expected Usbnet framework assumes USB hardware doesn't handle zero length packets, but SMSC LAN95xx requires these to be sent for correct operation. This patch fixes an easily reproducible tx lockup when sending a frame that results in exactly 512 bytes in a USB transmission (e.g. a UDP frame with 458 data bytes, due to IP headers and our USB headers). It adds an extra flag to usbnet for the hardware driver to indicate that it can handle and requires the zero length packets. This patch should not affect other usbnet users, please also consider for -stable. Signed-off-by: Steve Glendinning Signed-off-by: David S. Miller --- drivers/net/usb/smsc95xx.c | 2 +- drivers/net/usb/usbnet.c | 2 +- include/linux/usb/usbnet.h | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 3aafebdbe7b..c6c922247d0 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1227,7 +1227,7 @@ static const struct driver_info smsc95xx_info = { .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, - .flags = FLAG_ETHER, + .flags = FLAG_ETHER | FLAG_SEND_ZLP, }; static const struct usb_device_id products[] = { diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 24b36f79515..ca5ca5ae061 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1049,7 +1049,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, * NOTE: strictly conforming cdc-ether devices should expect * the ZLP here, but ignore the one-byte packet. */ - if ((length % dev->maxpacket) == 0) { + if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) { urb->transfer_buffer_length++; if (skb_tailroom(skb)) { skb->data[skb->len] = 0; diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index bb69e256cd1..f8147305205 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -89,6 +89,7 @@ struct driver_info { #define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */ #define FLAG_WLAN 0x0080 /* use "wlan%d" names */ #define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */ +#define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */ /* init device ... 
can sleep, or cause probe() failure */ -- cgit v1.2.3-70-g09d2 From 7c329288d72e025db4feac65f0fed95fb3e3ef1c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 23 Sep 2009 09:52:18 +1000 Subject: vgaarb: make client interface config invariant. Fixes build when VGA_ARB is off. Reported-by: Ingo Molnar Signed-off-by: Ingo Molnar Signed-off-by: Dave Airlie --- include/linux/vgaarb.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index e81c64af80c..923f9040ea2 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -41,7 +41,7 @@ * interrupts at any time. */ extern void vga_set_legacy_decoding(struct pci_dev *pdev, - unsigned int decodes); + unsigned int decodes); /** * vga_get - acquire & locks VGA resources @@ -193,8 +193,17 @@ static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) * They driver will get a callback when VGA arbitration is first used * by userspace since we some older X servers have issues. */ +#if defined(CONFIG_VGA_ARB) int vga_client_register(struct pci_dev *pdev, void *cookie, void (*irq_set_state)(void *cookie, bool state), unsigned int (*set_vga_decode)(void *cookie, bool state)); +#else +static inline int vga_client_register(struct pci_dev *pdev, void *cookie, + void (*irq_set_state)(void *cookie, bool state), + unsigned int (*set_vga_decode)(void *cookie, bool state)) +{ + return 0; +} +#endif #endif /* LINUX_VGA_H */ -- cgit v1.2.3-70-g09d2 From bb6baf76f45708dbba651ed76a7ad94462f30c0b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 22 Sep 2009 14:24:13 +0100 Subject: drm/i915: Track purged state. In order to correctly prevent the invalid reuse of a purged buffer, we need to track such events and warn the user before something bad happens. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 24 +++++++++++++++--------- include/drm/i915_drm.h | 1 + 2 files changed, 16 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8beec97fa34..f4f714e39b7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1470,6 +1470,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) int i; BUG_ON(obj_priv->pages_refcount == 0); + BUG_ON(obj_priv->madv == __I915_MADV_PURGED); if (--obj_priv->pages_refcount != 0) return; @@ -1534,11 +1535,14 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) static void i915_gem_object_truncate(struct drm_gem_object *obj) { - struct inode *inode; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct inode *inode; - inode = obj->filp->f_path.dentry->d_inode; - if (inode->i_op->truncate) - inode->i_op->truncate (inode); + inode = obj->filp->f_path.dentry->d_inode; + if (inode->i_op->truncate) + inode->i_op->truncate (inode); + + obj_priv->madv = __I915_MADV_PURGED; } static inline int @@ -2559,7 +2563,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) if (dev_priv->mm.suspended) return -EBUSY; - if (obj_priv->madv == I915_MADV_DONTNEED) { + if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to bind a purgeable object\n"); return -EINVAL; } @@ -3928,8 +3932,8 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } obj_priv = obj->driver_private; - if (obj_priv->madv == I915_MADV_DONTNEED) { - DRM_ERROR("Attempting to pin a I915_MADV_DONTNEED buffer\n"); + if (obj_priv->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to pin a purgeable buffer\n"); drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -4081,14 +4085,16 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - obj_priv->madv = args->madv; - args->retained = obj_priv->gtt_space != NULL; + if (obj_priv->madv != __I915_MADV_PURGED) + obj_priv->madv = args->madv; /* if the object is no longer bound, discard its backing storage */ if (i915_gem_object_is_purgeable(obj_priv) && obj_priv->gtt_space == NULL) i915_gem_object_truncate(obj); + args->retained = obj_priv->madv != __I915_MADV_PURGED; + drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 607c9da061e..7e0cb1da92e 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -671,6 +671,7 @@ struct drm_i915_get_pipe_from_crtc_id { #define I915_MADV_WILLNEED 0 #define I915_MADV_DONTNEED 1 +#define __I915_MADV_PURGED 2 /* internal state */ struct drm_i915_gem_madvise { /** Handle of the buffer to change the backing store advice */ -- cgit v1.2.3-70-g09d2 From 268e46712d57a6493cc0f98e7d200a0f674c31ed Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 22 Sep 2009 19:56:50 -0700 Subject: asm-generic: syscall_get_nr returns int Only 32 bits of system call number are meaningful, so make the specification for syscall_get_nr() be to return int, not long. 
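A hedged sketch of the kind of caller this contract is written for; the reporting function itself is made up, only the syscall_get_nr() semantics follow the header below:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Illustrative caller only: honour the documented -1 convention. */
static void report_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	int nr = syscall_get_nr(task, regs);	/* int even on 64-bit */

	if (nr == -1) {
		/* blocked in the kernel on a fault or signal, not in a syscall */
		return;
	}
	pr_info("task %d is in system call %d\n", task->pid, nr);
}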
Signed-off-by: Roland McGrath --- include/asm-generic/syscall.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index ea8087b55ff..5c122ae6bfa 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -1,7 +1,7 @@ /* * Access to user system call parameters and results * - * Copyright (C) 2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions @@ -32,9 +32,13 @@ struct pt_regs; * If @task is not executing a system call, i.e. it's blocked * inside the kernel for a fault or signal, returns -1. * + * Note this returns int even on 64-bit machines. Only 32 bits of + * system call number can be meaningful. If the actual arch value + * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. + * * It's only valid to call this when @task is known to be blocked. */ -long syscall_get_nr(struct task_struct *task, struct pt_regs *regs); +int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); /** * syscall_rollback - roll back registers after an aborted system call -- cgit v1.2.3-70-g09d2 From 3c1b27d5043086a485f8526353ae9fe37bfa1065 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 23 Sep 2009 22:26:31 -0600 Subject: virtio: make add_buf return capacity remaining This API change means that virtio_net can tell how much capacity remains for buffers. It's necessarily fuzzy, since VIRTIO_RING_F_INDIRECT_DESC means we can fit any number of descriptors in one, *if* we can kmalloc. Signed-off-by: Rusty Russell Cc: Dinesh Subhraveti --- drivers/block/virtio_blk.c | 2 +- drivers/char/hw_random/virtio-rng.c | 2 +- drivers/char/virtio_console.c | 4 ++-- drivers/net/virtio_net.c | 14 +++++++------- drivers/virtio/virtio_balloon.c | 2 +- drivers/virtio/virtio_ring.c | 6 +++++- include/linux/virtio.h | 2 +- net/9p/trans_virtio.c | 2 +- 8 files changed, 19 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index aa1a3d5a3e2..d739ee4201f 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -139,7 +139,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, } } - if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) { + if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { mempool_free(vbr, vblk->pool); return false; } diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 32216b62324..b6c24dcc987 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -51,7 +51,7 @@ static void register_buffer(void) sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left); /* There should always be room for one buffer. */ - if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) + if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) < 0) BUG(); vq->vq_ops->kick(vq); } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index c74dacfa679..a035ae39a35 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -65,7 +65,7 @@ static int put_chars(u32 vtermno, const char *buf, int count) /* add_buf wants a token to identify this buffer: we hand it any * non-NULL pointer, since there's only ever one buffer. 
*/ - if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) == 0) { + if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) { /* Tell Host to go! */ out_vq->vq_ops->kick(out_vq); /* Chill out until it's done with the buffer. */ @@ -85,7 +85,7 @@ static void add_inbuf(void) sg_init_one(sg, inbuf, PAGE_SIZE); /* We should always be able to add one buffer to an empty queue. */ - if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) != 0) + if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0) BUG(); in_vq->vq_ops->kick(in_vq); } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 32266fb89c2..fbf04a553f5 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -320,7 +320,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp) skb_queue_head(&vi->recv, skb); err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb); - if (err) { + if (err < 0) { skb_unlink(skb, &vi->recv); trim_pages(vi, skb); kfree_skb(skb); @@ -373,7 +373,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) skb_queue_head(&vi->recv, skb); err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb); - if (err) { + if (err < 0) { skb_unlink(skb, &vi->recv); kfree_skb(skb); break; @@ -527,7 +527,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); - if (!err && !vi->free_in_tasklet) + if (err >= 0 && !vi->free_in_tasklet) mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); return err; @@ -538,7 +538,7 @@ static void xmit_tasklet(unsigned long data) struct virtnet_info *vi = (void *)data; netif_tx_lock_bh(vi->dev); - if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) { + if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) { vi->svq->vq_ops->kick(vi->svq); vi->last_xmit_skb = NULL; } @@ -557,7 +557,7 @@ again: /* If we has a buffer left over from last time, send it now. */ if (unlikely(vi->last_xmit_skb) && - xmit_skb(vi, vi->last_xmit_skb) != 0) + xmit_skb(vi, vi->last_xmit_skb) < 0) goto stop_queue; vi->last_xmit_skb = NULL; @@ -565,7 +565,7 @@ again: /* Put new one in send queue and do transmit */ if (likely(skb)) { __skb_queue_head(&vi->send, skb); - if (xmit_skb(vi, skb) != 0) { + if (xmit_skb(vi, skb) < 0) { vi->last_xmit_skb = skb; skb = NULL; goto stop_queue; @@ -668,7 +668,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, sg_set_buf(&sg[i + 1], sg_virt(s), s->length); sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); - BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi)); + BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0); vi->cvq->vq_ops->kick(vi->cvq); diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 26b27826479..39789232646 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -84,7 +84,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) init_completion(&vb->acked); /* We should always be able to add one buffer to an empty queue. 
*/ - if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) != 0) + if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) BUG(); vq->vq_ops->kick(vq); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index a882f260651..f5360058072 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -208,7 +208,11 @@ add_head: pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); - return 0; + + /* If we're indirect, we can fit many (assuming not OOM). */ + if (vq->indirect) + return vq->num_free ? vq->vring.num : 0; + return vq->num_free; } static void vring_kick(struct virtqueue *_vq) diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 4fca4f5440b..057a2e01075 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -34,7 +34,7 @@ struct virtqueue { * out_num: the number of sg readable by other side * in_num: the number of sg which are writable (after readable ones) * data: the token identifying the buffer. - * Returns 0 or an error. + * Returns remaining capacity of queue (sg segments) or a negative error. * @kick: update after add_buf * vq: the struct virtqueue * After one or more add_buf calls, invoke this to kick the other side. diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 9bf0b737aa5..53c139d31a2 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -200,7 +200,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) req->status = REQ_STATUS_SENT; - if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc)) { + if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio rpc add_buf returned failure"); return -EIO; -- cgit v1.2.3-70-g09d2 From 3ca4f5ca73057a617f9444a91022d7127041970a Mon Sep 17 00:00:00 2001 From: Fernando Luis Vazquez Cao Date: Fri, 31 Jul 2009 15:25:56 +0900 Subject: virtio: add virtio IDs file Virtio IDs are spread all over the tree which makes assigning new IDs bothersome. Putting them together should make the process less error-prone. 
Signed-off-by: Fernando Luis Vazquez Cao Signed-off-by: Rusty Russell --- Documentation/lguest/lguest.c | 1 + drivers/block/virtio_blk.c | 1 + drivers/char/hw_random/virtio-rng.c | 1 + drivers/char/virtio_console.c | 1 + drivers/net/virtio_net.c | 1 + drivers/virtio/virtio_balloon.c | 1 + include/linux/virtio_9p.h | 2 -- include/linux/virtio_balloon.h | 3 --- include/linux/virtio_blk.h | 3 --- include/linux/virtio_console.h | 3 --- include/linux/virtio_ids.h | 17 +++++++++++++++++ include/linux/virtio_net.h | 3 --- include/linux/virtio_rng.h | 3 --- net/9p/trans_virtio.c | 1 + 14 files changed, 24 insertions(+), 17 deletions(-) create mode 100644 include/linux/virtio_ids.h (limited to 'include') diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c index 950cde6d6e5..84bbb190bf7 100644 --- a/Documentation/lguest/lguest.c +++ b/Documentation/lguest/lguest.c @@ -42,6 +42,7 @@ #include #include "linux/lguest_launcher.h" #include "linux/virtio_config.h" +#include #include "linux/virtio_net.h" #include "linux/virtio_blk.h" #include "linux/virtio_console.h" diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index d739ee4201f..73de7532fcf 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index b6c24dcc987..962968f05b9 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -21,6 +21,7 @@ #include #include #include +#include #include /* The host will fill any buffer we give it with sweet, sweet randomness. We diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index a035ae39a35..0d328b59568 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "hvc_console.h" diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fbf04a553f5..5c498d2b043 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 39789232646..200c22f5513 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -19,6 +19,7 @@ */ //#define DEBUG #include +#include #include #include #include diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index b3c4a60ceeb..ea7226a45ac 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h @@ -4,8 +4,6 @@ * compatible drivers/servers. */ #include -/* The ID for virtio console */ -#define VIRTIO_ID_9P 9 /* Maximum number of virtio channels per partition (1 for now) */ #define MAX_9P_CHAN 1 diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h index 8726ff77763..09d73008506 100644 --- a/include/linux/virtio_balloon.h +++ b/include/linux/virtio_balloon.h @@ -4,9 +4,6 @@ * compatible drivers/servers. 
*/ #include -/* The ID for virtio_balloon */ -#define VIRTIO_ID_BALLOON 5 - /* The feature bitmap for virtio balloon */ #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 8dab9f2b883..25fbabfa90d 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h @@ -5,9 +5,6 @@ #include #include -/* The ID for virtio_block */ -#define VIRTIO_ID_BLOCK 2 - /* Feature bits */ #define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */ #define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index dc161115ae3..b5f51980601 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -5,9 +5,6 @@ /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so * anyone can use the definitions to implement compatible drivers/servers. */ -/* The ID for virtio console */ -#define VIRTIO_ID_CONSOLE 3 - /* Feature bits */ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h new file mode 100644 index 00000000000..06660c0a78d --- /dev/null +++ b/include/linux/virtio_ids.h @@ -0,0 +1,17 @@ +#ifndef _LINUX_VIRTIO_IDS_H +#define _LINUX_VIRTIO_IDS_H +/* + * Virtio IDs + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + */ + +#define VIRTIO_ID_NET 1 /* virtio net */ +#define VIRTIO_ID_BLOCK 2 /* virtio block */ +#define VIRTIO_ID_CONSOLE 3 /* virtio console */ +#define VIRTIO_ID_RNG 4 /* virtio ring */ +#define VIRTIO_ID_BALLOON 5 /* virtio balloon */ +#define VIRTIO_ID_9P 9 /* 9p virtio console */ + +#endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index d8dd539c9f4..1f41734bbb7 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -6,9 +6,6 @@ #include #include -/* The ID for virtio_net */ -#define VIRTIO_ID_NET 1 - /* The feature bitmap for virtio net */ #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ diff --git a/include/linux/virtio_rng.h b/include/linux/virtio_rng.h index 1a85dab8a94..48121c3c434 100644 --- a/include/linux/virtio_rng.h +++ b/include/linux/virtio_rng.h @@ -4,7 +4,4 @@ * compatible drivers/servers. */ #include -/* The ID for virtio_rng */ -#define VIRTIO_ID_RNG 4 - #endif /* _LINUX_VIRTIO_RNG_H */ diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index ea1e3daabef..b2e07f0dd29 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #define VIRTQUEUE_NUM 128 -- cgit v1.2.3-70-g09d2 From f1b0ef062602713c2c7cfa12362d5d90ed01c5f6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 17 Sep 2009 19:57:42 +0200 Subject: virtio_blk: add support for cache flush Recent qemu has added a VIRTIO_BLK_F_FLUSH flag to advertise that the virtual disk has a volatile write cache that needs to be flushed. If we see this feature, tell the Linux block layer about it and use the new VIRTIO_BLK_T_FLUSH command to flush the cache when required. This allows for a correct and simple implementation of write barriers.
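For illustration, the request header handed to the device for a cache flush under this scheme would look roughly as follows; this is a sketch built from the constants added below, not code taken from the patch:

#include <linux/virtio_blk.h>

/* Sketch: a flush request addresses no sector and carries no data. */
static void fill_flush_hdr(struct virtio_blk_outhdr *hdr)
{
	hdr->type   = VIRTIO_BLK_T_FLUSH;	/* not combined with other type flags */
	hdr->sector = 0;
	hdr->ioprio = 0;
}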
Signed-off-by: Christoph Hellwig Signed-off-by: Rusty Russell --- drivers/block/virtio_blk.c | 30 +++++++++++++++++++++++++----- include/linux/virtio_blk.h | 15 +++++++++++++++ 2 files changed, 40 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 73de7532fcf..3d5fe97569c 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -92,15 +92,26 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, return false; vbr->req = req; - if (blk_fs_request(vbr->req)) { + switch (req->cmd_type) { + case REQ_TYPE_FS: vbr->out_hdr.type = 0; vbr->out_hdr.sector = blk_rq_pos(vbr->req); vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); - } else if (blk_pc_request(vbr->req)) { + break; + case REQ_TYPE_BLOCK_PC: vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); - } else { + break; + case REQ_TYPE_LINUX_BLOCK: + if (req->cmd[0] == REQ_LB_OP_FLUSH) { + vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; + vbr->out_hdr.sector = 0; + vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); + break; + } + /*FALLTHRU*/ + default: /* We don't put anything else in the queue. */ BUG(); } @@ -200,6 +211,12 @@ out: return err; } +static void virtblk_prepare_flush(struct request_queue *q, struct request *req) +{ + req->cmd_type = REQ_TYPE_LINUX_BLOCK; + req->cmd[0] = REQ_LB_OP_FLUSH; +} + static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long data) { @@ -338,7 +355,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) index++; /* If barriers are supported, tell block layer that queue is ordered */ - if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) + if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) + blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH, + virtblk_prepare_flush); + else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL); /* If disk is read-only in the host, the guest should obey */ @@ -425,7 +445,7 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, - VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY + VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH }; /* diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 25fbabfa90d..15cb666581d 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h @@ -14,6 +14,7 @@ #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ #define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ #define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */ +#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */ #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ @@ -35,6 +36,17 @@ struct virtio_blk_config { __u8 identify[VIRTIO_BLK_ID_BYTES]; } __attribute__((packed)); +/* + * Command types + * + * Usage is a bit tricky as some bits are used as flags and some are not. + * + * Rules: + * VIRTIO_BLK_T_OUT may be combined with VIRTIO_BLK_T_SCSI_CMD or + * VIRTIO_BLK_T_BARRIER. VIRTIO_BLK_T_FLUSH is a command of its own + * and may not be combined with any of the other flags. + */ + /* These two define direction. 
*/ #define VIRTIO_BLK_T_IN 0 #define VIRTIO_BLK_T_OUT 1 @@ -42,6 +54,9 @@ struct virtio_blk_config { /* This bit says it's a scsi command, not an actual read or write. */ #define VIRTIO_BLK_T_SCSI_CMD 2 +/* Cache flush command */ +#define VIRTIO_BLK_T_FLUSH 4 + /* Barrier before this op. */ #define VIRTIO_BLK_T_BARRIER 0x80000000 -- cgit v1.2.3-70-g09d2 From 4e9e92003529e5c7bb11281f7c2c9b3fe8858403 Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Thu, 11 Jun 2009 08:53:20 -0600 Subject: USB: usbmon: end ugly tricks with DMA peeking This patch fixes crashes when usbmon attempts to access the GART aperture. The old code attempted to take a bus address and convert it into a virtual address, which clearly was impossible on systems with actual IOMMUs. Let us not persist in this foolishness, and use transfer_buffer in all cases instead. I think the downsides are negligible. The ones I see are: - A driver may pass an address of one buffer down as transfer_buffer, and an entirely different entity mapped for DMA, resulting in misleading output of usbmon. Note, however, that PIO based controllers would transfer the same data that usbmon sees here. - Out of tree drivers may crash usbmon if they store garbage in transfer_buffer. I inspected the in-tree drivers, and clarified the documentation in comments. - Drivers that use get_user_pages cannot be monitored. I only found one driver with this problem (drivers/staging/rspiusb). - The same happens with usb_storage transferring from highmem, but it works fine on 64-bit systems, so I think it's not a concern. At least we don't crash anymore. Why didn't we do this in 2.6.10? That's because back in those days it was popular not to fill in transfer_buffer, so almost all traffic would be invisible (e.g. all of HID was like that). But now, the tree is almost 100% PIO friendly, so we can do the right thing at last. Signed-off-by: Pete Zaitcev Signed-off-by: Greg Kroah-Hartman --- drivers/usb/mon/Makefile | 2 +- drivers/usb/mon/mon_bin.c | 12 +----- drivers/usb/mon/mon_dma.c | 95 ---------------------------------------------- drivers/usb/mon/mon_main.c | 1 - drivers/usb/mon/mon_text.c | 14 ------- drivers/usb/mon/usb_mon.h | 14 ------- include/linux/usb.h | 19 +++++++--- 7 files changed, 15 insertions(+), 142 deletions(-) delete mode 100644 drivers/usb/mon/mon_dma.c (limited to 'include') diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile index c6516b56673..384b198faa7 100644 --- a/drivers/usb/mon/Makefile +++ b/drivers/usb/mon/Makefile @@ -2,6 +2,6 @@ # Makefile for USB monitor # -usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o +usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o obj-$(CONFIG_USB_MON) += usbmon.o diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 0f7a30b7d2d..dfdc43e2e00 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -220,9 +220,8 @@ static void mon_free_buff(struct mon_pgmap *map, int npages); /* * This is a "chunked memcpy". It does not manipulate any counters. - * But it returns the new offset for repeated application.
*/ -unsigned int mon_copy_to_buff(const struct mon_reader_bin *this, +static void mon_copy_to_buff(const struct mon_reader_bin *this, unsigned int off, const unsigned char *from, unsigned int length) { unsigned int step_len; @@ -247,7 +246,6 @@ unsigned int mon_copy_to_buff(const struct mon_reader_bin *this, from += step_len; length -= step_len; } - return off; } /* @@ -400,15 +398,8 @@ static char mon_bin_get_data(const struct mon_reader_bin *rp, unsigned int offset, struct urb *urb, unsigned int length) { - if (urb->dev->bus->uses_dma && - (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { - mon_dmapeek_vec(rp, offset, urb->transfer_dma, length); - return 0; - } - if (urb->transfer_buffer == NULL) return 'Z'; - mon_copy_to_buff(rp, offset, urb->transfer_buffer, length); return 0; } @@ -635,7 +626,6 @@ static int mon_bin_open(struct inode *inode, struct file *file) spin_lock_init(&rp->b_lock); init_waitqueue_head(&rp->b_wait); mutex_init(&rp->fetch_lock); - rp->b_size = BUFF_DFL; size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE); diff --git a/drivers/usb/mon/mon_dma.c b/drivers/usb/mon/mon_dma.c deleted file mode 100644 index 140cc80bd2b..00000000000 --- a/drivers/usb/mon/mon_dma.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * The USB Monitor, inspired by Dave Harding's USBMon. - * - * mon_dma.c: Library which snoops on DMA areas. - * - * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com) - */ -#include -#include -#include -#include - -#include /* Only needed for declarations in usb_mon.h */ -#include "usb_mon.h" - -/* - * PC-compatibles, are, fortunately, sufficiently cache-coherent for this. - */ -#if defined(__i386__) || defined(__x86_64__) /* CONFIG_ARCH_I386 doesn't exit */ -#define MON_HAS_UNMAP 1 - -#define phys_to_page(phys) pfn_to_page((phys) >> PAGE_SHIFT) - -char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len) -{ - struct page *pg; - unsigned long flags; - unsigned char *map; - unsigned char *ptr; - - /* - * On i386, a DMA handle is the "physical" address of a page. - * In other words, the bus address is equal to physical address. - * There is no IOMMU. - */ - pg = phys_to_page(dma_addr); - - /* - * We are called from hardware IRQs in case of callbacks. - * But we can be called from softirq or process context in case - * of submissions. In such case, we need to protect KM_IRQ0. 
- */ - local_irq_save(flags); - map = kmap_atomic(pg, KM_IRQ0); - ptr = map + (dma_addr & (PAGE_SIZE-1)); - memcpy(dst, ptr, len); - kunmap_atomic(map, KM_IRQ0); - local_irq_restore(flags); - return 0; -} - -void mon_dmapeek_vec(const struct mon_reader_bin *rp, - unsigned int offset, dma_addr_t dma_addr, unsigned int length) -{ - unsigned long flags; - unsigned int step_len; - struct page *pg; - unsigned char *map; - unsigned long page_off, page_len; - - local_irq_save(flags); - while (length) { - /* compute number of bytes we are going to copy in this page */ - step_len = length; - page_off = dma_addr & (PAGE_SIZE-1); - page_len = PAGE_SIZE - page_off; - if (page_len < step_len) - step_len = page_len; - - /* copy data and advance pointers */ - pg = phys_to_page(dma_addr); - map = kmap_atomic(pg, KM_IRQ0); - offset = mon_copy_to_buff(rp, offset, map + page_off, step_len); - kunmap_atomic(map, KM_IRQ0); - dma_addr += step_len; - length -= step_len; - } - local_irq_restore(flags); -} - -#endif /* __i386__ */ - -#ifndef MON_HAS_UNMAP -char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len) -{ - return 'D'; -} - -void mon_dmapeek_vec(const struct mon_reader_bin *rp, - unsigned int offset, dma_addr_t dma_addr, unsigned int length) -{ - ; -} - -#endif /* MON_HAS_UNMAP */ diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c index 5e0ab4201c0..e0c2db3b767 100644 --- a/drivers/usb/mon/mon_main.c +++ b/drivers/usb/mon/mon_main.c @@ -361,7 +361,6 @@ static int __init mon_init(void) } // MOD_INC_USE_COUNT(which_module?); - mutex_lock(&usb_bus_list_lock); list_for_each_entry (ubus, &usb_bus_list, bus_list) { mon_bus_init(ubus); diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index a7eb4c99342..9f1a9227ebe 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -150,20 +150,6 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb, return '>'; } - /* - * The check to see if it's safe to poke at data has an enormous - * number of corner cases, but it seems that the following is - * more or less safe. - * - * We do not even try to look at transfer_buffer, because it can - * contain non-NULL garbage in case the upper level promised to - * set DMA for the HCD. - */ - if (urb->dev->bus->uses_dma && - (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { - return mon_dmapeek(ep->data, urb->transfer_dma, len); - } - if (urb->transfer_buffer == NULL) return 'Z'; /* '0' would be not as pretty. */ diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h index f5d84ff8c10..df9a4df342c 100644 --- a/drivers/usb/mon/usb_mon.h +++ b/drivers/usb/mon/usb_mon.h @@ -64,20 +64,6 @@ void mon_text_exit(void); int __init mon_bin_init(void); void mon_bin_exit(void); -/* - * DMA interface. - * - * XXX The vectored side needs a serious re-thinking. Abstracting vectors, - * like in Paolo's original patch, produces a double pkmap. We need an idea. 
-*/ -extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len); - -struct mon_reader_bin; -extern void mon_dmapeek_vec(const struct mon_reader_bin *rp, - unsigned int offset, dma_addr_t dma_addr, unsigned int len); -extern unsigned int mon_copy_to_buff(const struct mon_reader_bin *rp, - unsigned int offset, const unsigned char *from, unsigned int len); - /* */ extern struct mutex mon_lock; diff --git a/include/linux/usb.h b/include/linux/usb.h index 19fabc487be..3b45a0d27b8 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1036,9 +1036,10 @@ typedef void (*usb_complete_t)(struct urb *); * @transfer_flags: A variety of flags may be used to affect how URB * submission, unlinking, or operation are handled. Different * kinds of URB can use different flags. - * @transfer_buffer: This identifies the buffer to (or from) which - * the I/O request will be performed (unless URB_NO_TRANSFER_DMA_MAP - * is set). This buffer must be suitable for DMA; allocate it with + * @transfer_buffer: This identifies the buffer to (or from) which the I/O + * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set + * (however, do not leave garbage in transfer_buffer even then). + * This buffer must be suitable for DMA; allocate it with * kmalloc() or equivalent. For transfers to "in" endpoints, contents * of this buffer will be modified. This buffer is used for the data * stage of control transfers. @@ -1104,9 +1105,15 @@ typedef void (*usb_complete_t)(struct urb *); * allocate a DMA buffer with usb_buffer_alloc() or call usb_buffer_map(). * When these transfer flags are provided, host controller drivers will * attempt to use the dma addresses found in the transfer_dma and/or - * setup_dma fields rather than determining a dma address themselves. (Note - * that transfer_buffer and setup_packet must still be set because not all - * host controllers use DMA, nor do virtual root hubs). + * setup_dma fields rather than determining a dma address themselves. + * + * Note that transfer_buffer must still be set if the controller + * does not support DMA (as indicated by bus.uses_dma) and when talking + * to root hub. If you have to trasfer between highmem zone and the device + * on such controller, create a bounce buffer or bail out with an error. + * If transfer_buffer cannot be set (is in highmem) and the controller is DMA + * capable, assign NULL to it, so that usbmon knows not to use the value. + * The setup_packet must always be set, so it cannot be located in highmem. * * Initialization: * -- cgit v1.2.3-70-g09d2 From 85e08ca54c5c203cd2638f0fc8fa899a539f6254 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 21 Jun 2009 23:19:23 +0200 Subject: USB: Move endpoint sync type definitions from usb/audio.h to usb/ch9.h And use the new definitions in the USB Audio Class gadget driver. 
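As a quick illustration (not part of the patch), any USB code can now classify an isochronous endpoint's synchronization type using the shared ch9.h masks; the helper name below is made up:

#include <linux/usb/ch9.h>

/* Sketch: true if the endpoint is isochronous with adaptive synchronization. */
static int ep_is_iso_adaptive(const struct usb_endpoint_descriptor *epd)
{
	return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC &&
	       (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) == USB_ENDPOINT_SYNC_ADAPTIVE;
}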
Signed-off-by: Laurent Pinchart Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/f_audio.c | 2 +- include/linux/usb/audio.h | 10 ---------- include/linux/usb/ch9.h | 6 ++++++ 3 files changed, 7 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c index 66527ba2d2e..76afbd1b515 100644 --- a/drivers/usb/gadget/f_audio.c +++ b/drivers/usb/gadget/f_audio.c @@ -174,7 +174,7 @@ static struct usb_endpoint_descriptor as_out_ep_desc __initdata = { .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_AS_ENDPOINT_ADAPTIVE + .bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE | USB_ENDPOINT_XFER_ISOC, .wMaxPacketSize = __constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE), .bInterval = 4, diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index b5744bc218a..bcf4fb4f5e8 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -46,12 +46,6 @@ #define MIDI_IN_JACK 0x02 #define MIDI_OUT_JACK 0x03 -/* endpoint attributes */ -#define EP_ATTR_MASK 0x0c -#define EP_ATTR_ASYNC 0x04 -#define EP_ATTR_ADAPTIVE 0x08 -#define EP_ATTR_SYNC 0x0c - /* cs endpoint attributes */ #define EP_CS_ATTR_SAMPLE_RATE 0x01 #define EP_CS_ATTR_PITCH_CONTROL 0x02 @@ -244,10 +238,6 @@ struct usb_as_formate_type_i_discrete_descriptor_##n { \ #define USB_AS_FORMAT_TYPE_II 0x2 #define USB_AS_FORMAT_TYPE_III 0x3 -#define USB_AS_ENDPOINT_ASYNC (1 << 2) -#define USB_AS_ENDPOINT_ADAPTIVE (2 << 2) -#define USB_AS_ENDPOINT_SYNC (3 << 2) - struct usb_as_iso_endpoint_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 93223638f70..8f8b7411b87 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -348,6 +348,12 @@ struct usb_endpoint_descriptor { #define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */ #define USB_ENDPOINT_DIR_MASK 0x80 +#define USB_ENDPOINT_SYNCTYPE 0x0c +#define USB_ENDPOINT_SYNC_NONE (0 << 2) +#define USB_ENDPOINT_SYNC_ASYNC (1 << 2) +#define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2) +#define USB_ENDPOINT_SYNC_SYNC (3 << 2) + #define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */ #define USB_ENDPOINT_XFER_CONTROL 0 #define USB_ENDPOINT_XFER_ISOC 1 -- cgit v1.2.3-70-g09d2 From 315ad3028c8aae14891797040f855fc3291a076b Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 21 Jun 2009 23:20:39 +0200 Subject: USB: Move vendor subclass definition from usb/audio.h to usb/ch9.h USB_SUBCLASS_VENDOR_SPEC is common to several USB classes and as such belongs to usb/ch9.h. 
Signed-off-by: Laurent Pinchart Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/audio.h | 1 - include/linux/usb/ch9.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index bcf4fb4f5e8..f75092aba02 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -24,7 +24,6 @@ #define USB_SUBCLASS_AUDIOCONTROL 0x01 #define USB_SUBCLASS_AUDIOSTREAMING 0x02 #define USB_SUBCLASS_MIDISTREAMING 0x03 -#define USB_SUBCLASS_VENDOR_SPEC 0xff /* A.5 Audio Class-Specific AC interface Descriptor Subtypes*/ #define HEADER 0x01 diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 8f8b7411b87..94012e649d8 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -258,6 +258,8 @@ struct usb_device_descriptor { #define USB_CLASS_APP_SPEC 0xfe #define USB_CLASS_VENDOR_SPEC 0xff +#define USB_SUBCLASS_VENDOR_SPEC 0xff + /*-------------------------------------------------------------------------*/ /* USB_DT_CONFIG: Configuration descriptor information. -- cgit v1.2.3-70-g09d2 From 512ad27d8667158747de2e8da8a23e8f50e91856 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 21 Jun 2009 23:23:05 +0200 Subject: USB audio gadget: Prefix all macro definitions with UAC_ in linux/usb/audio.h linux/usb/audio.h is a public header file that includes definitions exported to userspace. To avoid namespace clashes, prefix all macro definitions with UAC_. Existing macros and structures prefixed with USB_AC_ and USB_AS_ are renamed for consistency. Signed-off-by: Laurent Pinchart Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/audio.c | 24 ++-- drivers/usb/gadget/f_audio.c | 80 +++++++------- drivers/usb/gadget/gmidi.c | 8 +- include/linux/usb/audio.h | 258 ++++++++++++++++++++++--------------------- 4 files changed, 188 insertions(+), 182 deletions(-) (limited to 'include') diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c index 9f80f4e970b..a3a0f4a27ef 100644 --- a/drivers/usb/gadget/audio.c +++ b/drivers/usb/gadget/audio.c @@ -106,20 +106,20 @@ static int audio_set_endpoint_req(struct usb_configuration *c, ctrl->bRequest, w_value, len, ep); switch (ctrl->bRequest) { - case SET_CUR: + case UAC_SET_CUR: value = 0; break; - case SET_MIN: + case UAC_SET_MIN: break; - case SET_MAX: + case UAC_SET_MAX: break; - case SET_RES: + case UAC_SET_RES: break; - case SET_MEM: + case UAC_SET_MEM: break; default: @@ -142,13 +142,13 @@ static int audio_get_endpoint_req(struct usb_configuration *c, ctrl->bRequest, w_value, len, ep); switch (ctrl->bRequest) { - case GET_CUR: - case GET_MIN: - case GET_MAX: - case GET_RES: + case UAC_GET_CUR: + case UAC_GET_MIN: + case UAC_GET_MAX: + case UAC_GET_RES: value = 3; break; - case GET_MEM: + case UAC_GET_MEM: break; default: break; @@ -171,11 +171,11 @@ audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl) * Audio class messages; interface activation uses set_alt(). 
*/ switch (ctrl->bRequestType) { - case USB_AUDIO_SET_ENDPOINT: + case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: value = audio_set_endpoint_req(c, ctrl); break; - case USB_AUDIO_GET_ENDPOINT: + case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: value = audio_get_endpoint_req(c, ctrl); break; diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c index 76afbd1b515..7b05b3c8c0b 100644 --- a/drivers/usb/gadget/f_audio.c +++ b/drivers/usb/gadget/f_audio.c @@ -50,16 +50,16 @@ static struct usb_interface_descriptor ac_interface_desc __initdata = { .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, }; -DECLARE_USB_AC_HEADER_DESCRIPTOR(2); +DECLARE_UAC_AC_HEADER_DESCRIPTOR(2); -#define USB_DT_AC_HEADER_LENGH USB_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES) +#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES) /* B.3.2 Class-Specific AC Interface Descriptor */ -static struct usb_ac_header_descriptor_2 ac_header_desc = { - .bLength = USB_DT_AC_HEADER_LENGH, +static struct uac_ac_header_descriptor_2 ac_header_desc = { + .bLength = UAC_DT_AC_HEADER_LENGTH, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = HEADER, + .bDescriptorSubtype = UAC_HEADER, .bcdADC = __constant_cpu_to_le16(0x0100), - .wTotalLength = __constant_cpu_to_le16(USB_DT_AC_HEADER_LENGH), + .wTotalLength = __constant_cpu_to_le16(UAC_DT_AC_HEADER_LENGTH), .bInCollection = F_AUDIO_NUM_INTERFACES, .baInterfaceNr = { [0] = F_AUDIO_AC_INTERFACE, @@ -68,33 +68,33 @@ static struct usb_ac_header_descriptor_2 ac_header_desc = { }; #define INPUT_TERMINAL_ID 1 -static struct usb_input_terminal_descriptor input_terminal_desc = { - .bLength = USB_DT_AC_INPUT_TERMINAL_SIZE, +static struct uac_input_terminal_descriptor input_terminal_desc = { + .bLength = UAC_DT_INPUT_TERMINAL_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = INPUT_TERMINAL, + .bDescriptorSubtype = UAC_INPUT_TERMINAL, .bTerminalID = INPUT_TERMINAL_ID, - .wTerminalType = USB_AC_TERMINAL_STREAMING, + .wTerminalType = UAC_TERMINAL_STREAMING, .bAssocTerminal = 0, .wChannelConfig = 0x3, }; -DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(0); +DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0); #define FEATURE_UNIT_ID 2 -static struct usb_ac_feature_unit_descriptor_0 feature_unit_desc = { - .bLength = USB_DT_AC_FEATURE_UNIT_SIZE(0), +static struct uac_feature_unit_descriptor_0 feature_unit_desc = { + .bLength = UAC_DT_FEATURE_UNIT_SIZE(0), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = FEATURE_UNIT, + .bDescriptorSubtype = UAC_FEATURE_UNIT, .bUnitID = FEATURE_UNIT_ID, .bSourceID = INPUT_TERMINAL_ID, .bControlSize = 2, - .bmaControls[0] = (FU_MUTE | FU_VOLUME), + .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME), }; static struct usb_audio_control mute_control = { .list = LIST_HEAD_INIT(mute_control.list), .name = "Mute Control", - .type = MUTE_CONTROL, + .type = UAC_MUTE_CONTROL, /* Todo: add real Mute control code */ .set = generic_set_cmd, .get = generic_get_cmd, @@ -103,7 +103,7 @@ static struct usb_audio_control mute_control = { static struct usb_audio_control volume_control = { .list = LIST_HEAD_INIT(volume_control.list), .name = "Volume Control", - .type = VOLUME_CONTROL, + .type = UAC_VOLUME_CONTROL, /* Todo: add real Volume control code */ .set = generic_set_cmd, .get = generic_get_cmd, @@ -113,17 +113,17 @@ static struct usb_audio_control_selector feature_unit = { .list = LIST_HEAD_INIT(feature_unit.list), .id = FEATURE_UNIT_ID, .name = "Mute & Volume Control", - .type = FEATURE_UNIT, + .type = 
UAC_FEATURE_UNIT, .desc = (struct usb_descriptor_header *)&feature_unit_desc, }; #define OUTPUT_TERMINAL_ID 3 -static struct usb_output_terminal_descriptor output_terminal_desc = { - .bLength = USB_DT_AC_OUTPUT_TERMINAL_SIZE, +static struct uac_output_terminal_descriptor output_terminal_desc = { + .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = OUTPUT_TERMINAL, + .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, .bTerminalID = OUTPUT_TERMINAL_ID, - .wTerminalType = USB_AC_OUTPUT_TERMINAL_SPEAKER, + .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER, .bAssocTerminal = FEATURE_UNIT_ID, .bSourceID = FEATURE_UNIT_ID, }; @@ -148,22 +148,22 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = { }; /* B.4.2 Class-Specific AS Interface Descriptor */ -static struct usb_as_header_descriptor as_header_desc = { - .bLength = USB_DT_AS_HEADER_SIZE, +static struct uac_as_header_descriptor as_header_desc = { + .bLength = UAC_DT_AS_HEADER_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = AS_GENERAL, + .bDescriptorSubtype = UAC_AS_GENERAL, .bTerminalLink = INPUT_TERMINAL_ID, .bDelay = 1, - .wFormatTag = USB_AS_AUDIO_FORMAT_TYPE_I_PCM, + .wFormatTag = UAC_FORMAT_TYPE_I_PCM, }; -DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(1); +DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1); -static struct usb_as_formate_type_i_discrete_descriptor_1 as_type_i_desc = { - .bLength = USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1), +static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = { + .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = FORMAT_TYPE, - .bFormatType = USB_AS_FORMAT_TYPE_I, + .bDescriptorSubtype = UAC_FORMAT_TYPE, + .bFormatType = UAC_FORMAT_TYPE_I, .bSubframeSize = 2, .bBitResolution = 16, .bSamFreqType = 1, @@ -181,10 +181,10 @@ static struct usb_endpoint_descriptor as_out_ep_desc __initdata = { }; /* Class-specific AS ISO OUT Endpoint Descriptor */ -static struct usb_as_iso_endpoint_descriptor as_iso_out_desc __initdata = { - .bLength = USB_AS_ISO_ENDPOINT_DESC_SIZE, +static struct uac_iso_endpoint_descriptor as_iso_out_desc __initdata = { + .bLength = UAC_ISO_ENDPOINT_DESC_SIZE, .bDescriptorType = USB_DT_CS_ENDPOINT, - .bDescriptorSubtype = EP_GENERAL, + .bDescriptorSubtype = UAC_EP_GENERAL, .bmAttributes = 1, .bLockDelayUnits = 1, .wLockDelay = __constant_cpu_to_le16(1), @@ -456,11 +456,11 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) * Audio class messages; interface activation uses set_alt(). 
*/ switch (ctrl->bRequestType) { - case USB_AUDIO_SET_INTF: + case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE: value = audio_set_intf_req(f, ctrl); break; - case USB_AUDIO_GET_INTF: + case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE: value = audio_get_intf_req(f, ctrl); break; @@ -642,10 +642,10 @@ int __init control_selector_init(struct f_audio *audio) list_add(&mute_control.list, &feature_unit.control); list_add(&volume_control.list, &feature_unit.control); - volume_control.data[_CUR] = 0xffc0; - volume_control.data[_MIN] = 0xe3a0; - volume_control.data[_MAX] = 0xfff0; - volume_control.data[_RES] = 0x0030; + volume_control.data[UAC__CUR] = 0xffc0; + volume_control.data[UAC__MIN] = 0xe3a0; + volume_control.data[UAC__MAX] = 0xfff0; + volume_control.data[UAC__RES] = 0x0030; return 0; } diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c index b9312dc6e04..d0b1e836f0e 100644 --- a/drivers/usb/gadget/gmidi.c +++ b/drivers/usb/gadget/gmidi.c @@ -191,7 +191,7 @@ module_param(qlen, uint, S_IRUGO); #define GMIDI_MS_INTERFACE 1 #define GMIDI_NUM_INTERFACES 2 -DECLARE_USB_AC_HEADER_DESCRIPTOR(1); +DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(1); @@ -237,12 +237,12 @@ static const struct usb_interface_descriptor ac_interface_desc = { }; /* B.3.2 Class-Specific AC Interface Descriptor */ -static const struct usb_ac_header_descriptor_1 ac_header_desc = { - .bLength = USB_DT_AC_HEADER_SIZE(1), +static const struct uac_ac_header_descriptor_1 ac_header_desc = { + .bLength = UAC_DT_AC_HEADER_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = USB_MS_HEADER, .bcdADC = cpu_to_le16(0x0100), - .wTotalLength = cpu_to_le16(USB_DT_AC_HEADER_SIZE(1)), + .wTotalLength = cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)), .bInCollection = 1, .baInterfaceNr = { [0] = GMIDI_MS_INTERFACE, diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index f75092aba02..c3edfcb72c3 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -25,80 +25,77 @@ #define USB_SUBCLASS_AUDIOSTREAMING 0x02 #define USB_SUBCLASS_MIDISTREAMING 0x03 -/* A.5 Audio Class-Specific AC interface Descriptor Subtypes*/ -#define HEADER 0x01 -#define INPUT_TERMINAL 0x02 -#define OUTPUT_TERMINAL 0x03 -#define MIXER_UNIT 0x04 -#define SELECTOR_UNIT 0x05 -#define FEATURE_UNIT 0x06 -#define PROCESSING_UNIT 0x07 -#define EXTENSION_UNIT 0x08 - -#define AS_GENERAL 0x01 -#define FORMAT_TYPE 0x02 -#define FORMAT_SPECIFIC 0x03 - -#define EP_GENERAL 0x01 - -#define MS_GENERAL 0x01 -#define MIDI_IN_JACK 0x02 -#define MIDI_OUT_JACK 0x03 - -/* cs endpoint attributes */ -#define EP_CS_ATTR_SAMPLE_RATE 0x01 -#define EP_CS_ATTR_PITCH_CONTROL 0x02 -#define EP_CS_ATTR_FILL_MAX 0x80 - -/* Audio Class specific Request Codes */ -#define USB_AUDIO_SET_INTF 0x21 -#define USB_AUDIO_SET_ENDPOINT 0x22 -#define USB_AUDIO_GET_INTF 0xa1 -#define USB_AUDIO_GET_ENDPOINT 0xa2 - -#define SET_ 0x00 -#define GET_ 0x80 - -#define _CUR 0x1 -#define _MIN 0x2 -#define _MAX 0x3 -#define _RES 0x4 -#define _MEM 0x5 - -#define SET_CUR (SET_ | _CUR) -#define GET_CUR (GET_ | _CUR) -#define SET_MIN (SET_ | _MIN) -#define GET_MIN (GET_ | _MIN) -#define SET_MAX (SET_ | _MAX) -#define GET_MAX (GET_ | _MAX) -#define SET_RES (SET_ | _RES) -#define GET_RES (GET_ | _RES) -#define SET_MEM (SET_ | _MEM) -#define GET_MEM (GET_ | _MEM) - -#define GET_STAT 0xff - -#define USB_AC_TERMINAL_UNDEFINED 0x100 -#define USB_AC_TERMINAL_STREAMING 0x101 -#define 
USB_AC_TERMINAL_VENDOR_SPEC 0x1FF +/* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */ +#define UAC_HEADER 0x01 +#define UAC_INPUT_TERMINAL 0x02 +#define UAC_OUTPUT_TERMINAL 0x03 +#define UAC_MIXER_UNIT 0x04 +#define UAC_SELECTOR_UNIT 0x05 +#define UAC_FEATURE_UNIT 0x06 +#define UAC_PROCESSING_UNIT 0x07 +#define UAC_EXTENSION_UNIT 0x08 + +/* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ +#define UAC_AS_GENERAL 0x01 +#define UAC_FORMAT_TYPE 0x02 +#define UAC_FORMAT_SPECIFIC 0x03 + +/* A.8 Audio Class-Specific Endpoint Descriptor Subtypes */ +#define UAC_EP_GENERAL 0x01 + +/* A.9 Audio Class-Specific Request Codes */ +#define UAC_SET_ 0x00 +#define UAC_GET_ 0x80 + +#define UAC__CUR 0x1 +#define UAC__MIN 0x2 +#define UAC__MAX 0x3 +#define UAC__RES 0x4 +#define UAC__MEM 0x5 + +#define UAC_SET_CUR (UAC_SET_ | UAC__CUR) +#define UAC_GET_CUR (UAC_GET_ | UAC__CUR) +#define UAC_SET_MIN (UAC_SET_ | UAC__MIN) +#define UAC_GET_MIN (UAC_GET_ | UAC__MIN) +#define UAC_SET_MAX (UAC_SET_ | UAC__MAX) +#define UAC_GET_MAX (UAC_GET_ | UAC__MAX) +#define UAC_SET_RES (UAC_SET_ | UAC__RES) +#define UAC_GET_RES (UAC_GET_ | UAC__RES) +#define UAC_SET_MEM (UAC_SET_ | UAC__MEM) +#define UAC_GET_MEM (UAC_GET_ | UAC__MEM) + +#define UAC_GET_STAT 0xff + +/* MIDI - A.1 MS Class-Specific Interface Descriptor Subtypes */ +#define UAC_MS_HEADER 0x01 +#define UAC_MIDI_IN_JACK 0x02 +#define UAC_MIDI_OUT_JACK 0x03 + +/* MIDI - A.1 MS Class-Specific Endpoint Descriptor Subtypes */ +#define UAC_MS_GENERAL 0x01 + +/* Terminals - 2.1 USB Terminal Types */ +#define UAC_TERMINAL_UNDEFINED 0x100 +#define UAC_TERMINAL_STREAMING 0x101 +#define UAC_TERMINAL_VENDOR_SPEC 0x1FF /* Terminal Control Selectors */ /* 4.3.2 Class-Specific AC Interface Descriptor */ -struct usb_ac_header_descriptor { +struct uac_ac_header_descriptor { __u8 bLength; /* 8 + n */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ - __u8 bDescriptorSubtype; /* USB_MS_HEADER */ + __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ __le16 bcdADC; /* 0x0100 */ __le16 wTotalLength; /* includes Unit and Terminal desc. 
*/ __u8 bInCollection; /* n */ __u8 baInterfaceNr[]; /* [n] */ } __attribute__ ((packed)); -#define USB_DT_AC_HEADER_SIZE(n) (8 + (n)) +#define UAC_DT_AC_HEADER_SIZE(n) (8 + (n)) /* As above, but more useful for defining your own descriptors: */ -#define DECLARE_USB_AC_HEADER_DESCRIPTOR(n) \ -struct usb_ac_header_descriptor_##n { \ +#define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ +struct uac_ac_header_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ @@ -109,7 +106,7 @@ struct usb_ac_header_descriptor_##n { \ } __attribute__ ((packed)) /* 4.3.2.1 Input Terminal Descriptor */ -struct usb_input_terminal_descriptor { +struct uac_input_terminal_descriptor { __u8 bLength; /* in bytes: 12 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */ @@ -122,18 +119,19 @@ struct usb_input_terminal_descriptor { __u8 iTerminal; } __attribute__ ((packed)); -#define USB_DT_AC_INPUT_TERMINAL_SIZE 12 +#define UAC_DT_INPUT_TERMINAL_SIZE 12 -#define USB_AC_INPUT_TERMINAL_UNDEFINED 0x200 -#define USB_AC_INPUT_TERMINAL_MICROPHONE 0x201 -#define USB_AC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 -#define USB_AC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 -#define USB_AC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 -#define USB_AC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 -#define USB_AC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 +/* Terminals - 2.2 Input Terminal Types */ +#define UAC_INPUT_TERMINAL_UNDEFINED 0x200 +#define UAC_INPUT_TERMINAL_MICROPHONE 0x201 +#define UAC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 +#define UAC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 +#define UAC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 +#define UAC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 +#define UAC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 /* 4.3.2.2 Output Terminal Descriptor */ -struct usb_output_terminal_descriptor { +struct uac_output_terminal_descriptor { __u8 bLength; /* in bytes: 9 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ @@ -144,23 +142,24 @@ struct usb_output_terminal_descriptor { __u8 iTerminal; } __attribute__ ((packed)); -#define USB_DT_AC_OUTPUT_TERMINAL_SIZE 9 +#define UAC_DT_OUTPUT_TERMINAL_SIZE 9 -#define USB_AC_OUTPUT_TERMINAL_UNDEFINED 0x300 -#define USB_AC_OUTPUT_TERMINAL_SPEAKER 0x301 -#define USB_AC_OUTPUT_TERMINAL_HEADPHONES 0x302 -#define USB_AC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 -#define USB_AC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 -#define USB_AC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 -#define USB_AC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 -#define USB_AC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 +/* Terminals - 2.3 Output Terminal Types */ +#define UAC_OUTPUT_TERMINAL_UNDEFINED 0x300 +#define UAC_OUTPUT_TERMINAL_SPEAKER 0x301 +#define UAC_OUTPUT_TERMINAL_HEADPHONES 0x302 +#define UAC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 +#define UAC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 +#define UAC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 +#define UAC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 +#define UAC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 /* Set bControlSize = 2 as default setting */ -#define USB_DT_AC_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2) +#define UAC_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2) /* As above, but more useful for defining your own descriptors: */ -#define DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(ch) \ -struct usb_ac_feature_unit_descriptor_##ch { \ +#define 
DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct uac_feature_unit_descriptor_##ch { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ @@ -172,7 +171,7 @@ struct usb_ac_feature_unit_descriptor_##ch { \ } __attribute__ ((packed)) /* 4.5.2 Class-Specific AS Interface Descriptor */ -struct usb_as_header_descriptor { +struct uac_as_header_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* AS_GENERAL */ @@ -181,16 +180,17 @@ struct usb_as_header_descriptor { __le16 wFormatTag; /* The Audio Data Format */ } __attribute__ ((packed)); -#define USB_DT_AS_HEADER_SIZE 7 +#define UAC_DT_AS_HEADER_SIZE 7 -#define USB_AS_AUDIO_FORMAT_TYPE_I_UNDEFINED 0x0 -#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM 0x1 -#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM8 0x2 -#define USB_AS_AUDIO_FORMAT_TYPE_I_IEEE_FLOAT 0x3 -#define USB_AS_AUDIO_FORMAT_TYPE_I_ALAW 0x4 -#define USB_AS_AUDIO_FORMAT_TYPE_I_MULAW 0x5 +/* Formats - A.1.1 Audio Data Format Type I Codes */ +#define UAC_FORMAT_TYPE_I_UNDEFINED 0x0 +#define UAC_FORMAT_TYPE_I_PCM 0x1 +#define UAC_FORMAT_TYPE_I_PCM8 0x2 +#define UAC_FORMAT_TYPE_I_IEEE_FLOAT 0x3 +#define UAC_FORMAT_TYPE_I_ALAW 0x4 +#define UAC_FORMAT_TYPE_I_MULAW 0x5 -struct usb_as_format_type_i_continuous_descriptor { +struct uac_format_type_i_continuous_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ @@ -203,9 +203,9 @@ struct usb_as_format_type_i_continuous_descriptor { __u8 tUpperSamFreq[3]; } __attribute__ ((packed)); -#define USB_AS_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 +#define UAC_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 -struct usb_as_formate_type_i_discrete_descriptor { +struct uac_format_type_i_discrete_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ @@ -217,8 +217,8 @@ struct usb_as_formate_type_i_discrete_descriptor { __u8 tSamFreq[][3]; } __attribute__ ((packed)); -#define DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(n) \ -struct usb_as_formate_type_i_discrete_descriptor_##n { \ +#define DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(n) \ +struct uac_format_type_i_discrete_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ @@ -230,14 +230,15 @@ struct usb_as_formate_type_i_discrete_descriptor_##n { \ __u8 tSamFreq[n][3]; \ } __attribute__ ((packed)) -#define USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) +#define UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) -#define USB_AS_FORMAT_TYPE_UNDEFINED 0x0 -#define USB_AS_FORMAT_TYPE_I 0x1 -#define USB_AS_FORMAT_TYPE_II 0x2 -#define USB_AS_FORMAT_TYPE_III 0x3 +/* Formats - A.2 Format Type Codes */ +#define UAC_FORMAT_TYPE_UNDEFINED 0x0 +#define UAC_FORMAT_TYPE_I 0x1 +#define UAC_FORMAT_TYPE_II 0x2 +#define UAC_FORMAT_TYPE_III 0x3 -struct usb_as_iso_endpoint_descriptor { +struct uac_iso_endpoint_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ __u8 bDescriptorSubtype; /* EP_GENERAL */ @@ -245,30 +246,35 @@ struct usb_as_iso_endpoint_descriptor { __u8 bLockDelayUnits; __le16 wLockDelay; }; -#define USB_AS_ISO_ENDPOINT_DESC_SIZE 7 - -#define FU_CONTROL_UNDEFINED 0x00 -#define MUTE_CONTROL 0x01 -#define VOLUME_CONTROL 0x02 -#define BASS_CONTROL 0x03 -#define MID_CONTROL 0x04 -#define TREBLE_CONTROL 0x05 -#define GRAPHIC_EQUALIZER_CONTROL 0x06 -#define AUTOMATIC_GAIN_CONTROL 0x07 -#define 
DELAY_CONTROL 0x08 -#define BASS_BOOST_CONTROL 0x09 -#define LOUDNESS_CONTROL 0x0a - -#define FU_MUTE (1 << (MUTE_CONTROL - 1)) -#define FU_VOLUME (1 << (VOLUME_CONTROL - 1)) -#define FU_BASS (1 << (BASS_CONTROL - 1)) -#define FU_MID (1 << (MID_CONTROL - 1)) -#define FU_TREBLE (1 << (TREBLE_CONTROL - 1)) -#define FU_GRAPHIC_EQ (1 << (GRAPHIC_EQUALIZER_CONTROL - 1)) -#define FU_AUTO_GAIN (1 << (AUTOMATIC_GAIN_CONTROL - 1)) -#define FU_DELAY (1 << (DELAY_CONTROL - 1)) -#define FU_BASS_BOOST (1 << (BASS_BOOST_CONTROL - 1)) -#define FU_LOUDNESS (1 << (LOUDNESS_CONTROL - 1)) +#define UAC_ISO_ENDPOINT_DESC_SIZE 7 + +#define UAC_EP_CS_ATTR_SAMPLE_RATE 0x01 +#define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02 +#define UAC_EP_CS_ATTR_FILL_MAX 0x80 + +/* A.10.2 Feature Unit Control Selectors */ +#define UAC_FU_CONTROL_UNDEFINED 0x00 +#define UAC_MUTE_CONTROL 0x01 +#define UAC_VOLUME_CONTROL 0x02 +#define UAC_BASS_CONTROL 0x03 +#define UAC_MID_CONTROL 0x04 +#define UAC_TREBLE_CONTROL 0x05 +#define UAC_GRAPHIC_EQUALIZER_CONTROL 0x06 +#define UAC_AUTOMATIC_GAIN_CONTROL 0x07 +#define UAC_DELAY_CONTROL 0x08 +#define UAC_BASS_BOOST_CONTROL 0x09 +#define UAC_LOUDNESS_CONTROL 0x0a + +#define UAC_FU_MUTE (1 << (UAC_MUTE_CONTROL - 1)) +#define UAC_FU_VOLUME (1 << (UAC_VOLUME_CONTROL - 1)) +#define UAC_FU_BASS (1 << (UAC_BASS_CONTROL - 1)) +#define UAC_FU_MID (1 << (UAC_MID_CONTROL - 1)) +#define UAC_FU_TREBLE (1 << (UAC_TREBLE_CONTROL - 1)) +#define UAC_FU_GRAPHIC_EQ (1 << (UAC_GRAPHIC_EQUALIZER_CONTROL - 1)) +#define UAC_FU_AUTO_GAIN (1 << (UAC_AUTOMATIC_GAIN_CONTROL - 1)) +#define UAC_FU_DELAY (1 << (UAC_DELAY_CONTROL - 1)) +#define UAC_FU_BASS_BOOST (1 << (UAC_BASS_BOOST_CONTROL - 1)) +#define UAC_FU_LOUDNESS (1 << (UAC_LOUDNESS_CONTROL - 1)) struct usb_audio_control { struct list_head list; -- cgit v1.2.3-70-g09d2 From b95cd7ec3e93bae199e820bd65b21b23e4538acc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 21 Jun 2009 23:21:55 +0200 Subject: USB audio gadget: Un-inline generic_[gs]et_cmd Those functions are only used to fill the set/get members of usb_audio_control. It doesn't make much sense to inline them. Signed-off-by: Laurent Pinchart Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/f_audio.c | 15 +++++++++++++++ include/linux/usb/audio.h | 12 ------------ 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c index 7b05b3c8c0b..98e9bb97729 100644 --- a/drivers/usb/gadget/f_audio.c +++ b/drivers/usb/gadget/f_audio.c @@ -28,6 +28,9 @@ static int audio_buf_size = 48000; module_param(audio_buf_size, int, S_IRUGO); MODULE_PARM_DESC(audio_buf_size, "Audio buffer size"); +static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value); +static int generic_get_cmd(struct usb_audio_control *con, u8 cmd); + /* * DESCRIPTORS ... most are static, but strings and full * configuration descriptors are built on demand.
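To show what these helpers are for, here is a small hypothetical caller; mute_control and the UAC__CUR selector are the ones defined earlier in f_audio.c, but this exact call sequence is illustrative only and does not appear in the patch:

	struct usb_audio_control *con = &mute_control;
	int muted;

	con->set(con, UAC__CUR, 1);		/* generic_set_cmd(): con->data[UAC__CUR] = 1 */
	muted = con->get(con, UAC__CUR);	/* generic_get_cmd(): returns con->data[UAC__CUR] */

The class request handlers only ever reach these functions through the .set/.get members, so keeping them inline in the header bought nothing; the hunk below moves their bodies into f_audio.c.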
@@ -632,6 +635,18 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f) /*-------------------------------------------------------------------------*/ +static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value) +{ + con->data[cmd] = value; + + return 0; +} + +static int generic_get_cmd(struct usb_audio_control *con, u8 cmd) +{ + return con->data[cmd]; +} + /* Todo: add more control selecotor dynamically */ int __init control_selector_init(struct f_audio *audio) { diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index c3edfcb72c3..7b33c493917 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -285,18 +285,6 @@ struct usb_audio_control { int (*get)(struct usb_audio_control *con, u8 cmd); }; -static inline int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value) -{ - con->data[cmd] = value; - - return 0; -} - -static inline int generic_get_cmd(struct usb_audio_control *con, u8 cmd) -{ - return con->data[cmd]; -} - struct usb_audio_control_selector { struct list_head list; struct list_head control; -- cgit v1.2.3-70-g09d2 From 7cbe5dca399a50ce8aa74314b1d276e2fb904e1b Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 29 Jun 2009 10:56:54 -0400 Subject: USB: add API for userspace drivers to "claim" ports This patch (as1258) implements a feature that users have been asking for: It gives programs the ability to "claim" a port on a hub, via a new usbfs ioctl. A device plugged into a "claimed" port will not be touched by the kernel beyond the immediate necessities of initialization and enumeration. In particular, when a device is plugged into a "claimed" port, the kernel will not select and install a configuration. And when a config is installed by usbfs or sysfs, the kernel will not probe any drivers for any of the interfaces. (However the kernel will fetch various string descriptors during enumeration. One could argue that this isn't really necessary, but the strings are exported in sysfs.) The patch does not guarantee exclusive access to these devices; it is still possible for more than one program to open the device file concurrently. Programs are responsible for coordinating access among themselves. A demonstration program showing how to use the new interface can be found in an attachment to http://marc.info/?l=linux-usb&m=124345857431452&w=2 The patch also makes a small simplification to the hub driver, replacing a bunch of more-or-less useless variants of "out of memory" with a single message. 
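As a rough feel for the userspace side, a minimal sketch follows; the usbfs node path and the port number are invented for the example and error handling is trimmed (the demonstration program linked above shows the complete flow). Port numbers count from 1, claiming an already-claimed port fails with EBUSY, and all claims held through a file descriptor are dropped when it is closed.

/* Hypothetical example: claim port 2 of the hub exposed at this usbfs node. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	unsigned int port = 2;				/* example port; counts from 1 */
	int fd = open("/dev/bus/usb/001/002", O_RDWR);	/* the hub itself, not the child device */

	if (fd < 0)
		return 1;
	if (ioctl(fd, USBDEVFS_CLAIM_PORT, &port) < 0)
		perror("USBDEVFS_CLAIM_PORT");		/* e.g. EBUSY if another owner holds it */
	/* a device plugged into the claimed port is now left alone by the kernel */
	if (ioctl(fd, USBDEVFS_RELEASE_PORT, &port) < 0)
		perror("USBDEVFS_RELEASE_PORT");
	close(fd);
	return 0;
}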
Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 35 ++++++++++++++++++ drivers/usb/core/driver.c | 3 ++ drivers/usb/core/generic.c | 4 +- drivers/usb/core/hub.c | 88 +++++++++++++++++++++++++++++++++++++++++--- drivers/usb/core/usb.h | 7 ++++ include/linux/usbdevice_fs.h | 2 + 6 files changed, 133 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 4247eccf858..165de5d5900 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -52,6 +52,7 @@ #include "hcd.h" /* for usbcore internals */ #include "usb.h" +#include "hub.h" #define USB_MAXBUS 64 #define USB_DEVICE_MAX USB_MAXBUS * 128 @@ -655,6 +656,7 @@ static int usbdev_release(struct inode *inode, struct file *file) struct async *as; usb_lock_device(dev); + usb_hub_release_all_ports(dev, ps); /* Protect against simultaneous open */ mutex_lock(&usbfs_mutex); @@ -1548,6 +1550,29 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg) } #endif +static int proc_claim_port(struct dev_state *ps, void __user *arg) +{ + unsigned portnum; + int rc; + + if (get_user(portnum, (unsigned __user *) arg)) + return -EFAULT; + rc = usb_hub_claim_port(ps->dev, portnum, ps); + if (rc == 0) + snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n", + portnum, task_pid_nr(current), current->comm); + return rc; +} + +static int proc_release_port(struct dev_state *ps, void __user *arg) +{ + unsigned portnum; + + if (get_user(portnum, (unsigned __user *) arg)) + return -EFAULT; + return usb_hub_release_port(ps->dev, portnum, ps); +} + /* * NOTE: All requests here that have interface numbers as parameters * are assuming that somehow the configuration has been prevented from @@ -1689,6 +1714,16 @@ static int usbdev_ioctl(struct inode *inode, struct file *file, snoop(&dev->dev, "%s: IOCTL\n", __func__); ret = proc_ioctl_default(ps, p); break; + + case USBDEVFS_CLAIM_PORT: + snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__); + ret = proc_claim_port(ps, p); + break; + + case USBDEVFS_RELEASE_PORT: + snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__); + ret = proc_release_port(ps, p); + break; } usb_unlock_device(dev); if (ret >= 0) diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 69e5773abfc..1bad4e5a6ab 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -207,6 +207,9 @@ static int usb_probe_interface(struct device *dev) intf->needs_binding = 0; + if (usb_device_is_owned(udev)) + return -ENODEV; + if (udev->authorized == 0) { dev_err(&intf->dev, "Device is not authorized for usage\n"); return -ENODEV; diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index 30ecac3af15..05e6d313961 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -158,7 +158,9 @@ static int generic_probe(struct usb_device *udev) /* Choose and set the configuration. This registers the interfaces * with the driver core and lets interface drivers bind to them. 
*/ - if (udev->authorized == 0) + if (usb_device_is_owned(udev)) + ; /* Don't configure if the device is owned */ + else if (udev->authorized == 0) dev_err(&udev->dev, "Device is not authorized for usage\n"); else { c = usb_choose_configuration(udev); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 71f86c60d83..cb54420ed58 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -78,6 +78,7 @@ struct usb_hub { u8 indicator[USB_MAXCHILDREN]; struct delayed_work leds; struct delayed_work init_work; + void **port_owners; }; @@ -860,19 +861,17 @@ static int hub_configure(struct usb_hub *hub, u16 wHubCharacteristics; unsigned int pipe; int maxp, ret; - char *message; + char *message = "out of memory"; hub->buffer = usb_buffer_alloc(hdev, sizeof(*hub->buffer), GFP_KERNEL, &hub->buffer_dma); if (!hub->buffer) { - message = "can't allocate hub irq buffer"; ret = -ENOMEM; goto fail; } hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL); if (!hub->status) { - message = "can't kmalloc hub status buffer"; ret = -ENOMEM; goto fail; } @@ -880,7 +879,6 @@ static int hub_configure(struct usb_hub *hub, hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); if (!hub->descriptor) { - message = "can't kmalloc hub descriptor"; ret = -ENOMEM; goto fail; } @@ -904,6 +902,12 @@ static int hub_configure(struct usb_hub *hub, dev_info (hub_dev, "%d port%s detected\n", hdev->maxchild, (hdev->maxchild == 1) ? "" : "s"); + hub->port_owners = kzalloc(hdev->maxchild * sizeof(void *), GFP_KERNEL); + if (!hub->port_owners) { + ret = -ENOMEM; + goto fail; + } + wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); if (wHubCharacteristics & HUB_CHAR_COMPOUND) { @@ -1082,7 +1086,6 @@ static int hub_configure(struct usb_hub *hub, hub->urb = usb_alloc_urb(0, GFP_KERNEL); if (!hub->urb) { - message = "couldn't allocate interrupt urb"; ret = -ENOMEM; goto fail; } @@ -1131,11 +1134,13 @@ static void hub_disconnect(struct usb_interface *intf) hub_quiesce(hub, HUB_DISCONNECT); usb_set_intfdata (intf, NULL); + hub->hdev->maxchild = 0; if (hub->hdev->speed == USB_SPEED_HIGH) highspeed_hubs--; usb_free_urb(hub->urb); + kfree(hub->port_owners); kfree(hub->descriptor); kfree(hub->status); usb_buffer_free(hub->hdev, sizeof(*hub->buffer), hub->buffer, @@ -1250,6 +1255,79 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data) } } +/* + * Allow user programs to claim ports on a hub. When a device is attached + * to one of these "claimed" ports, the program will "own" the device. + */ +static int find_port_owner(struct usb_device *hdev, unsigned port1, + void ***ppowner) +{ + if (hdev->state == USB_STATE_NOTATTACHED) + return -ENODEV; + if (port1 == 0 || port1 > hdev->maxchild) + return -EINVAL; + + /* This assumes that devices not managed by the hub driver + * will always have maxchild equal to 0. 
+ */ + *ppowner = &(hdev_to_hub(hdev)->port_owners[port1 - 1]); + return 0; +} + +/* In the following three functions, the caller must hold hdev's lock */ +int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, void *owner) +{ + int rc; + void **powner; + + rc = find_port_owner(hdev, port1, &powner); + if (rc) + return rc; + if (*powner) + return -EBUSY; + *powner = owner; + return rc; +} + +int usb_hub_release_port(struct usb_device *hdev, unsigned port1, void *owner) +{ + int rc; + void **powner; + + rc = find_port_owner(hdev, port1, &powner); + if (rc) + return rc; + if (*powner != owner) + return -ENOENT; + *powner = NULL; + return rc; +} + +void usb_hub_release_all_ports(struct usb_device *hdev, void *owner) +{ + int n; + void **powner; + + n = find_port_owner(hdev, 1, &powner); + if (n == 0) { + for (; n < hdev->maxchild; (++n, ++powner)) { + if (*powner == owner) + *powner = NULL; + } + } +} + +/* The caller must hold udev's lock */ +bool usb_device_is_owned(struct usb_device *udev) +{ + struct usb_hub *hub; + + if (udev->state == USB_STATE_NOTATTACHED || !udev->parent) + return false; + hub = hdev_to_hub(udev->parent); + return !!hub->port_owners[udev->portnum - 1]; +} + static void recursively_mark_NOTATTACHED(struct usb_device *udev) { diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index c0e0ae2bb8e..9a8b15e6377 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -37,6 +37,13 @@ extern int usb_match_device(struct usb_device *dev, extern void usb_forced_unbind_intf(struct usb_interface *intf); extern void usb_rebind_intf(struct usb_interface *intf); +extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port, + void *owner); +extern int usb_hub_release_port(struct usb_device *hdev, unsigned port, + void *owner); +extern void usb_hub_release_all_ports(struct usb_device *hdev, void *owner); +extern bool usb_device_is_owned(struct usb_device *udev); + extern int usb_hub_init(void); extern void usb_hub_cleanup(void); extern int usb_major_init(void); diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h index 0044d9b4cb8..00ceebeb9e5 100644 --- a/include/linux/usbdevice_fs.h +++ b/include/linux/usbdevice_fs.h @@ -175,4 +175,6 @@ struct usbdevfs_ioctl32 { #define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) #define USBDEVFS_DISCONNECT _IO('U', 22) #define USBDEVFS_CONNECT _IO('U', 23) +#define USBDEVFS_CLAIM_PORT _IOR('U', 24, unsigned int) +#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int) #endif /* _LINUX_USBDEVICE_FS_H */ -- cgit v1.2.3-70-g09d2 From ccf5b801cef4f9e2d708d3b87e91e2bc6abd5206 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 29 Jun 2009 11:00:01 -0400 Subject: USB: make intf.pm_usage an atomic_t This patch (as1260) changes the pm_usage_cnt field in struct usb_interface from an int to an atomic_t. This is so that drivers can invoke the usb_autopm_get_interface_async() and usb_autopm_put_interface_async() routines without locking and without fear of corrupting the pm_usage_cnt value. 
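To make the conversion concrete before wading into the diff, the pattern is uniform: every read-modify-write of the counter becomes an atomic operation. A condensed before/after sketch, lifted from the usb_autopm_get_interface_async() hunk below (kernel fragment, not standalone code):

	/* before: plain int update, racy when the async path runs without the PM lock */
	if (++intf->pm_usage_cnt > 0 && udev->state == USB_STATE_SUSPENDED)
		queue_work(ksuspend_usb_wq, &udev->autoresume);

	/* after: atomic update, safe even without the lock */
	atomic_inc(&intf->pm_usage_cnt);
	if (atomic_read(&intf->pm_usage_cnt) > 0 && udev->state == USB_STATE_SUSPENDED)
		queue_work(ksuspend_usb_wq, &udev->autoresume);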
Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/driver.c | 38 ++++++++++++++++++++++---------------- drivers/usb/core/hub.c | 5 +++-- include/linux/usb.h | 6 +++--- 3 files changed, 28 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 1bad4e5a6ab..1c976c141f3 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -235,7 +235,7 @@ static int usb_probe_interface(struct device *dev) /* The interface should always appear to be in use * unless the driver suports autosuspend. */ - intf->pm_usage_cnt = !(driver->supports_autosuspend); + atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend); /* Carry out a deferred switch to altsetting 0 */ if (intf->needs_altsetting0) { @@ -347,7 +347,7 @@ int usb_driver_claim_interface(struct usb_driver *driver, usb_pm_lock(udev); iface->condition = USB_INTERFACE_BOUND; mark_active(iface); - iface->pm_usage_cnt = !(driver->supports_autosuspend); + atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend); usb_pm_unlock(udev); /* if interface was already added, bind now; else let @@ -1068,7 +1068,7 @@ static int autosuspend_check(struct usb_device *udev, int reschedule) intf = udev->actconfig->interface[i]; if (!is_active(intf)) continue; - if (intf->pm_usage_cnt > 0) + if (atomic_read(&intf->pm_usage_cnt) > 0) return -EBUSY; if (intf->needs_remote_wakeup && !udev->do_remote_wakeup) { @@ -1464,17 +1464,19 @@ static int usb_autopm_do_interface(struct usb_interface *intf, status = -ENODEV; else { udev->auto_pm = 1; - intf->pm_usage_cnt += inc_usage_cnt; + atomic_add(inc_usage_cnt, &intf->pm_usage_cnt); udev->last_busy = jiffies; - if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) { + if (inc_usage_cnt >= 0 && + atomic_read(&intf->pm_usage_cnt) > 0) { if (udev->state == USB_STATE_SUSPENDED) status = usb_resume_both(udev, PMSG_AUTO_RESUME); if (status != 0) - intf->pm_usage_cnt -= inc_usage_cnt; + atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt); else udev->last_busy = jiffies; - } else if (inc_usage_cnt <= 0 && intf->pm_usage_cnt <= 0) { + } else if (inc_usage_cnt <= 0 && + atomic_read(&intf->pm_usage_cnt) <= 0) { status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND); } } @@ -1519,7 +1521,7 @@ void usb_autopm_put_interface(struct usb_interface *intf) status = usb_autopm_do_interface(intf, -1); dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, intf->pm_usage_cnt); + __func__, status, atomic_read(&intf->pm_usage_cnt)); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface); @@ -1547,10 +1549,10 @@ void usb_autopm_put_interface_async(struct usb_interface *intf) status = -ENODEV; } else { udev->last_busy = jiffies; - --intf->pm_usage_cnt; + atomic_dec(&intf->pm_usage_cnt); if (udev->autosuspend_disabled || udev->autosuspend_delay < 0) status = -EPERM; - else if (intf->pm_usage_cnt <= 0 && + else if (atomic_read(&intf->pm_usage_cnt) <= 0 && !timer_pending(&udev->autosuspend.timer)) { queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, round_jiffies_up_relative( @@ -1558,7 +1560,7 @@ void usb_autopm_put_interface_async(struct usb_interface *intf) } } dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, intf->pm_usage_cnt); + __func__, status, atomic_read(&intf->pm_usage_cnt)); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async); @@ -1602,7 +1604,7 @@ int usb_autopm_get_interface(struct usb_interface *intf) status = usb_autopm_do_interface(intf, 1); dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, 
status, intf->pm_usage_cnt); + __func__, status, atomic_read(&intf->pm_usage_cnt)); return status; } EXPORT_SYMBOL_GPL(usb_autopm_get_interface); @@ -1630,10 +1632,14 @@ int usb_autopm_get_interface_async(struct usb_interface *intf) status = -ENODEV; else if (udev->autoresume_disabled) status = -EPERM; - else if (++intf->pm_usage_cnt > 0 && udev->state == USB_STATE_SUSPENDED) - queue_work(ksuspend_usb_wq, &udev->autoresume); + else { + atomic_inc(&intf->pm_usage_cnt); + if (atomic_read(&intf->pm_usage_cnt) > 0 && + udev->state == USB_STATE_SUSPENDED) + queue_work(ksuspend_usb_wq, &udev->autoresume); + } dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, intf->pm_usage_cnt); + __func__, status, atomic_read(&intf->pm_usage_cnt)); return status; } EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async); @@ -1655,7 +1661,7 @@ int usb_autopm_set_interface(struct usb_interface *intf) status = usb_autopm_do_interface(intf, 0); dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, intf->pm_usage_cnt); + __func__, status, atomic_read(&intf->pm_usage_cnt)); return status; } EXPORT_SYMBOL_GPL(usb_autopm_set_interface); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index cb54420ed58..69e3a966a4b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -373,7 +373,7 @@ static void kick_khubd(struct usb_hub *hub) unsigned long flags; /* Suppress autosuspend until khubd runs */ - to_usb_interface(hub->intfdev)->pm_usage_cnt = 1; + atomic_set(&to_usb_interface(hub->intfdev)->pm_usage_cnt, 1); spin_lock_irqsave(&hub_event_lock, flags); if (!hub->disconnected && list_empty(&hub->event_list)) { @@ -678,7 +678,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) msecs_to_jiffies(delay)); /* Suppress autosuspend until init is done */ - to_usb_interface(hub->intfdev)->pm_usage_cnt = 1; + atomic_set(&to_usb_interface(hub->intfdev)-> + pm_usage_cnt, 1); return; /* Continues at init2: below */ } else { hub_power_on(hub, true); diff --git a/include/linux/usb.h b/include/linux/usb.h index 3b45a0d27b8..a34fa89f147 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -195,7 +195,7 @@ struct usb_interface { struct device dev; /* interface specific device info */ struct device *usb_dev; - int pm_usage_cnt; /* usage counter for autosuspend */ + atomic_t pm_usage_cnt; /* usage counter for autosuspend */ struct work_struct reset_ws; /* for resets in atomic context */ }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) @@ -551,13 +551,13 @@ extern void usb_autopm_put_interface_async(struct usb_interface *intf); static inline void usb_autopm_enable(struct usb_interface *intf) { - intf->pm_usage_cnt = 0; + atomic_set(&intf->pm_usage_cnt, 0); usb_autopm_set_interface(intf); } static inline void usb_autopm_disable(struct usb_interface *intf) { - intf->pm_usage_cnt = 1; + atomic_set(&intf->pm_usage_cnt, 1); usb_autopm_set_interface(intf); } -- cgit v1.2.3-70-g09d2 From 331ac6b288d9f3689514ced1878041fb0df7e13c Mon Sep 17 00:00:00 2001 From: Alek Du Date: Mon, 13 Jul 2009 12:41:20 +0800 Subject: USB: EHCI: Add Intel Moorestown EHCI controller HOSTPCx extensions and support phy low power mode The Intel Moorestown EHCI controller supports non-standard HOSTPCx register extension. This register controls the LPM behaviour and controls the behaviour of each USB port. 
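A condensed sketch of the per-port programming model the patch below introduces; hostpc_reg, HOSTPC0, HOSTPC_PHCD and the 5 ms settle delay are taken from the diff, while "port" stands for the zero-based root-hub port index and the surrounding suspend logic is omitted:

	/* each root-hub port gets its own HOSTPC register, 4 bytes apart after HOSTPC0 */
	u32 __iomem *hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs +
						  HOSTPC0 + 4 * port);
	u32 t;

	ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);	/* suspend the port first */
	msleep(5);					/* give the HC time to enter low power */
	t = ehci_readl(ehci, hostpc_reg);
	ehci_writel(ehci, t | HOSTPC_PHCD, hostpc_reg);		/* gate the port's phy clock */

Port speed detection moves along with it: when has_hostpc is set, ehci_port_speed() takes the HOSTPC value and decodes the HOSTPC_PSPD field (bits 26:25) instead of the usual PORTSC speed bits.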
Signed-off-by: Jacob Pan Signed-off-by: Alek Du Acked-by: Alan Stern Cc: David Brownell Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ehci-hcd.c | 6 +++++ drivers/usb/host/ehci-hub.c | 62 ++++++++++++++++++++++++++++++++++++++++---- drivers/usb/host/ehci.h | 3 ++- include/linux/usb/ehci_def.h | 13 ++++++++++ 4 files changed, 78 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 7bee1638dfd..5fbc67c5a91 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -249,6 +249,12 @@ static int ehci_reset (struct ehci_hcd *ehci) retval = handshake (ehci, &ehci->regs->command, CMD_RESET, 0, 250 * 1000); + if (ehci->has_hostpc) { + ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS, + (u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX)); + ehci_writel(ehci, TXFIFO_DEFAULT, + (u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING)); + } if (retval) return retval; diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index f46ad27c9a9..6fef1ee7d10 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -111,6 +111,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) struct ehci_hcd *ehci = hcd_to_ehci (hcd); int port; int mask; + u32 __iomem *hostpc_reg = NULL; ehci_dbg(ehci, "suspend root hub\n"); @@ -142,6 +143,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; u32 t2 = t1; + if (ehci->has_hostpc) + hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs + + HOSTPC0 + 4 * (port & 0xff)); /* keep track of which ports we suspend */ if (t1 & PORT_OWNER) set_bit(port, &ehci->owned_ports); @@ -151,15 +155,37 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) } /* enable remote wakeup on all ports */ - if (hcd->self.root_hub->do_remote_wakeup) - t2 |= PORT_WAKE_BITS; - else + if (hcd->self.root_hub->do_remote_wakeup) { + /* only enable appropriate wake bits, otherwise the + * hardware can not go phy low power mode. If a race + * condition happens here(connection change during bits + * set), the port change detection will finally fix it. + */ + if (t1 & PORT_CONNECT) { + t2 |= PORT_WKOC_E | PORT_WKDISC_E; + t2 &= ~PORT_WKCONN_E; + } else { + t2 |= PORT_WKOC_E | PORT_WKCONN_E; + t2 &= ~PORT_WKDISC_E; + } + } else t2 &= ~PORT_WAKE_BITS; if (t1 != t2) { ehci_vdbg (ehci, "port %d, %08x -> %08x\n", port + 1, t1, t2); ehci_writel(ehci, t2, reg); + if (hostpc_reg) { + u32 t3; + + msleep(5);/* 5ms for HCD enter low pwr mode */ + t3 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); + t3 = ehci_readl(ehci, hostpc_reg); + ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", + port, (t3 & HOSTPC_PHCD) ? + "succeeded" : "failed"); + } } } @@ -563,7 +589,8 @@ static int ehci_hub_control ( int ports = HCS_N_PORTS (ehci->hcs_params); u32 __iomem *status_reg = &ehci->regs->port_status[ (wIndex & 0xff) - 1]; - u32 temp, status; + u32 __iomem *hostpc_reg = NULL; + u32 temp, temp1, status; unsigned long flags; int retval = 0; unsigned selector; @@ -575,6 +602,9 @@ static int ehci_hub_control ( * power, "this is the one", etc. EHCI spec supports this. 
*/ + if (ehci->has_hostpc) + hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs + + HOSTPC0 + 4 * ((wIndex & 0xff) - 1)); spin_lock_irqsave (&ehci->lock, flags); switch (typeReq) { case ClearHubFeature: @@ -773,7 +803,11 @@ static int ehci_hub_control ( if (temp & PORT_CONNECT) { status |= 1 << USB_PORT_FEAT_CONNECTION; // status may be from integrated TT - status |= ehci_port_speed(ehci, temp); + if (ehci->has_hostpc) { + temp1 = ehci_readl(ehci, hostpc_reg); + status |= ehci_port_speed(ehci, temp1); + } else + status |= ehci_port_speed(ehci, temp); } if (temp & PORT_PE) status |= 1 << USB_PORT_FEAT_ENABLE; @@ -832,6 +866,24 @@ static int ehci_hub_control ( || (temp & PORT_RESET) != 0) goto error; ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); + /* After above check the port must be connected. + * Set appropriate bit thus could put phy into low power + * mode if we have hostpc feature + */ + if (hostpc_reg) { + temp &= ~PORT_WKCONN_E; + temp |= (PORT_WKDISC_E | PORT_WKOC_E); + ehci_writel(ehci, temp | PORT_SUSPEND, + status_reg); + msleep(5);/* 5ms for HCD enter low pwr mode */ + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp1 | HOSTPC_PHCD, + hostpc_reg); + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", + wIndex, (temp1 & HOSTPC_PHCD) ? + "succeeded" : "failed"); + } set_bit(wIndex, &ehci->suspended_ports); break; case USB_PORT_FEAT_POWER: diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index fa12f20fbfe..ec3dba6b8e4 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -136,6 +136,7 @@ struct ehci_hcd { /* one per controller */ #define OHCI_HCCTRL_OFFSET 0x4 #define OHCI_HCCTRL_LEN 0x4 __hc32 *ohci_hcctrl_reg; + unsigned has_hostpc:1; u8 sbrn; /* packed release number */ @@ -548,7 +549,7 @@ static inline unsigned int ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) { if (ehci_is_TDI(ehci)) { - switch ((portsc>>26)&3) { + switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) { case 0: return 0; case 1: diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 5b88e36c910..4c4b701ff26 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -132,6 +132,19 @@ struct ehci_regs { #define USBMODE_CM_HC (3<<0) /* host controller mode */ #define USBMODE_CM_IDLE (0<<0) /* idle state */ +/* Moorestown has some non-standard registers, partially due to the fact that + * its EHCI controller has both TT and LPM support. HOSTPCx are extentions to + * PORTSCx + */ +#define HOSTPC0 0x84 /* HOSTPC extension */ +#define HOSTPC_PHCD (1<<22) /* Phy clock disable */ +#define HOSTPC_PSPD (3<<25) /* Port speed detection */ +#define USBMODE_EX 0xc8 /* USB Device mode extension */ +#define USBMODE_EX_VBPS (1<<5) /* VBus Power Select On */ +#define USBMODE_EX_HC (3<<0) /* host controller mode */ +#define TXFILLTUNING 0x24 /* TX FIFO Tuning register */ +#define TXFIFO_DEFAULT (8<<16) /* FIFO burst threshold 8 */ + /* Appendix C, Debug port ... intended for use with special "debug devices" * that can help if there's no serial console. (nonstandard enumeration.) */ -- cgit v1.2.3-70-g09d2 From 9da69c604d87afea37b5411867bb76e3c624cc92 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Wed, 15 Jul 2009 23:22:54 -0400 Subject: USB: isp1760: allow platform devices to customize devflags Platform device support was merged earlier, but support for boards to customize the devflags aspect of the controller was not. 
We want this on Blackfin systems to control the bus width, but might as well expose all of the fields while we're at it. Signed-off-by: Michael Hennerich Signed-off-by: Bryan Wu Signed-off-by: Mike Frysinger Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/isp1760-hcd.c | 4 ++++ drivers/usb/host/isp1760-hcd.h | 2 ++ drivers/usb/host/isp1760-if.c | 21 ++++++++++++++++++++- include/linux/usb/isp1760.h | 18 ++++++++++++++++++ 4 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 include/linux/usb/isp1760.h (limited to 'include') diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 15438469f21..9600a58299d 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -386,6 +386,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd) hwmode |= HW_DACK_POL_HIGH; if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH) hwmode |= HW_DREQ_POL_HIGH; + if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH) + hwmode |= HW_INTR_HIGH_ACT; + if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG) + hwmode |= HW_INTR_EDGE_TRIG; /* * We have to set this first in case we're in 16-bit mode. diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 462f4943cb1..6931ef5c965 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h @@ -142,6 +142,8 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, #define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */ #define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */ #define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */ +#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */ +#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */ /* chip memory management */ struct memory_chunk { diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index d4feebfc63b..1c9f977a5c9 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c @@ -3,6 +3,7 @@ * Currently there is support for * - OpenFirmware * - PCI + * - PDEV (generic platform device centralized driver model) * * (c) 2007 Sebastian Siewior * @@ -11,6 +12,7 @@ #include #include #include +#include #include "../core/hcd.h" #include "isp1760-hcd.h" @@ -308,6 +310,8 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev) struct resource *mem_res; struct resource *irq_res; resource_size_t mem_size; + struct isp1760_platform_data *priv = pdev->dev.platform_data; + unsigned int devflags = 0; unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -330,8 +334,23 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev) } irqflags |= irq_res->flags & IRQF_TRIGGER_MASK; + if (priv) { + if (priv->is_isp1761) + devflags |= ISP1760_FLAG_ISP1761; + if (priv->bus_width_16) + devflags |= ISP1760_FLAG_BUS_WIDTH_16; + if (priv->port1_otg) + devflags |= ISP1760_FLAG_OTG_EN; + if (priv->analog_oc) + devflags |= ISP1760_FLAG_ANALOG_OC; + if (priv->dack_polarity_high) + devflags |= ISP1760_FLAG_DACK_POL_HIGH; + if (priv->dreq_polarity_high) + devflags |= ISP1760_FLAG_DREQ_POL_HIGH; + } + hcd = isp1760_register(mem_res->start, mem_size, irq_res->start, - irqflags, &pdev->dev, dev_name(&pdev->dev), 0); + irqflags, &pdev->dev, dev_name(&pdev->dev), devflags); if (IS_ERR(hcd)) { pr_warning("isp1760: Failed to register the HCD device\n"); ret = -ENODEV; diff --git a/include/linux/usb/isp1760.h 
b/include/linux/usb/isp1760.h new file mode 100644 index 00000000000..de7de53c553 --- /dev/null +++ b/include/linux/usb/isp1760.h @@ -0,0 +1,18 @@ +/* + * board initialization should put one of these into dev->platform_data + * and place the isp1760 onto platform_bus named "isp1760-hcd". + */ + +#ifndef __LINUX_USB_ISP1760_H +#define __LINUX_USB_ISP1760_H + +struct isp1760_platform_data { + unsigned is_isp1761:1; /* Chip is ISP1761 */ + unsigned bus_width_16:1; /* 16/32-bit data bus width */ + unsigned port1_otg:1; /* Port 1 supports OTG */ + unsigned analog_oc:1; /* Analog overcurrent */ + unsigned dack_polarity_high:1; /* DACK active high */ + unsigned dreq_polarity_high:1; /* DREQ active high */ +}; + +#endif /* __LINUX_USB_ISP1760_H */ -- cgit v1.2.3-70-g09d2 From a9d43091c5be1e7a60d5abe84be4f3050236b26a Mon Sep 17 00:00:00 2001 From: Lothar Wassmann Date: Thu, 16 Jul 2009 20:51:21 -0400 Subject: USB: NXP ISP1362 USB host driver Signed-off-by: Lothar Wassmann Signed-off-by: Michael Hennerich Signed-off-by: Bryan Wu Signed-off-by: Mike Frysinger Signed-off-by: Greg Kroah-Hartman --- drivers/usb/Makefile | 1 + drivers/usb/host/Kconfig | 12 + drivers/usb/host/Makefile | 1 + drivers/usb/host/isp1362-hcd.c | 2905 ++++++++++++++++++++++++++++++++++++++++ drivers/usb/host/isp1362.h | 1079 +++++++++++++++ include/linux/usb/isp1362.h | 46 + 6 files changed, 4044 insertions(+) create mode 100644 drivers/usb/host/isp1362-hcd.c create mode 100644 drivers/usb/host/isp1362.h create mode 100644 include/linux/usb/isp1362.h (limited to 'include') diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 19cb7d5480d..dbe4fb694ad 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/ obj-$(CONFIG_USB_FHCI_HCD) += host/ obj-$(CONFIG_USB_XHCI_HCD) += host/ obj-$(CONFIG_USB_SL811_HCD) += host/ +obj-$(CONFIG_USB_ISP1362_HCD) += host/ obj-$(CONFIG_USB_U132_HCD) += host/ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_HWA_HCD) += host/ diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 41b8d7de2e0..9b43b226817 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -159,6 +159,18 @@ config USB_ISP1760_HCD To compile this driver as a module, choose M here: the module will be called isp1760. +config USB_ISP1362_HCD + tristate "ISP1362 HCD support" + depends on USB + default N + ---help--- + Supports the Philips ISP1362 chip as a host controller + + This driver does not support isochronous transfers. + + To compile this driver as a module, choose M here: the + module will be called isp1362-hcd. + config USB_OHCI_HCD tristate "OHCI HCD support" depends on USB && USB_ARCH_HAS_OHCI diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 289d748bb41..f58b2494c44 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_PCI) += pci-quirks.o obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o +obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o obj-$(CONFIG_USB_FHCI_HCD) += fhci.o diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c new file mode 100644 index 00000000000..5819e10a146 --- /dev/null +++ b/drivers/usb/host/isp1362-hcd.c @@ -0,0 +1,2905 @@ +/* + * ISP1362 HCD (Host Controller Driver) for USB. 
+ * + * Copyright (C) 2005 Lothar Wassmann + * + * Derived from the SL811 HCD, rewritten for ISP116x. + * Copyright (C) 2005 Olav Kongas + * + * Portions: + * Copyright (C) 2004 Psion Teklogix (for NetBook PRO) + * Copyright (C) 2004 David Brownell + */ + +/* + * The ISP1362 chip requires a large delay (300ns and 462ns) between + * accesses to the address and data register. + * The following timing options exist: + * + * 1. Configure your memory controller to add such delays if it can (the best) + * 2. Implement platform-specific delay function possibly + * combined with configuring the memory controller; see + * include/linux/usb_isp1362.h for more info. + * 3. Use ndelay (easiest, poorest). + * + * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the + * platform specific section of isp1362.h to select the appropriate variant. + * + * Also note that according to the Philips "ISP1362 Errata" document + * Rev 1.00 from 27 May data corruption may occur when the #WR signal + * is reasserted (even with #CS deasserted) within 132ns after a + * write cycle to any controller register. If the hardware doesn't + * implement the recommended fix (gating the #WR with #CS) software + * must ensure that no further write cycle (not necessarily to the chip!) + * is issued by the CPU within this interval. + + * For PXA25x this can be ensured by using VLIO with the maximum + * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz. + */ + +#ifdef CONFIG_USB_DEBUG +# define ISP1362_DEBUG +#else +# undef ISP1362_DEBUG +#endif + +/* + * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and + * GET_INTERFACE requests correctly when the SETUP and DATA stages of the + * requests are carried out in separate frames. This will delay any SETUP + * packets until the start of the next frame so that this situation is + * unlikely to occur (and makes usbtest happy running with a PXA255 target + * device). + */ +#undef BUGGY_PXA2XX_UDC_USBTEST + +#undef PTD_TRACE +#undef URB_TRACE +#undef VERBOSE +#undef REGISTERS + +/* This enables a memory test on the ISP1362 chip memory to make sure the + * chip access timing is correct. + */ +#undef CHIP_BUFFER_TEST + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static int dbg_level; +#ifdef ISP1362_DEBUG +module_param(dbg_level, int, 0644); +#else +module_param(dbg_level, int, 0); +#define STUB_DEBUG_FILE +#endif + +#include "../core/hcd.h" +#include "../core/usb.h" +#include "isp1362.h" + + +#define DRIVER_VERSION "2005-04-04" +#define DRIVER_DESC "ISP1362 USB Host Controller Driver" + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); + +static const char hcd_name[] = "isp1362-hcd"; + +static void isp1362_hc_stop(struct usb_hcd *hcd); +static int isp1362_hc_start(struct usb_hcd *hcd); + +/*-------------------------------------------------------------------------*/ + +/* + * When called from the interrupthandler only isp1362_hcd->irqenb is modified, + * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon + * completion. + * We don't need a 'disable' counterpart, since interrupts will be disabled + * only by the interrupt handler. 
+ */ +static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask) +{ + if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb) + return; + if (mask & ~isp1362_hcd->irqenb) + isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb); + isp1362_hcd->irqenb |= mask; + if (isp1362_hcd->irq_active) + return; + isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); +} + +/*-------------------------------------------------------------------------*/ + +static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd, + u16 offset) +{ + struct isp1362_ep_queue *epq = NULL; + + if (offset < isp1362_hcd->istl_queue[1].buf_start) + epq = &isp1362_hcd->istl_queue[0]; + else if (offset < isp1362_hcd->intl_queue.buf_start) + epq = &isp1362_hcd->istl_queue[1]; + else if (offset < isp1362_hcd->atl_queue.buf_start) + epq = &isp1362_hcd->intl_queue; + else if (offset < isp1362_hcd->atl_queue.buf_start + + isp1362_hcd->atl_queue.buf_size) + epq = &isp1362_hcd->atl_queue; + + if (epq) + DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name); + else + pr_warning("%s: invalid PTD $%04x\n", __func__, offset); + + return epq; +} + +static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index) +{ + int offset; + + if (index * epq->blk_size > epq->buf_size) { + pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index, + epq->buf_size / epq->blk_size); + return -EINVAL; + } + offset = epq->buf_start + index * epq->blk_size; + DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset); + + return offset; +} + +/*-------------------------------------------------------------------------*/ + +static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size, + int mps) +{ + u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size); + + xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE); + if (xfer_size < size && xfer_size % mps) + xfer_size -= xfer_size % mps; + + return xfer_size; +} + +static int claim_ptd_buffers(struct isp1362_ep_queue *epq, + struct isp1362_ep *ep, u16 len) +{ + int ptd_offset = -EINVAL; + int index; + int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; + int found = -1; + int last = -1; + + BUG_ON(len > epq->buf_size); + + if (!epq->buf_avail) + return -ENOMEM; + + if (ep->num_ptds) + pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__, + epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); + BUG_ON(ep->num_ptds != 0); + + for (index = 0; index <= epq->buf_count - num_ptds; index++) { + if (test_bit(index, &epq->buf_map)) + continue; + found = index; + for (last = index + 1; last < index + num_ptds; last++) { + if (test_bit(last, &epq->buf_map)) { + found = -1; + break; + } + } + if (found >= 0) + break; + } + if (found < 0) + return -EOVERFLOW; + + DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, + num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE)); + ptd_offset = get_ptd_offset(epq, found); + WARN_ON(ptd_offset < 0); + ep->ptd_offset = ptd_offset; + ep->num_ptds += num_ptds; + epq->buf_avail -= num_ptds; + BUG_ON(epq->buf_avail > epq->buf_count); + ep->ptd_index = found; + for (index = found; index < last; index++) + __set_bit(index, &epq->buf_map); + DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", + __func__, epq->name, ep->ptd_index, ep->ptd_offset, + epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); + + 
return found; +} + +static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) +{ + int index = ep->ptd_index; + int last = ep->ptd_index + ep->num_ptds; + + if (last > epq->buf_count) + pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n", + __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index, + ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail, + epq->buf_map, epq->skip_map); + BUG_ON(last > epq->buf_count); + + for (; index < last; index++) { + __clear_bit(index, &epq->buf_map); + __set_bit(index, &epq->skip_map); + } + epq->buf_avail += ep->num_ptds; + epq->ptd_count--; + + BUG_ON(epq->buf_avail > epq->buf_count); + BUG_ON(epq->ptd_count > epq->buf_count); + + DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n", + __func__, epq->name, + ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count); + DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__, + epq->buf_map, epq->skip_map); + + ep->num_ptds = 0; + ep->ptd_offset = -EINVAL; + ep->ptd_index = -EINVAL; +} + +/*-------------------------------------------------------------------------*/ + +/* + Set up PTD's. +*/ +static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb, + struct isp1362_ep *ep, struct isp1362_ep_queue *epq, + u16 fno) +{ + struct ptd *ptd; + int toggle; + int dir; + u16 len; + size_t buf_len = urb->transfer_buffer_length - urb->actual_length; + + DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep); + + ptd = &ep->ptd; + + ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length; + + switch (ep->nextpid) { + case USB_PID_IN: + toggle = usb_gettoggle(urb->dev, ep->epnum, 0); + dir = PTD_DIR_IN; + if (usb_pipecontrol(urb->pipe)) { + len = min_t(size_t, ep->maxpacket, buf_len); + } else if (usb_pipeisoc(urb->pipe)) { + len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE); + ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset; + } else + len = max_transfer_size(epq, buf_len, ep->maxpacket); + DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, + (int)buf_len); + break; + case USB_PID_OUT: + toggle = usb_gettoggle(urb->dev, ep->epnum, 1); + dir = PTD_DIR_OUT; + if (usb_pipecontrol(urb->pipe)) + len = min_t(size_t, ep->maxpacket, buf_len); + else if (usb_pipeisoc(urb->pipe)) + len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE); + else + len = max_transfer_size(epq, buf_len, ep->maxpacket); + if (len == 0) + pr_info("%s: Sending ZERO packet: %d\n", __func__, + urb->transfer_flags & URB_ZERO_PACKET); + DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, + (int)buf_len); + break; + case USB_PID_SETUP: + toggle = 0; + dir = PTD_DIR_SETUP; + len = sizeof(struct usb_ctrlrequest); + DBG(1, "%s: SETUP len %d\n", __func__, len); + ep->data = urb->setup_packet; + break; + case USB_PID_ACK: + toggle = 1; + len = 0; + dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ? 
+ PTD_DIR_OUT : PTD_DIR_IN; + DBG(1, "%s: ACK len %d\n", __func__, len); + break; + default: + toggle = dir = len = 0; + pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid); + BUG_ON(1); + } + + ep->length = len; + if (!len) + ep->data = NULL; + + ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle); + ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) | + PTD_EP(ep->epnum); + ptd->len = PTD_LEN(len) | PTD_DIR(dir); + ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); + + if (usb_pipeint(urb->pipe)) { + ptd->faddr |= PTD_SF_INT(ep->branch); + ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0); + } + if (usb_pipeisoc(urb->pipe)) + ptd->faddr |= PTD_SF_ISO(fno); + + DBG(1, "%s: Finished\n", __func__); +} + +static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, + struct isp1362_ep_queue *epq) +{ + struct ptd *ptd = &ep->ptd; + int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length; + + _BUG_ON(ep->ptd_offset < 0); + + prefetch(ptd); + isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); + if (len) + isp1362_write_buffer(isp1362_hcd, ep->data, + ep->ptd_offset + PTD_HEADER_SIZE, len); + + dump_ptd(ptd); + dump_ptd_out_data(ptd, ep->data); +} + +static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, + struct isp1362_ep_queue *epq) +{ + struct ptd *ptd = &ep->ptd; + int act_len; + + WARN_ON(list_empty(&ep->active)); + BUG_ON(ep->ptd_offset < 0); + + list_del_init(&ep->active); + DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active); + + prefetchw(ptd); + isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); + dump_ptd(ptd); + act_len = PTD_GET_COUNT(ptd); + if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0) + return; + if (act_len > ep->length) + pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep, + ep->ptd_offset, act_len, ep->length); + BUG_ON(act_len > ep->length); + /* Only transfer the amount of data that has actually been overwritten + * in the chip buffer. We don't want any data that doesn't belong to the + * transfer to leak out of the chip to the callers transfer buffer! + */ + prefetchw(ep->data); + isp1362_read_buffer(isp1362_hcd, ep->data, + ep->ptd_offset + PTD_HEADER_SIZE, act_len); + dump_ptd_in_data(ptd, ep->data); +} + +/* + * INT PTDs will stay in the chip until data is available. + * This function will remove a PTD from the chip when the URB is dequeued. 
+ * Must be called with the spinlock held and IRQs disabled + */ +static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) + +{ + int index; + struct isp1362_ep_queue *epq; + + DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset); + BUG_ON(ep->ptd_offset < 0); + + epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset); + BUG_ON(!epq); + + /* put ep in remove_list for cleanup */ + WARN_ON(!list_empty(&ep->remove_list)); + list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list); + /* let SOF interrupt handle the cleanup */ + isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); + + index = ep->ptd_index; + if (index < 0) + /* ISO queues don't have SKIP registers */ + return; + + DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__, + index, ep->ptd_offset, epq->skip_map, 1 << index); + + /* prevent further processing of PTD (will be effective after next SOF) */ + epq->skip_map |= 1 << index; + if (epq == &isp1362_hcd->atl_queue) { + DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__, + isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map); + isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map); + if (~epq->skip_map == 0) + isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); + } else if (epq == &isp1362_hcd->intl_queue) { + DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__, + isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map); + isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map); + if (~epq->skip_map == 0) + isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); + } +} + +/* + Take done or failed requests out of schedule. Give back + processed urbs. +*/ +static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, + struct urb *urb, int status) + __releases(isp1362_hcd->lock) + __acquires(isp1362_hcd->lock) +{ + urb->hcpriv = NULL; + ep->error_count = 0; + + if (usb_pipecontrol(urb->pipe)) + ep->nextpid = USB_PID_SETUP; + + URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__, + ep->num_req, usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), + !usb_pipein(urb->pipe) ? "out" : "in", + usb_pipecontrol(urb->pipe) ? "ctrl" : + usb_pipeint(urb->pipe) ? "int" : + usb_pipebulk(urb->pipe) ? "bulk" : + "iso", + urb->actual_length, urb->transfer_buffer_length, + !(urb->transfer_flags & URB_SHORT_NOT_OK) ? 
+ "short_ok" : "", urb->status); + + + usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb); + spin_unlock(&isp1362_hcd->lock); + usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status); + spin_lock(&isp1362_hcd->lock); + + /* take idle endpoints out of the schedule right away */ + if (!list_empty(&ep->hep->urb_list)) + return; + + /* async deschedule */ + if (!list_empty(&ep->schedule)) { + list_del_init(&ep->schedule); + return; + } + + + if (ep->interval) { + /* periodic deschedule */ + DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval, + ep, ep->branch, ep->load, + isp1362_hcd->load[ep->branch], + isp1362_hcd->load[ep->branch] - ep->load); + isp1362_hcd->load[ep->branch] -= ep->load; + ep->branch = PERIODIC_SIZE; + } +} + +/* + * Analyze transfer results, handle partial transfers and errors +*/ +static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) +{ + struct urb *urb = get_urb(ep); + struct usb_device *udev; + struct ptd *ptd; + int short_ok; + u16 len; + int urbstat = -EINPROGRESS; + u8 cc; + + DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req); + + udev = urb->dev; + ptd = &ep->ptd; + cc = PTD_GET_CC(ptd); + if (cc == PTD_NOTACCESSED) { + pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__, + ep->num_req, ptd); + cc = PTD_DEVNOTRESP; + } + + short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK); + len = urb->transfer_buffer_length - urb->actual_length; + + /* Data underrun is special. For allowed underrun + we clear the error and continue as normal. For + forbidden underrun we finish the DATA stage + immediately while for control transfer, + we do a STATUS stage. + */ + if (cc == PTD_DATAUNDERRUN) { + if (short_ok) { + DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n", + __func__, ep->num_req, short_ok ? "" : "not_", + PTD_GET_COUNT(ptd), ep->maxpacket, len); + cc = PTD_CC_NOERROR; + urbstat = 0; + } else { + DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n", + __func__, ep->num_req, + usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, + short_ok ? 
"" : "not_", + PTD_GET_COUNT(ptd), ep->maxpacket, len); + if (usb_pipecontrol(urb->pipe)) { + ep->nextpid = USB_PID_ACK; + /* save the data underrun error code for later and + * procede with the status stage + */ + urb->actual_length += PTD_GET_COUNT(ptd); + BUG_ON(urb->actual_length > urb->transfer_buffer_length); + + if (urb->status == -EINPROGRESS) + urb->status = cc_to_error[PTD_DATAUNDERRUN]; + } else { + usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT, + PTD_GET_TOGGLE(ptd)); + urbstat = cc_to_error[PTD_DATAUNDERRUN]; + } + goto out; + } + } + + if (cc != PTD_CC_NOERROR) { + if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) { + urbstat = cc_to_error[cc]; + DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n", + __func__, ep->num_req, ep->nextpid, urbstat, cc, + ep->error_count); + } + goto out; + } + + switch (ep->nextpid) { + case USB_PID_OUT: + if (PTD_GET_COUNT(ptd) != ep->length) + pr_err("%s: count=%d len=%d\n", __func__, + PTD_GET_COUNT(ptd), ep->length); + BUG_ON(PTD_GET_COUNT(ptd) != ep->length); + urb->actual_length += ep->length; + BUG_ON(urb->actual_length > urb->transfer_buffer_length); + usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd)); + if (urb->actual_length == urb->transfer_buffer_length) { + DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, + ep->num_req, len, ep->maxpacket, urbstat); + if (usb_pipecontrol(urb->pipe)) { + DBG(3, "%s: req %d %s Wait for ACK\n", __func__, + ep->num_req, + usb_pipein(urb->pipe) ? "IN" : "OUT"); + ep->nextpid = USB_PID_ACK; + } else { + if (len % ep->maxpacket || + !(urb->transfer_flags & URB_ZERO_PACKET)) { + urbstat = 0; + DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", + __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", + urbstat, len, ep->maxpacket, urb->actual_length); + } + } + } + break; + case USB_PID_IN: + len = PTD_GET_COUNT(ptd); + BUG_ON(len > ep->length); + urb->actual_length += len; + BUG_ON(urb->actual_length > urb->transfer_buffer_length); + usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd)); + /* if transfer completed or (allowed) data underrun */ + if ((urb->transfer_buffer_length == urb->actual_length) || + len % ep->maxpacket) { + DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, + ep->num_req, len, ep->maxpacket, urbstat); + if (usb_pipecontrol(urb->pipe)) { + DBG(3, "%s: req %d %s Wait for ACK\n", __func__, + ep->num_req, + usb_pipein(urb->pipe) ? "IN" : "OUT"); + ep->nextpid = USB_PID_ACK; + } else { + urbstat = 0; + DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", + __func__, ep->num_req, usb_pipein(urb->pipe) ? 
"IN" : "OUT", + urbstat, len, ep->maxpacket, urb->actual_length); + } + } + break; + case USB_PID_SETUP: + if (urb->transfer_buffer_length == urb->actual_length) { + ep->nextpid = USB_PID_ACK; + } else if (usb_pipeout(urb->pipe)) { + usb_settoggle(udev, 0, 1, 1); + ep->nextpid = USB_PID_OUT; + } else { + usb_settoggle(udev, 0, 0, 1); + ep->nextpid = USB_PID_IN; + } + break; + case USB_PID_ACK: + DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req, + urbstat); + WARN_ON(urbstat != -EINPROGRESS); + urbstat = 0; + ep->nextpid = 0; + break; + default: + BUG_ON(1); + } + + out: + if (urbstat != -EINPROGRESS) { + DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__, + ep, ep->num_req, urb, urbstat); + finish_request(isp1362_hcd, ep, urb, urbstat); + } +} + +static void finish_unlinks(struct isp1362_hcd *isp1362_hcd) +{ + struct isp1362_ep *ep; + struct isp1362_ep *tmp; + + list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) { + struct isp1362_ep_queue *epq = + get_ptd_queue(isp1362_hcd, ep->ptd_offset); + int index = ep->ptd_index; + + BUG_ON(epq == NULL); + if (index >= 0) { + DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset); + BUG_ON(ep->num_ptds == 0); + release_ptd_buffers(epq, ep); + } + if (!list_empty(&ep->hep->urb_list)) { + struct urb *urb = get_urb(ep); + + DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__, + ep->num_req, ep); + finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN); + } + WARN_ON(list_empty(&ep->active)); + if (!list_empty(&ep->active)) { + list_del_init(&ep->active); + DBG(1, "%s: ep %p removed from active list\n", __func__, ep); + } + list_del_init(&ep->remove_list); + DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep); + } + DBG(1, "%s: Done\n", __func__); +} + +static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count) +{ + if (count > 0) { + if (count < isp1362_hcd->atl_queue.ptd_count) + isp1362_write_reg16(isp1362_hcd, HCATLDTC, count); + isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); + isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map); + isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); + } else + isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); +} + +static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd) +{ + isp1362_enable_int(isp1362_hcd, HCuPINT_INTL); + isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); + isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map); +} + +static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip) +{ + isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0); + isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ? 
+ HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL); +} + +static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb, + struct isp1362_ep *ep, struct isp1362_ep_queue *epq) +{ + int index = epq->free_ptd; + + prepare_ptd(isp1362_hcd, urb, ep, epq, 0); + index = claim_ptd_buffers(epq, ep, ep->length); + if (index == -ENOMEM) { + DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__, + ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map); + return index; + } else if (index == -EOVERFLOW) { + DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n", + __func__, ep->num_req, ep->length, epq->name, ep->num_ptds, + epq->buf_map, epq->skip_map); + return index; + } else + BUG_ON(index < 0); + list_add_tail(&ep->active, &epq->active); + DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__, + ep, ep->num_req, ep->length, &epq->active); + DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name, + ep->ptd_offset, ep, ep->num_req); + isp1362_write_ptd(isp1362_hcd, ep, epq); + __clear_bit(ep->ptd_index, &epq->skip_map); + + return 0; +} + +static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd) +{ + int ptd_count = 0; + struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue; + struct isp1362_ep *ep; + int defer = 0; + + if (atomic_read(&epq->finishing)) { + DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); + return; + } + + list_for_each_entry(ep, &isp1362_hcd->async, schedule) { + struct urb *urb = get_urb(ep); + int ret; + + if (!list_empty(&ep->active)) { + DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep); + continue; + } + + DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name, + ep, ep->num_req); + + ret = submit_req(isp1362_hcd, urb, ep, epq); + if (ret == -ENOMEM) { + defer = 1; + break; + } else if (ret == -EOVERFLOW) { + defer = 1; + continue; + } +#ifdef BUGGY_PXA2XX_UDC_USBTEST + defer = ep->nextpid == USB_PID_SETUP; +#endif + ptd_count++; + } + + /* Avoid starving of endpoints */ + if (isp1362_hcd->async.next != isp1362_hcd->async.prev) { + DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count); + list_move(&isp1362_hcd->async, isp1362_hcd->async.next); + } + if (ptd_count || defer) + enable_atl_transfers(isp1362_hcd, defer ? 
0 : ptd_count); + + epq->ptd_count += ptd_count; + if (epq->ptd_count > epq->stat_maxptds) { + epq->stat_maxptds = epq->ptd_count; + DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds); + } +} + +static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd) +{ + int ptd_count = 0; + struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue; + struct isp1362_ep *ep; + + if (atomic_read(&epq->finishing)) { + DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); + return; + } + + list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { + struct urb *urb = get_urb(ep); + int ret; + + if (!list_empty(&ep->active)) { + DBG(1, "%s: Skipping active %s ep %p\n", __func__, + epq->name, ep); + continue; + } + + DBG(1, "%s: Processing %s ep %p req %d\n", __func__, + epq->name, ep, ep->num_req); + ret = submit_req(isp1362_hcd, urb, ep, epq); + if (ret == -ENOMEM) + break; + else if (ret == -EOVERFLOW) + continue; + ptd_count++; + } + + if (ptd_count) { + static int last_count; + + if (ptd_count != last_count) { + DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count); + last_count = ptd_count; + } + enable_intl_transfers(isp1362_hcd); + } + + epq->ptd_count += ptd_count; + if (epq->ptd_count > epq->stat_maxptds) + epq->stat_maxptds = epq->ptd_count; +} + +static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) +{ + u16 ptd_offset = ep->ptd_offset; + int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size; + + DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset, + ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size); + + ptd_offset += num_ptds * epq->blk_size; + if (ptd_offset < epq->buf_start + epq->buf_size) + return ptd_offset; + else + return -ENOMEM; +} + +static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd) +{ + int ptd_count = 0; + int flip = isp1362_hcd->istl_flip; + struct isp1362_ep_queue *epq; + int ptd_offset; + struct isp1362_ep *ep; + struct isp1362_ep *tmp; + u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM); + + fill2: + epq = &isp1362_hcd->istl_queue[flip]; + if (atomic_read(&epq->finishing)) { + DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); + return; + } + + if (!list_empty(&epq->active)) + return; + + ptd_offset = epq->buf_start; + list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) { + struct urb *urb = get_urb(ep); + s16 diff = fno - (u16)urb->start_frame; + + DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep); + + if (diff > urb->number_of_packets) { + /* time frame for this URB has elapsed */ + finish_request(isp1362_hcd, ep, urb, -EOVERFLOW); + continue; + } else if (diff < -1) { + /* URB is not due in this frame or the next one. 
+ * Comparing with '-1' instead of '0' accounts for double + * buffering in the ISP1362 which enables us to queue the PTD + * one frame ahead of time + */ + } else if (diff == -1) { + /* submit PTD's that are due in the next frame */ + prepare_ptd(isp1362_hcd, urb, ep, epq, fno); + if (ptd_offset + PTD_HEADER_SIZE + ep->length > + epq->buf_start + epq->buf_size) { + pr_err("%s: Not enough ISO buffer space for %d byte PTD\n", + __func__, ep->length); + continue; + } + ep->ptd_offset = ptd_offset; + list_add_tail(&ep->active, &epq->active); + + ptd_offset = next_ptd(epq, ep); + if (ptd_offset < 0) { + pr_warning("%s: req %d No more %s PTD buffers available\n", __func__, + ep->num_req, epq->name); + break; + } + } + } + list_for_each_entry(ep, &epq->active, active) { + if (epq->active.next == &ep->active) + ep->ptd.mps |= PTD_LAST_MSK; + isp1362_write_ptd(isp1362_hcd, ep, epq); + ptd_count++; + } + + if (ptd_count) + enable_istl_transfers(isp1362_hcd, flip); + + epq->ptd_count += ptd_count; + if (epq->ptd_count > epq->stat_maxptds) + epq->stat_maxptds = epq->ptd_count; + + /* check, whether the second ISTL buffer may also be filled */ + if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & + (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) { + fno++; + ptd_count = 0; + flip = 1 - flip; + goto fill2; + } +} + +static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map, + struct isp1362_ep_queue *epq) +{ + struct isp1362_ep *ep; + struct isp1362_ep *tmp; + + if (list_empty(&epq->active)) { + DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); + return; + } + + DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map); + + atomic_inc(&epq->finishing); + list_for_each_entry_safe(ep, tmp, &epq->active, active) { + int index = ep->ptd_index; + + DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name, + index, ep->ptd_offset); + + BUG_ON(index < 0); + if (__test_and_clear_bit(index, &done_map)) { + isp1362_read_ptd(isp1362_hcd, ep, epq); + epq->free_ptd = index; + BUG_ON(ep->num_ptds == 0); + release_ptd_buffers(epq, ep); + + DBG(1, "%s: ep %p req %d removed from active list\n", __func__, + ep, ep->num_req); + if (!list_empty(&ep->remove_list)) { + list_del_init(&ep->remove_list); + DBG(1, "%s: ep %p removed from remove list\n", __func__, ep); + } + DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name, + ep, ep->num_req); + postproc_ep(isp1362_hcd, ep); + } + if (!done_map) + break; + } + if (done_map) + pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map, + epq->skip_map); + atomic_dec(&epq->finishing); +} + +static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq) +{ + struct isp1362_ep *ep; + struct isp1362_ep *tmp; + + if (list_empty(&epq->active)) { + DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); + return; + } + + DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name); + + atomic_inc(&epq->finishing); + list_for_each_entry_safe(ep, tmp, &epq->active, active) { + DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset); + + isp1362_read_ptd(isp1362_hcd, ep, epq); + DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep); + postproc_ep(isp1362_hcd, ep); + } + WARN_ON(epq->blk_size != 0); + atomic_dec(&epq->finishing); +} + +static irqreturn_t isp1362_irq(struct usb_hcd *hcd) +{ + int handled = 0; + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + u16 irqstat; + u16 svc_mask; + + spin_lock(&isp1362_hcd->lock); 
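+	/* Flag the handler as active and mask the chip's interrupt enables while servicing; HCuPINTENB is restored from the cached irqenb mask before the lock is released */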
+ + BUG_ON(isp1362_hcd->irq_active++); + + isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); + + irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT); + DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb); + + /* only handle interrupts that are currently enabled */ + irqstat &= isp1362_hcd->irqenb; + isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat); + svc_mask = irqstat; + + if (irqstat & HCuPINT_SOF) { + isp1362_hcd->irqenb &= ~HCuPINT_SOF; + isp1362_hcd->irq_stat[ISP1362_INT_SOF]++; + handled = 1; + svc_mask &= ~HCuPINT_SOF; + DBG(3, "%s: SOF\n", __func__); + isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); + if (!list_empty(&isp1362_hcd->remove_list)) + finish_unlinks(isp1362_hcd); + if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) { + if (list_empty(&isp1362_hcd->atl_queue.active)) { + start_atl_transfers(isp1362_hcd); + } else { + isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); + isp1362_write_reg32(isp1362_hcd, HCATLSKIP, + isp1362_hcd->atl_queue.skip_map); + isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); + } + } + } + + if (irqstat & HCuPINT_ISTL0) { + isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++; + handled = 1; + svc_mask &= ~HCuPINT_ISTL0; + isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL); + DBG(1, "%s: ISTL0\n", __func__); + WARN_ON((int)!!isp1362_hcd->istl_flip); + WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL0_ACTIVE); + WARN_ON(!isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL0_DONE); + isp1362_hcd->irqenb &= ~HCuPINT_ISTL0; + } + + if (irqstat & HCuPINT_ISTL1) { + isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++; + handled = 1; + svc_mask &= ~HCuPINT_ISTL1; + isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL); + DBG(1, "%s: ISTL1\n", __func__); + WARN_ON(!(int)isp1362_hcd->istl_flip); + WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL1_ACTIVE); + WARN_ON(!isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL1_DONE); + isp1362_hcd->irqenb &= ~HCuPINT_ISTL1; + } + + if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) { + WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) == + (HCuPINT_ISTL0 | HCuPINT_ISTL1)); + finish_iso_transfers(isp1362_hcd, + &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]); + start_iso_transfers(isp1362_hcd); + isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip; + } + + if (irqstat & HCuPINT_INTL) { + u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); + u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP); + isp1362_hcd->irq_stat[ISP1362_INT_INTL]++; + + DBG(2, "%s: INTL\n", __func__); + + svc_mask &= ~HCuPINT_INTL; + + isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map); + if (~(done_map | skip_map) == 0) + /* All PTDs are finished, disable INTL processing entirely */ + isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); + + handled = 1; + WARN_ON(!done_map); + if (done_map) { + DBG(3, "%s: INTL done_map %08x\n", __func__, done_map); + finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); + start_intl_transfers(isp1362_hcd); + } + } + + if (irqstat & HCuPINT_ATL) { + u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); + u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP); + isp1362_hcd->irq_stat[ISP1362_INT_ATL]++; + + DBG(2, "%s: ATL\n", __func__); + + svc_mask &= ~HCuPINT_ATL; + + isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map); + if (~(done_map | skip_map) == 0) + isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, 
HCBUFSTAT_ATL_ACTIVE); + if (done_map) { + DBG(3, "%s: ATL done_map %08x\n", __func__, done_map); + finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); + start_atl_transfers(isp1362_hcd); + } + handled = 1; + } + + if (irqstat & HCuPINT_OPR) { + u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT); + isp1362_hcd->irq_stat[ISP1362_INT_OPR]++; + + svc_mask &= ~HCuPINT_OPR; + DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb); + intstat &= isp1362_hcd->intenb; + if (intstat & OHCI_INTR_UE) { + pr_err("Unrecoverable error\n"); + /* FIXME: do here reset or cleanup or whatever */ + } + if (intstat & OHCI_INTR_RHSC) { + isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS); + isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1); + isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2); + } + if (intstat & OHCI_INTR_RD) { + pr_info("%s: RESUME DETECTED\n", __func__); + isp1362_show_reg(isp1362_hcd, HCCONTROL); + usb_hcd_resume_root_hub(hcd); + } + isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat); + irqstat &= ~HCuPINT_OPR; + handled = 1; + } + + if (irqstat & HCuPINT_SUSP) { + isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++; + handled = 1; + svc_mask &= ~HCuPINT_SUSP; + + pr_info("%s: SUSPEND IRQ\n", __func__); + } + + if (irqstat & HCuPINT_CLKRDY) { + isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++; + handled = 1; + isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY; + svc_mask &= ~HCuPINT_CLKRDY; + pr_info("%s: CLKRDY IRQ\n", __func__); + } + + if (svc_mask) + pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask); + + isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); + isp1362_hcd->irq_active--; + spin_unlock(&isp1362_hcd->lock); + + return IRQ_RETVAL(handled); +} + +/*-------------------------------------------------------------------------*/ + +#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */ +static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load) +{ + int i, branch = -ENOSPC; + + /* search for the least loaded schedule branch of that interval + * which has enough bandwidth left unreserved. + */ + for (i = 0; i < interval; i++) { + if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) { + int j; + + for (j = i; j < PERIODIC_SIZE; j += interval) { + if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) { + pr_err("%s: new load %d load[%02x] %d max %d\n", __func__, + load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD); + break; + } + } + if (j < PERIODIC_SIZE) + continue; + branch = i; + } + } + return branch; +} + +/* NB! ALL the code above this point runs with isp1362_hcd->lock + held, irqs off +*/ + +/*-------------------------------------------------------------------------*/ + +static int isp1362_urb_enqueue(struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + struct usb_device *udev = urb->dev; + unsigned int pipe = urb->pipe; + int is_out = !usb_pipein(pipe); + int type = usb_pipetype(pipe); + int epnum = usb_pipeendpoint(pipe); + struct usb_host_endpoint *hep = urb->ep; + struct isp1362_ep *ep = NULL; + unsigned long flags; + int retval = 0; + + DBG(3, "%s: urb %p\n", __func__, urb); + + if (type == PIPE_ISOCHRONOUS) { + pr_err("Isochronous transfers not supported\n"); + return -ENOSPC; + } + + URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__, + usb_pipedevice(pipe), epnum, + is_out ? "out" : "in", + usb_pipecontrol(pipe) ? "ctrl" : + usb_pipeint(pipe) ? "int" : + usb_pipebulk(pipe) ? 
"bulk" : + "iso", + urb->transfer_buffer_length, + (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "", + !(urb->transfer_flags & URB_SHORT_NOT_OK) ? + "short_ok" : ""); + + /* avoid all allocations within spinlocks: request or endpoint */ + if (!hep->hcpriv) { + ep = kcalloc(1, sizeof *ep, mem_flags); + if (!ep) + return -ENOMEM; + } + spin_lock_irqsave(&isp1362_hcd->lock, flags); + + /* don't submit to a dead or disabled port */ + if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) & + (1 << USB_PORT_FEAT_ENABLE)) || + !HC_IS_RUNNING(hcd->state)) { + kfree(ep); + retval = -ENODEV; + goto fail_not_linked; + } + + retval = usb_hcd_link_urb_to_ep(hcd, urb); + if (retval) { + kfree(ep); + goto fail_not_linked; + } + + if (hep->hcpriv) { + ep = hep->hcpriv; + } else { + INIT_LIST_HEAD(&ep->schedule); + INIT_LIST_HEAD(&ep->active); + INIT_LIST_HEAD(&ep->remove_list); + ep->udev = usb_get_dev(udev); + ep->hep = hep; + ep->epnum = epnum; + ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); + ep->ptd_offset = -EINVAL; + ep->ptd_index = -EINVAL; + usb_settoggle(udev, epnum, is_out, 0); + + if (type == PIPE_CONTROL) + ep->nextpid = USB_PID_SETUP; + else if (is_out) + ep->nextpid = USB_PID_OUT; + else + ep->nextpid = USB_PID_IN; + + switch (type) { + case PIPE_ISOCHRONOUS: + case PIPE_INTERRUPT: + if (urb->interval > PERIODIC_SIZE) + urb->interval = PERIODIC_SIZE; + ep->interval = urb->interval; + ep->branch = PERIODIC_SIZE; + ep->load = usb_calc_bus_time(udev->speed, !is_out, + (type == PIPE_ISOCHRONOUS), + usb_maxpacket(udev, pipe, is_out)) / 1000; + break; + } + hep->hcpriv = ep; + } + ep->num_req = isp1362_hcd->req_serial++; + + /* maybe put endpoint into schedule */ + switch (type) { + case PIPE_CONTROL: + case PIPE_BULK: + if (list_empty(&ep->schedule)) { + DBG(1, "%s: Adding ep %p req %d to async schedule\n", + __func__, ep, ep->num_req); + list_add_tail(&ep->schedule, &isp1362_hcd->async); + } + break; + case PIPE_ISOCHRONOUS: + case PIPE_INTERRUPT: + urb->interval = ep->interval; + + /* urb submitted for already existing EP */ + if (ep->branch < PERIODIC_SIZE) + break; + + retval = balance(isp1362_hcd, ep->interval, ep->load); + if (retval < 0) { + pr_err("%s: balance returned %d\n", __func__, retval); + goto fail; + } + ep->branch = retval; + retval = 0; + isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); + DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n", + __func__, isp1362_hcd->fmindex, ep->branch, + ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) & + ~(PERIODIC_SIZE - 1)) + ep->branch, + (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch); + + if (list_empty(&ep->schedule)) { + if (type == PIPE_ISOCHRONOUS) { + u16 frame = isp1362_hcd->fmindex; + + frame += max_t(u16, 8, ep->interval); + frame &= ~(ep->interval - 1); + frame |= ep->branch; + if (frame_before(frame, isp1362_hcd->fmindex)) + frame += ep->interval; + urb->start_frame = frame; + + DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep); + list_add_tail(&ep->schedule, &isp1362_hcd->isoc); + } else { + DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep); + list_add_tail(&ep->schedule, &isp1362_hcd->periodic); + } + } else + DBG(1, "%s: ep %p already scheduled\n", __func__, ep); + + DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__, + ep->load / ep->interval, isp1362_hcd->load[ep->branch], + isp1362_hcd->load[ep->branch] + ep->load); + isp1362_hcd->load[ep->branch] += ep->load; + } + + urb->hcpriv = hep; + ALIGNSTAT(isp1362_hcd, urb->transfer_buffer); 
+ + switch (type) { + case PIPE_CONTROL: + case PIPE_BULK: + start_atl_transfers(isp1362_hcd); + break; + case PIPE_INTERRUPT: + start_intl_transfers(isp1362_hcd); + break; + case PIPE_ISOCHRONOUS: + start_iso_transfers(isp1362_hcd); + break; + default: + BUG(); + } + fail: + if (retval) + usb_hcd_unlink_urb_from_ep(hcd, urb); + + + fail_not_linked: + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + if (retval) + DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval); + return retval; +} + +static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + struct usb_host_endpoint *hep; + unsigned long flags; + struct isp1362_ep *ep; + int retval = 0; + + DBG(3, "%s: urb %p\n", __func__, urb); + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + retval = usb_hcd_check_unlink_urb(hcd, urb, status); + if (retval) + goto done; + + hep = urb->hcpriv; + + if (!hep) { + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + return -EIDRM; + } + + ep = hep->hcpriv; + if (ep) { + /* In front of queue? */ + if (ep->hep->urb_list.next == &urb->urb_list) { + if (!list_empty(&ep->active)) { + DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__, + urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset); + /* disable processing and queue PTD for removal */ + remove_ptd(isp1362_hcd, ep); + urb = NULL; + } + } + if (urb) { + DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep, + ep->num_req); + finish_request(isp1362_hcd, ep, urb, status); + } else + DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb); + } else { + pr_warning("%s: No EP in URB %p\n", __func__, urb); + retval = -EINVAL; + } +done: + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + DBG(3, "%s: exit\n", __func__); + + return retval; +} + +static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) +{ + struct isp1362_ep *ep = hep->hcpriv; + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + unsigned long flags; + + DBG(1, "%s: ep %p\n", __func__, ep); + if (!ep) + return; + spin_lock_irqsave(&isp1362_hcd->lock, flags); + if (!list_empty(&hep->urb_list)) { + if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) { + DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__, + ep, ep->num_req, ep->ptd_index, ep->ptd_offset); + remove_ptd(isp1362_hcd, ep); + pr_info("%s: Waiting for Interrupt to clean up\n", __func__); + } + } + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + /* Wait for interrupt to clear out active list */ + while (!list_empty(&ep->active)) + msleep(1); + + DBG(1, "%s: Freeing EP %p\n", __func__, ep); + + usb_put_dev(ep->udev); + kfree(ep); + hep->hcpriv = NULL; +} + +static int isp1362_get_frame(struct usb_hcd *hcd) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + u32 fmnum; + unsigned long flags; + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + return (int)fmnum; +} + +/*-------------------------------------------------------------------------*/ + +/* Adapted from ohci-hub.c */ +static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + int ports, i, changed = 0; + unsigned long flags; + + if (!HC_IS_RUNNING(hcd->state)) + return -ESHUTDOWN; + + /* Report no status change now, if we are scheduled to be + called later */ + if (timer_pending(&hcd->rh_timer)) + return 0; 
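+	/* build the status-change bitmap (bit 0 = hub, bits 1..N = ports) from the cached root-hub state */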
+ + ports = isp1362_hcd->rhdesca & RH_A_NDP; + BUG_ON(ports > 2); + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + /* init status */ + if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC)) + buf[0] = changed = 1; + else + buf[0] = 0; + + for (i = 0; i < ports; i++) { + u32 status = isp1362_hcd->rhport[i]; + + if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | + RH_PS_OCIC | RH_PS_PRSC)) { + changed = 1; + buf[0] |= 1 << (i + 1); + continue; + } + + if (!(status & RH_PS_CCS)) + continue; + } + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + return changed; +} + +static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd, + struct usb_hub_descriptor *desc) +{ + u32 reg = isp1362_hcd->rhdesca; + + DBG(3, "%s: enter\n", __func__); + + desc->bDescriptorType = 0x29; + desc->bDescLength = 9; + desc->bHubContrCurrent = 0; + desc->bNbrPorts = reg & 0x3; + /* Power switching, device type, overcurrent. */ + desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f); + DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f)); + desc->bPwrOn2PwrGood = (reg >> 24) & 0xff; + /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */ + desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1; + desc->bitmap[1] = ~0; + + DBG(3, "%s: exit\n", __func__); +} + +/* Adapted from ohci-hub.c */ +static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + int retval = 0; + unsigned long flags; + unsigned long t1; + int ports = isp1362_hcd->rhdesca & RH_A_NDP; + u32 tmp = 0; + + switch (typeReq) { + case ClearHubFeature: + DBG(0, "ClearHubFeature: "); + switch (wValue) { + case C_HUB_OVER_CURRENT: + _DBG(0, "C_HUB_OVER_CURRENT\n"); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + case C_HUB_LOCAL_POWER: + _DBG(0, "C_HUB_LOCAL_POWER\n"); + break; + default: + goto error; + } + break; + case SetHubFeature: + DBG(0, "SetHubFeature: "); + switch (wValue) { + case C_HUB_OVER_CURRENT: + case C_HUB_LOCAL_POWER: + _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n"); + break; + default: + goto error; + } + break; + case GetHubDescriptor: + DBG(0, "GetHubDescriptor\n"); + isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf); + break; + case GetHubStatus: + DBG(0, "GetHubStatus\n"); + put_unaligned(cpu_to_le32(0), (__le32 *) buf); + break; + case GetPortStatus: +#ifndef VERBOSE + DBG(0, "GetPortStatus\n"); +#endif + if (!wIndex || wIndex > ports) + goto error; + tmp = isp1362_hcd->rhport[--wIndex]; + put_unaligned(cpu_to_le32(tmp), (__le32 *) buf); + break; + case ClearPortFeature: + DBG(0, "ClearPortFeature: "); + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + _DBG(0, "USB_PORT_FEAT_ENABLE\n"); + tmp = RH_PS_CCS; + break; + case USB_PORT_FEAT_C_ENABLE: + _DBG(0, "USB_PORT_FEAT_C_ENABLE\n"); + tmp = RH_PS_PESC; + break; + case USB_PORT_FEAT_SUSPEND: + _DBG(0, "USB_PORT_FEAT_SUSPEND\n"); + tmp = RH_PS_POCI; + break; + case USB_PORT_FEAT_C_SUSPEND: + _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n"); + tmp = RH_PS_PSSC; + break; + case USB_PORT_FEAT_POWER: + _DBG(0, "USB_PORT_FEAT_POWER\n"); + tmp = RH_PS_LSDA; + + break; + case USB_PORT_FEAT_C_CONNECTION: + _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n"); + tmp = RH_PS_CSC; + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + _DBG(0, 
"USB_PORT_FEAT_C_OVER_CURRENT\n"); + tmp = RH_PS_OCIC; + break; + case USB_PORT_FEAT_C_RESET: + _DBG(0, "USB_PORT_FEAT_C_RESET\n"); + tmp = RH_PS_PRSC; + break; + default: + goto error; + } + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp); + isp1362_hcd->rhport[wIndex] = + isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + break; + case SetPortFeature: + DBG(0, "SetPortFeature: "); + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + _DBG(0, "USB_PORT_FEAT_SUSPEND\n"); +#ifdef CONFIG_USB_OTG + if (ohci->hcd.self.otg_port == (wIndex + 1) && + ohci->hcd.self.b_hnp_enable) { + start_hnp(ohci); + break; + } +#endif + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS); + isp1362_hcd->rhport[wIndex] = + isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + break; + case USB_PORT_FEAT_POWER: + _DBG(0, "USB_PORT_FEAT_POWER\n"); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS); + isp1362_hcd->rhport[wIndex] = + isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + break; + case USB_PORT_FEAT_RESET: + _DBG(0, "USB_PORT_FEAT_RESET\n"); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + + t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH); + while (time_before(jiffies, t1)) { + /* spin until any current reset finishes */ + for (;;) { + tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); + if (!(tmp & RH_PS_PRS)) + break; + udelay(500); + } + if (!(tmp & RH_PS_CCS)) + break; + /* Reset lasts 10ms (claims datasheet) */ + isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS)); + + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + msleep(10); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + } + + isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, + HCRHPORT1 + wIndex); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + break; + default: + goto error; + } + break; + + default: + error: + /* "protocol stall" on error */ + _DBG(0, "PROTOCOL STALL\n"); + retval = -EPIPE; + } + + return retval; +} + +#ifdef CONFIG_PM +static int isp1362_bus_suspend(struct usb_hcd *hcd) +{ + int status = 0; + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + unsigned long flags; + + if (time_before(jiffies, isp1362_hcd->next_statechange)) + msleep(5); + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + + isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); + switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { + case OHCI_USB_RESUME: + DBG(0, "%s: resume/suspend?\n", __func__); + isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; + isp1362_hcd->hc_control |= OHCI_USB_RESET; + isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); + /* FALL THROUGH */ + case OHCI_USB_RESET: + status = -EBUSY; + pr_warning("%s: needs reinit!\n", __func__); + goto done; + case OHCI_USB_SUSPEND: + pr_warning("%s: already suspended?\n", __func__); + goto done; + } + DBG(0, "%s: suspend root hub\n", __func__); + + /* First stop any processing */ + hcd->state = HC_STATE_QUIESCING; + if (!list_empty(&isp1362_hcd->atl_queue.active) || + !list_empty(&isp1362_hcd->intl_queue.active) || + !list_empty(&isp1362_hcd->istl_queue[0] .active) || + !list_empty(&isp1362_hcd->istl_queue[1] 
.active)) { + int limit; + + isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); + isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); + isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0); + isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); + isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF); + + DBG(0, "%s: stopping schedules ...\n", __func__); + limit = 2000; + while (limit > 0) { + udelay(250); + limit -= 250; + if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF) + break; + } + mdelay(7); + if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) { + u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); + finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); + } + if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) { + u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); + finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); + } + if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0) + finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]); + if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1) + finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]); + } + DBG(0, "%s: HCINTSTAT: %08x\n", __func__, + isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); + isp1362_write_reg32(isp1362_hcd, HCINTSTAT, + isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); + + /* Suspend hub */ + isp1362_hcd->hc_control = OHCI_USB_SUSPEND; + isp1362_show_reg(isp1362_hcd, HCCONTROL); + isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); + isp1362_show_reg(isp1362_hcd, HCCONTROL); + +#if 1 + isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); + if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) { + pr_err("%s: controller won't suspend %08x\n", __func__, + isp1362_hcd->hc_control); + status = -EBUSY; + } else +#endif + { + /* no resumes until devices finish suspending */ + isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5); + } +done: + if (status == 0) { + hcd->state = HC_STATE_SUSPENDED; + DBG(0, "%s: HCD suspended: %08x\n", __func__, + isp1362_read_reg32(isp1362_hcd, HCCONTROL)); + } + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + return status; +} + +static int isp1362_bus_resume(struct usb_hcd *hcd) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + u32 port; + unsigned long flags; + int status = -EINPROGRESS; + + if (time_before(jiffies, isp1362_hcd->next_statechange)) + msleep(5); + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); + pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control); + if (hcd->state == HC_STATE_RESUMING) { + pr_warning("%s: duplicate resume\n", __func__); + status = 0; + } else + switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { + case OHCI_USB_SUSPEND: + DBG(0, "%s: resume root hub\n", __func__); + isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; + isp1362_hcd->hc_control |= OHCI_USB_RESUME; + isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); + break; + case OHCI_USB_RESUME: + /* HCFS changes sometime after INTR_RD */ + DBG(0, "%s: remote wakeup\n", __func__); + break; + case OHCI_USB_OPER: + DBG(0, "%s: odd resume\n", __func__); + status = 0; + hcd->self.root_hub->dev.power.power_state = PMSG_ON; + break; + default: /* RESET, we lost power */ + DBG(0, "%s: root hub hardware reset\n", __func__); + status = -EBUSY; + } + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + if (status == -EBUSY) { + DBG(0, "%s: Restarting 
HC\n", __func__); + isp1362_hc_stop(hcd); + return isp1362_hc_start(hcd); + } + if (status != -EINPROGRESS) + return status; + spin_lock_irqsave(&isp1362_hcd->lock, flags); + port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP; + while (port--) { + u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port); + + /* force global, not selective, resume */ + if (!(stat & RH_PS_PSS)) { + DBG(0, "%s: Not Resuming RH port %d\n", __func__, port); + continue; + } + DBG(0, "%s: Resuming RH port %d\n", __func__, port); + isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI); + } + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + /* Some controllers (lucent) need extra-long delays */ + hcd->state = HC_STATE_RESUMING; + mdelay(20 /* usb 11.5.1.10 */ + 15); + + isp1362_hcd->hc_control = OHCI_USB_OPER; + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_show_reg(isp1362_hcd, HCCONTROL); + isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + /* TRSMRCY */ + msleep(10); + + /* keep it alive for ~5x suspend + resume costs */ + isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250); + + hcd->self.root_hub->dev.power.power_state = PMSG_ON; + hcd->state = HC_STATE_RUNNING; + return 0; +} +#else +#define isp1362_bus_suspend NULL +#define isp1362_bus_resume NULL +#endif + +/*-------------------------------------------------------------------------*/ + +#ifdef STUB_DEBUG_FILE + +static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd) +{ +} +static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd) +{ +} + +#else + +#include +#include + +static void dump_irq(struct seq_file *s, char *label, u16 mask) +{ + seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask, + mask & HCuPINT_CLKRDY ? " clkrdy" : "", + mask & HCuPINT_SUSP ? " susp" : "", + mask & HCuPINT_OPR ? " opr" : "", + mask & HCuPINT_EOT ? " eot" : "", + mask & HCuPINT_ATL ? " atl" : "", + mask & HCuPINT_SOF ? " sof" : ""); +} + +static void dump_int(struct seq_file *s, char *label, u32 mask) +{ + seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask, + mask & OHCI_INTR_MIE ? " MIE" : "", + mask & OHCI_INTR_RHSC ? " rhsc" : "", + mask & OHCI_INTR_FNO ? " fno" : "", + mask & OHCI_INTR_UE ? " ue" : "", + mask & OHCI_INTR_RD ? " rd" : "", + mask & OHCI_INTR_SF ? " sof" : "", + mask & OHCI_INTR_SO ? " so" : ""); +} + +static void dump_ctrl(struct seq_file *s, char *label, u32 mask) +{ + seq_printf(s, "%-15s %08x%s%s%s\n", label, mask, + mask & OHCI_CTRL_RWC ? " rwc" : "", + mask & OHCI_CTRL_RWE ? 
" rwe" : "", + ({ + char *hcfs; + switch (mask & OHCI_CTRL_HCFS) { + case OHCI_USB_OPER: + hcfs = " oper"; + break; + case OHCI_USB_RESET: + hcfs = " reset"; + break; + case OHCI_USB_RESUME: + hcfs = " resume"; + break; + case OHCI_USB_SUSPEND: + hcfs = " suspend"; + break; + default: + hcfs = " ?"; + } + hcfs; + })); +} + +static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd) +{ + seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION), + isp1362_read_reg32(isp1362_hcd, HCREVISION)); + seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL), + isp1362_read_reg32(isp1362_hcd, HCCONTROL)); + seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT), + isp1362_read_reg32(isp1362_hcd, HCCMDSTAT)); + seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT), + isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); + seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB), + isp1362_read_reg32(isp1362_hcd, HCINTENB)); + seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL), + isp1362_read_reg32(isp1362_hcd, HCFMINTVL)); + seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM), + isp1362_read_reg32(isp1362_hcd, HCFMREM)); + seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM), + isp1362_read_reg32(isp1362_hcd, HCFMNUM)); + seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH), + isp1362_read_reg32(isp1362_hcd, HCLSTHRESH)); + seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA), + isp1362_read_reg32(isp1362_hcd, HCRHDESCA)); + seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB), + isp1362_read_reg32(isp1362_hcd, HCRHDESCB)); + seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS), + isp1362_read_reg32(isp1362_hcd, HCRHSTATUS)); + seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1), + isp1362_read_reg32(isp1362_hcd, HCRHPORT1)); + seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2), + isp1362_read_reg32(isp1362_hcd, HCRHPORT2)); + seq_printf(s, "\n"); + seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG), + isp1362_read_reg16(isp1362_hcd, HCHWCFG)); + seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG), + isp1362_read_reg16(isp1362_hcd, HCDMACFG)); + seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR), + isp1362_read_reg16(isp1362_hcd, HCXFERCTR)); + seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT), + isp1362_read_reg16(isp1362_hcd, HCuPINT)); + seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), + isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); + seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID), + isp1362_read_reg16(isp1362_hcd, HCCHIPID)); + seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH), + isp1362_read_reg16(isp1362_hcd, HCSCRATCH)); + seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT), + isp1362_read_reg16(isp1362_hcd, HCBUFSTAT)); + seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR), + isp1362_read_reg32(isp1362_hcd, HCDIRADDR)); +#if 0 + seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA), + isp1362_read_reg16(isp1362_hcd, HCDIRDATA)); +#endif + seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", 
ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ), + isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ)); + seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE), + isp1362_read_reg16(isp1362_hcd, HCISTLRATE)); + seq_printf(s, "\n"); + seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ), + isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ)); + seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ), + isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ)); + seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE), + isp1362_read_reg32(isp1362_hcd, HCINTLDONE)); + seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP), + isp1362_read_reg32(isp1362_hcd, HCINTLSKIP)); + seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST), + isp1362_read_reg32(isp1362_hcd, HCINTLLAST)); + seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR), + isp1362_read_reg16(isp1362_hcd, HCINTLCURR)); + seq_printf(s, "\n"); + seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ), + isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ)); + seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ), + isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ)); +#if 0 + seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE), + isp1362_read_reg32(isp1362_hcd, HCATLDONE)); +#endif + seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP), + isp1362_read_reg32(isp1362_hcd, HCATLSKIP)); + seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST), + isp1362_read_reg32(isp1362_hcd, HCATLLAST)); + seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR), + isp1362_read_reg16(isp1362_hcd, HCATLCURR)); + seq_printf(s, "\n"); + seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC), + isp1362_read_reg16(isp1362_hcd, HCATLDTC)); + seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO), + isp1362_read_reg16(isp1362_hcd, HCATLDTCTO)); +} + +static int proc_isp1362_show(struct seq_file *s, void *unused) +{ + struct isp1362_hcd *isp1362_hcd = s->private; + struct isp1362_ep *ep; + int i; + + seq_printf(s, "%s\n%s version %s\n", + isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION); + + /* collect statistics to help estimate potential win for + * DMA engines that care about alignment (PXA) + */ + seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n", + isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4, + isp1362_hcd->stat2, isp1362_hcd->stat1); + seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds); + seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds); + seq_printf(s, "max # ptds in ISTL fifo: %d\n", + max(isp1362_hcd->istl_queue[0] .stat_maxptds, + isp1362_hcd->istl_queue[1] .stat_maxptds)); + + /* FIXME: don't show the following in suspended state */ + spin_lock_irq(&isp1362_hcd->lock); + + dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); + dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT)); + dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB)); + dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); + dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL)); + + for (i = 0; i < NUM_ISP1362_IRQS; i++) + if 
(isp1362_hcd->irq_stat[i]) + seq_printf(s, "%-15s: %d\n", + ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]); + + dump_regs(s, isp1362_hcd); + list_for_each_entry(ep, &isp1362_hcd->async, schedule) { + struct urb *urb; + + seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum, + ({ + char *s; + switch (ep->nextpid) { + case USB_PID_IN: + s = "in"; + break; + case USB_PID_OUT: + s = "out"; + break; + case USB_PID_SETUP: + s = "setup"; + break; + case USB_PID_ACK: + s = "status"; + break; + default: + s = "?"; + break; + }; + s;}), ep->maxpacket) ; + list_for_each_entry(urb, &ep->hep->urb_list, urb_list) { + seq_printf(s, " urb%p, %d/%d\n", urb, + urb->actual_length, + urb->transfer_buffer_length); + } + } + if (!list_empty(&isp1362_hcd->async)) + seq_printf(s, "\n"); + dump_ptd_queue(&isp1362_hcd->atl_queue); + + seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE); + + list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { + seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch, + isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset); + + seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", + ep->interval, ep, + (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", + ep->udev->devnum, ep->epnum, + (ep->epnum == 0) ? "" : + ((ep->nextpid == USB_PID_IN) ? + "in" : "out"), ep->maxpacket); + } + dump_ptd_queue(&isp1362_hcd->intl_queue); + + seq_printf(s, "ISO:\n"); + + list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) { + seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", + ep->interval, ep, + (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", + ep->udev->devnum, ep->epnum, + (ep->epnum == 0) ? "" : + ((ep->nextpid == USB_PID_IN) ? + "in" : "out"), ep->maxpacket); + } + + spin_unlock_irq(&isp1362_hcd->lock); + seq_printf(s, "\n"); + + return 0; +} + +static int proc_isp1362_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_isp1362_show, PDE(inode)->data); +} + +static const struct file_operations proc_ops = { + .open = proc_isp1362_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* expect just one isp1362_hcd per system */ +static const char proc_filename[] = "driver/isp1362"; + +static void create_debug_file(struct isp1362_hcd *isp1362_hcd) +{ + struct proc_dir_entry *pde; + + pde = create_proc_entry(proc_filename, 0, NULL); + if (pde == NULL) { + pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename); + return; + } + + pde->proc_fops = &proc_ops; + pde->data = isp1362_hcd; + isp1362_hcd->pde = pde; +} + +static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) +{ + if (isp1362_hcd->pde) + remove_proc_entry(proc_filename, 0); +} + +#endif + +/*-------------------------------------------------------------------------*/ + +static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) +{ + int tmp = 20; + unsigned long flags; + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + + isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); + isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); + while (--tmp) { + mdelay(1); + if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR)) + break; + } + if (!tmp) + pr_err("Software reset timeout\n"); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); +} + +static int isp1362_mem_config(struct usb_hcd *hcd) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + unsigned long flags; + u32 total; + u16 istl_size = ISP1362_ISTL_BUFSIZE; + u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE; + u16 intl_size = 
ISP1362_INTL_BUFFERS * intl_blksize; + u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE; + u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize; + u16 atl_size; + int i; + + WARN_ON(istl_size & 3); + WARN_ON(atl_blksize & 3); + WARN_ON(intl_blksize & 3); + WARN_ON(atl_blksize < PTD_HEADER_SIZE); + WARN_ON(intl_blksize < PTD_HEADER_SIZE); + + BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32); + if (atl_buffers > 32) + atl_buffers = 32; + atl_size = atl_buffers * atl_blksize; + total = atl_size + intl_size + istl_size; + dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); + dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", + istl_size / 2, istl_size, 0, istl_size / 2); + dev_info(hcd->self.controller, " INTL: %4d * (%3u+8): %4d @ $%04x\n", + ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, + intl_size, istl_size); + dev_info(hcd->self.controller, " ATL : %4d * (%3u+8): %4d @ $%04x\n", + atl_buffers, atl_blksize - PTD_HEADER_SIZE, + atl_size, istl_size + intl_size); + dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, + ISP1362_BUF_SIZE - total); + + if (total > ISP1362_BUF_SIZE) { + dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n", + __func__, total, ISP1362_BUF_SIZE); + return -ENOMEM; + } + + total = istl_size + intl_size + atl_size; + spin_lock_irqsave(&isp1362_hcd->lock, flags); + + for (i = 0; i < 2; i++) { + isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2, + isp1362_hcd->istl_queue[i].buf_size = istl_size / 2; + isp1362_hcd->istl_queue[i].blk_size = 4; + INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active); + snprintf(isp1362_hcd->istl_queue[i].name, + sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i); + DBG(3, "%s: %5s buf $%04x %d\n", __func__, + isp1362_hcd->istl_queue[i].name, + isp1362_hcd->istl_queue[i].buf_start, + isp1362_hcd->istl_queue[i].buf_size); + } + isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2); + + isp1362_hcd->intl_queue.buf_start = istl_size; + isp1362_hcd->intl_queue.buf_size = intl_size; + isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS; + isp1362_hcd->intl_queue.blk_size = intl_blksize; + isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count; + isp1362_hcd->intl_queue.skip_map = ~0; + INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active); + + isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ, + isp1362_hcd->intl_queue.buf_size); + isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ, + isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE); + isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); + isp1362_write_reg32(isp1362_hcd, HCINTLLAST, + 1 << (ISP1362_INTL_BUFFERS - 1)); + + isp1362_hcd->atl_queue.buf_start = istl_size + intl_size; + isp1362_hcd->atl_queue.buf_size = atl_size; + isp1362_hcd->atl_queue.buf_count = atl_buffers; + isp1362_hcd->atl_queue.blk_size = atl_blksize; + isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count; + isp1362_hcd->atl_queue.skip_map = ~0; + INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active); + + isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ, + isp1362_hcd->atl_queue.buf_size); + isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ, + isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE); + isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); + isp1362_write_reg32(isp1362_hcd, HCATLLAST, + 1 << (atl_buffers - 1)); + + snprintf(isp1362_hcd->atl_queue.name, + sizeof(isp1362_hcd->atl_queue.name), "ATL"); + snprintf(isp1362_hcd->intl_queue.name, + sizeof(isp1362_hcd->intl_queue.name), "INTL"); + DBG(3, "%s: %5s buf $%04x %2d 
* %4d = %4d\n", __func__, + isp1362_hcd->intl_queue.name, + isp1362_hcd->intl_queue.buf_start, + ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size, + isp1362_hcd->intl_queue.buf_size); + DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__, + isp1362_hcd->atl_queue.name, + isp1362_hcd->atl_queue.buf_start, + atl_buffers, isp1362_hcd->atl_queue.blk_size, + isp1362_hcd->atl_queue.buf_size); + + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + return 0; +} + +static int isp1362_hc_reset(struct usb_hcd *hcd) +{ + int ret = 0; + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + unsigned long t; + unsigned long timeout = 100; + unsigned long flags; + int clkrdy = 0; + + pr_info("%s:\n", __func__); + + if (isp1362_hcd->board && isp1362_hcd->board->reset) { + isp1362_hcd->board->reset(hcd->self.controller, 1); + msleep(20); + if (isp1362_hcd->board->clock) + isp1362_hcd->board->clock(hcd->self.controller, 1); + isp1362_hcd->board->reset(hcd->self.controller, 0); + } else + isp1362_sw_reset(isp1362_hcd); + + /* chip has been reset. First we need to see a clock */ + t = jiffies + msecs_to_jiffies(timeout); + while (!clkrdy && time_before_eq(jiffies, t)) { + spin_lock_irqsave(&isp1362_hcd->lock, flags); + clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY; + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + if (!clkrdy) + msleep(4); + } + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + if (!clkrdy) { + pr_err("Clock not ready after %lums\n", timeout); + ret = -ENODEV; + } + return ret; +} + +static void isp1362_hc_stop(struct usb_hcd *hcd) +{ + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + unsigned long flags; + u32 tmp; + + pr_info("%s:\n", __func__); + + del_timer_sync(&hcd->rh_timer); + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + + isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); + + /* Switch off power for all ports */ + tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); + tmp &= ~(RH_A_NPS | RH_A_PSM); + isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp); + isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); + + /* Reset the chip */ + if (isp1362_hcd->board && isp1362_hcd->board->reset) + isp1362_hcd->board->reset(hcd->self.controller, 1); + else + isp1362_sw_reset(isp1362_hcd); + + if (isp1362_hcd->board && isp1362_hcd->board->clock) + isp1362_hcd->board->clock(hcd->self.controller, 0); + + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); +} + +#ifdef CHIP_BUFFER_TEST +static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd) +{ + int ret = 0; + u16 *ref; + unsigned long flags; + + ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL); + if (ref) { + int offset; + u16 *tst = &ref[ISP1362_BUF_SIZE / 2]; + + for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) { + ref[offset] = ~offset; + tst[offset] = offset; + } + + for (offset = 0; offset < 4; offset++) { + int j; + + for (j = 0; j < 8; j++) { + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j); + isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + if (memcmp(ref, tst, j)) { + ret = -ENODEV; + pr_err("%s: memory check with %d byte offset %d failed\n", + __func__, j, offset); + dump_data((u8 *)ref + offset, j); + dump_data((u8 *)tst + offset, j); + } + } + } + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + 
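+ /* Full-buffer pass: write the complete reference pattern
+  * (ISP1362_BUF_SIZE bytes) in a single transfer and read it back,
+  * so that large sequential accesses to the on-chip buffer memory are
+  * verified in addition to the short offset/length combinations
+  * exercised above. */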
isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE); + isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + if (memcmp(ref, tst, ISP1362_BUF_SIZE)) { + ret = -ENODEV; + pr_err("%s: memory check failed\n", __func__); + dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2); + } + + for (offset = 0; offset < 256; offset++) { + int test_size = 0; + + yield(); + + memset(tst, 0, ISP1362_BUF_SIZE); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); + isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))), + ISP1362_BUF_SIZE / 2)) { + pr_err("%s: Failed to clear buffer\n", __func__); + dump_data((u8 *)tst, ISP1362_BUF_SIZE); + break; + } + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE); + isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref), + offset * 2 + PTD_HEADER_SIZE, test_size); + isp1362_read_buffer(isp1362_hcd, tst, offset * 2, + PTD_HEADER_SIZE + test_size); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { + dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size); + dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_read_buffer(isp1362_hcd, tst, offset * 2, + PTD_HEADER_SIZE + test_size); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { + ret = -ENODEV; + pr_err("%s: memory check with offset %02x failed\n", + __func__, offset); + break; + } + pr_warning("%s: memory check with offset %02x ok after second read\n", + __func__, offset); + } + } + kfree(ref); + } + return ret; +} +#endif + +static int isp1362_hc_start(struct usb_hcd *hcd) +{ + int ret; + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + struct isp1362_platform_data *board = isp1362_hcd->board; + u16 hwcfg; + u16 chipid; + unsigned long flags; + + pr_info("%s:\n", __func__); + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) { + pr_err("%s: Invalid chip ID %04x\n", __func__, chipid); + return -ENODEV; + } + +#ifdef CHIP_BUFFER_TEST + ret = isp1362_chip_test(isp1362_hcd); + if (ret) + return -ENODEV; +#endif + spin_lock_irqsave(&isp1362_hcd->lock, flags); + /* clear interrupt status and disable all interrupt sources */ + isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff); + isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); + + /* HW conf */ + hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1); + if (board->sel15Kres) + hwcfg |= HCHWCFG_PULLDOWN_DS2 | + (MAX_ROOT_PORTS > 1) ? 
HCHWCFG_PULLDOWN_DS1 : 0; + if (board->clknotstop) + hwcfg |= HCHWCFG_CLKNOTSTOP; + if (board->oc_enable) + hwcfg |= HCHWCFG_ANALOG_OC; + if (board->int_act_high) + hwcfg |= HCHWCFG_INT_POL; + if (board->int_edge_triggered) + hwcfg |= HCHWCFG_INT_TRIGGER; + if (board->dreq_act_high) + hwcfg |= HCHWCFG_DREQ_POL; + if (board->dack_act_high) + hwcfg |= HCHWCFG_DACK_POL; + isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg); + isp1362_show_reg(isp1362_hcd, HCHWCFG); + isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + ret = isp1362_mem_config(hcd); + if (ret) + return ret; + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + + /* Root hub conf */ + isp1362_hcd->rhdesca = 0; + if (board->no_power_switching) + isp1362_hcd->rhdesca |= RH_A_NPS; + if (board->power_switching_mode) + isp1362_hcd->rhdesca |= RH_A_PSM; + if (board->potpg) + isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT; + else + isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT; + + isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM); + isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM); + isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); + + isp1362_hcd->rhdescb = RH_B_PPCM; + isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb); + isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB); + + isp1362_read_reg32(isp1362_hcd, HCFMINTVL); + isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI); + isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH); + + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + isp1362_hcd->hc_control = OHCI_USB_OPER; + hcd->state = HC_STATE_RUNNING; + + spin_lock_irqsave(&isp1362_hcd->lock, flags); + /* Set up interrupts */ + isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE; + isp1362_hcd->intenb |= OHCI_INTR_RD; + isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP; + isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb); + isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); + + /* Go operational */ + isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); + /* enable global power */ + isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE); + + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static struct hc_driver isp1362_hc_driver = { + .description = hcd_name, + .product_desc = "ISP1362 Host Controller", + .hcd_priv_size = sizeof(struct isp1362_hcd), + + .irq = isp1362_irq, + .flags = HCD_USB11 | HCD_MEMORY, + + .reset = isp1362_hc_reset, + .start = isp1362_hc_start, + .stop = isp1362_hc_stop, + + .urb_enqueue = isp1362_urb_enqueue, + .urb_dequeue = isp1362_urb_dequeue, + .endpoint_disable = isp1362_endpoint_disable, + + .get_frame_number = isp1362_get_frame, + + .hub_status_data = isp1362_hub_status_data, + .hub_control = isp1362_hub_control, + .bus_suspend = isp1362_bus_suspend, + .bus_resume = isp1362_bus_resume, +}; + +/*-------------------------------------------------------------------------*/ + +#define resource_len(r) (((r)->end - (r)->start) + 1) + +static int __devexit isp1362_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + struct resource *res; + + remove_debug_file(isp1362_hcd); + DBG(0, "%s: Removing HCD\n", __func__); + usb_remove_hcd(hcd); + + 
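+ /* usb_remove_hcd() has shut the controller down; what follows undoes
+  * the probe() setup: unmap both register windows, release their
+  * memory regions and drop the final reference with usb_put_hcd(). */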
DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, + (u32)isp1362_hcd->data_reg); + iounmap(isp1362_hcd->data_reg); + + DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, + (u32)isp1362_hcd->addr_reg); + iounmap(isp1362_hcd->addr_reg); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); + if (res) + release_mem_region(res->start, resource_len(res)); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); + if (res) + release_mem_region(res->start, resource_len(res)); + + DBG(0, "%s: put_hcd\n", __func__); + usb_put_hcd(hcd); + DBG(0, "%s: Done\n", __func__); + + return 0; +} + +static int __init isp1362_probe(struct platform_device *pdev) +{ + struct usb_hcd *hcd; + struct isp1362_hcd *isp1362_hcd; + struct resource *addr, *data; + void __iomem *addr_reg; + void __iomem *data_reg; + int irq; + int retval = 0; + + /* basic sanity checks first. board-specific init logic should + * have initialized this the three resources and probably board + * specific platform_data. we don't probe for IRQs, and do only + * minimal sanity checking. + */ + if (pdev->num_resources < 3) { + retval = -ENODEV; + goto err1; + } + + data = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = platform_get_resource(pdev, IORESOURCE_MEM, 1); + irq = platform_get_irq(pdev, 0); + if (!addr || !data || irq < 0) { + retval = -ENODEV; + goto err1; + } + +#ifdef CONFIG_USB_HCD_DMA + if (pdev->dev.dma_mask) { + struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + + if (!dma_res) { + retval = -ENODEV; + goto err1; + } + isp1362_hcd->data_dma = dma_res->start; + isp1362_hcd->max_dma_size = resource_len(dma_res); + } +#else + if (pdev->dev.dma_mask) { + DBG(1, "won't do DMA"); + retval = -ENODEV; + goto err1; + } +#endif + + if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { + retval = -EBUSY; + goto err1; + } + addr_reg = ioremap(addr->start, resource_len(addr)); + if (addr_reg == NULL) { + retval = -ENOMEM; + goto err2; + } + + if (!request_mem_region(data->start, resource_len(data), hcd_name)) { + retval = -EBUSY; + goto err3; + } + data_reg = ioremap(data->start, resource_len(data)); + if (data_reg == NULL) { + retval = -ENOMEM; + goto err4; + } + + /* allocate and initialize hcd */ + hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev)); + if (!hcd) { + retval = -ENOMEM; + goto err5; + } + hcd->rsrc_start = data->start; + isp1362_hcd = hcd_to_isp1362_hcd(hcd); + isp1362_hcd->data_reg = data_reg; + isp1362_hcd->addr_reg = addr_reg; + + isp1362_hcd->next_statechange = jiffies; + spin_lock_init(&isp1362_hcd->lock); + INIT_LIST_HEAD(&isp1362_hcd->async); + INIT_LIST_HEAD(&isp1362_hcd->periodic); + INIT_LIST_HEAD(&isp1362_hcd->isoc); + INIT_LIST_HEAD(&isp1362_hcd->remove_list); + isp1362_hcd->board = pdev->dev.platform_data; +#if USE_PLATFORM_DELAY + if (!isp1362_hcd->board->delay) { + dev_err(hcd->self.controller, "No platform delay function given\n"); + retval = -ENODEV; + goto err6; + } +#endif + +#ifdef CONFIG_ARM + if (isp1362_hcd->board) + set_irq_type(irq, isp1362_hcd->board->int_act_high ? 
IRQT_RISING : IRQT_FALLING); +#endif + + retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED); + if (retval != 0) + goto err6; + pr_info("%s, irq %d\n", hcd->product_desc, irq); + + create_debug_file(isp1362_hcd); + + return 0; + + err6: + DBG(0, "%s: Freeing dev %08x\n", __func__, (u32)isp1362_hcd); + usb_put_hcd(hcd); + err5: + DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, (u32)data_reg); + iounmap(data_reg); + err4: + DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start); + release_mem_region(data->start, resource_len(data)); + err3: + DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, (u32)addr_reg); + iounmap(addr_reg); + err2: + DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start); + release_mem_region(addr->start, resource_len(addr)); + err1: + pr_err("%s: init error, %d\n", __func__, retval); + + return retval; +} + +#ifdef CONFIG_PM +static int isp1362_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + unsigned long flags; + int retval = 0; + + DBG(0, "%s: Suspending device\n", __func__); + + if (state.event == PM_EVENT_FREEZE) { + DBG(0, "%s: Suspending root hub\n", __func__); + retval = isp1362_bus_suspend(hcd); + } else { + DBG(0, "%s: Suspending RH ports\n", __func__); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + } + if (retval == 0) + pdev->dev.power.power_state = state; + return retval; +} + +static int isp1362_resume(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); + unsigned long flags; + + DBG(0, "%s: Resuming\n", __func__); + + if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { + DBG(0, "%s: Resume RH ports\n", __func__); + spin_lock_irqsave(&isp1362_hcd->lock, flags); + isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC); + spin_unlock_irqrestore(&isp1362_hcd->lock, flags); + return 0; + } + + pdev->dev.power.power_state = PMSG_ON; + + return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd)); +} +#else +#define isp1362_suspend NULL +#define isp1362_resume NULL +#endif + +static struct platform_driver isp1362_driver = { + .probe = isp1362_probe, + .remove = __devexit_p(isp1362_remove), + + .suspend = isp1362_suspend, + .resume = isp1362_resume, + .driver = { + .name = (char *)hcd_name, + .owner = THIS_MODULE, + }, +}; + +/*-------------------------------------------------------------------------*/ + +static int __init isp1362_init(void) +{ + if (usb_disabled()) + return -ENODEV; + pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION); + return platform_driver_register(&isp1362_driver); +} +module_init(isp1362_init); + +static void __exit isp1362_cleanup(void) +{ + platform_driver_unregister(&isp1362_driver); +} +module_exit(isp1362_cleanup); diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h new file mode 100644 index 00000000000..26e44fc8776 --- /dev/null +++ b/drivers/usb/host/isp1362.h @@ -0,0 +1,1079 @@ +/* + * ISP1362 HCD (Host Controller Driver) for USB. + * + * COPYRIGHT (C) by L. 
Wassmann + */ + +/* ------------------------------------------------------------------------- */ +/* + * Platform specific compile time options + */ +#if defined(CONFIG_ARCH_KARO) +#include +#include +#include + +#define USE_32BIT 1 + + +/* These options are mutually exclusive */ +#define USE_PLATFORM_DELAY 1 +#define USE_NDELAY 0 +/* + * MAX_ROOT_PORTS: Number of downstream ports + * + * The chip has two USB ports, one of which can be configured as + * a USB device port, so the value of this constant is implementation + * specific. + */ +#define MAX_ROOT_PORTS 2 +#define DUMMY_DELAY_ACCESS do {} while (0) + +/* insert platform specific definitions for other machines here */ +#elif defined(CONFIG_BLACKFIN) + +#include +#define USE_32BIT 0 +#define MAX_ROOT_PORTS 2 +#define USE_PLATFORM_DELAY 0 +#define USE_NDELAY 1 + +#define DUMMY_DELAY_ACCESS \ + do { \ + bfin_read16(ASYNC_BANK0_BASE); \ + bfin_read16(ASYNC_BANK0_BASE); \ + bfin_read16(ASYNC_BANK0_BASE); \ + } while (0) + +#undef insw +#undef outsw + +#define insw delayed_insw +#define outsw delayed_outsw + +static inline void delayed_outsw(unsigned int addr, void *buf, int len) +{ + unsigned short *bp = (unsigned short *)buf; + while (len--) { + DUMMY_DELAY_ACCESS; + outw(*bp++, addr); + } +} + +static inline void delayed_insw(unsigned int addr, void *buf, int len) +{ + unsigned short *bp = (unsigned short *)buf; + while (len--) { + DUMMY_DELAY_ACCESS; + *bp++ = inw((void *)addr); + } +} + +#else + +#define MAX_ROOT_PORTS 2 + +#define USE_32BIT 0 + +/* These options are mutually exclusive */ +#define USE_PLATFORM_DELAY 0 +#define USE_NDELAY 0 + +#define DUMMY_DELAY_ACCESS do {} while (0) + +#endif + + +/* ------------------------------------------------------------------------- */ + +#define USB_RESET_WIDTH 50 +#define MAX_XFER_SIZE 1023 + +/* Buffer sizes */ +#define ISP1362_BUF_SIZE 4096 +#define ISP1362_ISTL_BUFSIZE 512 +#define ISP1362_INTL_BLKSIZE 64 +#define ISP1362_INTL_BUFFERS 16 +#define ISP1362_ATL_BLKSIZE 64 + +#define ISP1362_REG_WRITE_OFFSET 0x80 + +#ifdef ISP1362_DEBUG +typedef const unsigned int isp1362_reg_t; + +#define REG_WIDTH_16 0x000 +#define REG_WIDTH_32 0x100 +#define REG_WIDTH_MASK 0x100 +#define REG_NO_MASK 0x0ff + +#define REG_ACCESS_R 0x200 +#define REG_ACCESS_W 0x400 +#define REG_ACCESS_RW 0x600 +#define REG_ACCESS_MASK 0x600 + +#define ISP1362_REG_NO(r) ((r) & REG_NO_MASK) + +#define _BUG_ON(x) BUG_ON(x) +#define _WARN_ON(x) WARN_ON(x) + +#define ISP1362_REG(name, addr, width, rw) \ +static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw)) + +#define REG_ACCESS_TEST(r) BUG_ON(((r) & ISP1362_REG_WRITE_OFFSET) && !((r) & REG_ACCESS_W)) +#define REG_WIDTH_TEST(r, w) BUG_ON(((r) & REG_WIDTH_MASK) != (w)) +#else +typedef const unsigned char isp1362_reg_t; +#define ISP1362_REG_NO(r) (r) +#define _BUG_ON(x) do {} while (0) +#define _WARN_ON(x) do {} while (0) + +#define ISP1362_REG(name, addr, width, rw) \ +static isp1362_reg_t ISP1362_REG_##name = addr + +#define REG_ACCESS_TEST(r) do {} while (0) +#define REG_WIDTH_TEST(r, w) do {} while (0) +#endif + +/* OHCI compatible registers */ +/* + * Note: Some of the ISP1362 'OHCI' registers implement only + * a subset of the bits defined in the OHCI spec.
+ * + * Bitmasks for the individual bits of these registers are defined in "ohci.h" + */ +ISP1362_REG(HCREVISION, 0x00, REG_WIDTH_32, REG_ACCESS_R); +ISP1362_REG(HCCONTROL, 0x01, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCCMDSTAT, 0x02, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCINTSTAT, 0x03, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCINTENB, 0x04, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCINTDIS, 0x05, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCFMINTVL, 0x0d, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCFMREM, 0x0e, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCFMNUM, 0x0f, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCLSTHRESH, 0x11, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCRHDESCA, 0x12, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCRHDESCB, 0x13, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCRHSTATUS, 0x14, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCRHPORT1, 0x15, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCRHPORT2, 0x16, REG_WIDTH_32, REG_ACCESS_RW); + +/* Philips ISP1362 specific registers */ +ISP1362_REG(HCHWCFG, 0x20, REG_WIDTH_16, REG_ACCESS_RW); +#define HCHWCFG_DISABLE_SUSPEND (1 << 15) +#define HCHWCFG_GLOBAL_PWRDOWN (1 << 14) +#define HCHWCFG_PULLDOWN_DS1 (1 << 13) +#define HCHWCFG_PULLDOWN_DS2 (1 << 12) +#define HCHWCFG_CLKNOTSTOP (1 << 11) +#define HCHWCFG_ANALOG_OC (1 << 10) +#define HCHWCFG_ONEINT (1 << 9) +#define HCHWCFG_DACK_MODE (1 << 8) +#define HCHWCFG_ONEDMA (1 << 7) +#define HCHWCFG_DACK_POL (1 << 6) +#define HCHWCFG_DREQ_POL (1 << 5) +#define HCHWCFG_DBWIDTH_MASK (0x03 << 3) +#define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK) +#define HCHWCFG_INT_POL (1 << 2) +#define HCHWCFG_INT_TRIGGER (1 << 1) +#define HCHWCFG_INT_ENABLE (1 << 0) + +ISP1362_REG(HCDMACFG, 0x21, REG_WIDTH_16, REG_ACCESS_RW); +#define HCDMACFG_CTR_ENABLE (1 << 7) +#define HCDMACFG_BURST_LEN_MASK (0x03 << 5) +#define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK) +#define HCDMACFG_BURST_LEN_1 HCDMACFG_BURST_LEN(0) +#define HCDMACFG_BURST_LEN_4 HCDMACFG_BURST_LEN(1) +#define HCDMACFG_BURST_LEN_8 HCDMACFG_BURST_LEN(2) +#define HCDMACFG_DMA_ENABLE (1 << 4) +#define HCDMACFG_BUF_TYPE_MASK (0x07 << 1) +#define HCDMACFG_BUF_TYPE(n) (((n) << 1) & HCDMACFG_BUF_TYPE_MASK) +#define HCDMACFG_BUF_ISTL0 HCDMACFG_BUF_TYPE(0) +#define HCDMACFG_BUF_ISTL1 HCDMACFG_BUF_TYPE(1) +#define HCDMACFG_BUF_INTL HCDMACFG_BUF_TYPE(2) +#define HCDMACFG_BUF_ATL HCDMACFG_BUF_TYPE(3) +#define HCDMACFG_BUF_DIRECT HCDMACFG_BUF_TYPE(4) +#define HCDMACFG_DMA_RW_SELECT (1 << 0) + +ISP1362_REG(HCXFERCTR, 0x22, REG_WIDTH_16, REG_ACCESS_RW); + +ISP1362_REG(HCuPINT, 0x24, REG_WIDTH_16, REG_ACCESS_RW); +#define HCuPINT_SOF (1 << 0) +#define HCuPINT_ISTL0 (1 << 1) +#define HCuPINT_ISTL1 (1 << 2) +#define HCuPINT_EOT (1 << 3) +#define HCuPINT_OPR (1 << 4) +#define HCuPINT_SUSP (1 << 5) +#define HCuPINT_CLKRDY (1 << 6) +#define HCuPINT_INTL (1 << 7) +#define HCuPINT_ATL (1 << 8) +#define HCuPINT_OTG (1 << 9) + +ISP1362_REG(HCuPINTENB, 0x25, REG_WIDTH_16, REG_ACCESS_RW); +/* same bit definitions apply as for HCuPINT */ + +ISP1362_REG(HCCHIPID, 0x27, REG_WIDTH_16, REG_ACCESS_R); +#define HCCHIPID_MASK 0xff00 +#define HCCHIPID_MAGIC 0x3600 + +ISP1362_REG(HCSCRATCH, 0x28, REG_WIDTH_16, REG_ACCESS_RW); + +ISP1362_REG(HCSWRES, 0x29, REG_WIDTH_16, REG_ACCESS_W); +#define HCSWRES_MAGIC 0x00f6 + +ISP1362_REG(HCBUFSTAT, 0x2c, REG_WIDTH_16, REG_ACCESS_RW); +#define HCBUFSTAT_ISTL0_FULL (1 << 0) +#define HCBUFSTAT_ISTL1_FULL (1 << 1) +#define HCBUFSTAT_INTL_ACTIVE (1 << 2) +#define HCBUFSTAT_ATL_ACTIVE (1 << 3) 
+#define HCBUFSTAT_RESET_HWPP (1 << 4) +#define HCBUFSTAT_ISTL0_ACTIVE (1 << 5) +#define HCBUFSTAT_ISTL1_ACTIVE (1 << 6) +#define HCBUFSTAT_ISTL0_DONE (1 << 8) +#define HCBUFSTAT_ISTL1_DONE (1 << 9) +#define HCBUFSTAT_PAIRED_PTDPP (1 << 10) + +ISP1362_REG(HCDIRADDR, 0x32, REG_WIDTH_32, REG_ACCESS_RW); +#define HCDIRADDR_ADDR_MASK 0x0000ffff +#define HCDIRADDR_ADDR(n) (((n) << 0) & HCDIRADDR_ADDR_MASK) +#define HCDIRADDR_COUNT_MASK 0xffff0000 +#define HCDIRADDR_COUNT(n) (((n) << 16) & HCDIRADDR_COUNT_MASK) +ISP1362_REG(HCDIRDATA, 0x45, REG_WIDTH_16, REG_ACCESS_RW); + +ISP1362_REG(HCISTLBUFSZ, 0x30, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCISTL0PORT, 0x40, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCISTL1PORT, 0x42, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCISTLRATE, 0x47, REG_WIDTH_16, REG_ACCESS_RW); + +ISP1362_REG(HCINTLBUFSZ, 0x33, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCINTLPORT, 0x43, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCINTLBLKSZ, 0x53, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCINTLDONE, 0x17, REG_WIDTH_32, REG_ACCESS_R); +ISP1362_REG(HCINTLSKIP, 0x18, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCINTLLAST, 0x19, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCINTLCURR, 0x1a, REG_WIDTH_16, REG_ACCESS_R); + +ISP1362_REG(HCATLBUFSZ, 0x34, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCATLPORT, 0x44, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCATLBLKSZ, 0x54, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCATLDONE, 0x1b, REG_WIDTH_32, REG_ACCESS_R); +ISP1362_REG(HCATLSKIP, 0x1c, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCATLLAST, 0x1d, REG_WIDTH_32, REG_ACCESS_RW); +ISP1362_REG(HCATLCURR, 0x1e, REG_WIDTH_16, REG_ACCESS_R); + +ISP1362_REG(HCATLDTC, 0x51, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(HCATLDTCTO, 0x52, REG_WIDTH_16, REG_ACCESS_RW); + + +ISP1362_REG(OTGCONTROL, 0x62, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(OTGSTATUS, 0x67, REG_WIDTH_16, REG_ACCESS_R); +ISP1362_REG(OTGINT, 0x68, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(OTGINTENB, 0x69, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(OTGTIMER, 0x6A, REG_WIDTH_16, REG_ACCESS_RW); +ISP1362_REG(OTGALTTMR, 0x6C, REG_WIDTH_16, REG_ACCESS_RW); + +/* Philips transfer descriptor, cpu-endian */ +struct ptd { + u16 count; +#define PTD_COUNT_MSK (0x3ff << 0) +#define PTD_TOGGLE_MSK (1 << 10) +#define PTD_ACTIVE_MSK (1 << 11) +#define PTD_CC_MSK (0xf << 12) + u16 mps; +#define PTD_MPS_MSK (0x3ff << 0) +#define PTD_SPD_MSK (1 << 10) +#define PTD_LAST_MSK (1 << 11) +#define PTD_EP_MSK (0xf << 12) + u16 len; +#define PTD_LEN_MSK (0x3ff << 0) +#define PTD_DIR_MSK (3 << 10) +#define PTD_DIR_SETUP (0) +#define PTD_DIR_OUT (1) +#define PTD_DIR_IN (2) + u16 faddr; +#define PTD_FA_MSK (0x7f << 0) +/* PTD Byte 7: [StartingFrame (if ISO PTD) | StartingFrame[0..4], PollingRate[0..2] (if INT PTD)] */ +#define PTD_SF_ISO_MSK (0xff << 8) +#define PTD_SF_INT_MSK (0x1f << 8) +#define PTD_PR_MSK (0x07 << 13) +} __attribute__ ((packed, aligned(2))); +#define PTD_HEADER_SIZE sizeof(struct ptd) + +/* ------------------------------------------------------------------------- */ +/* Copied from ohci.h: */ +/* + * Hardware transfer status codes -- CC from PTD + */ +#define PTD_CC_NOERROR 0x00 +#define PTD_CC_CRC 0x01 +#define PTD_CC_BITSTUFFING 0x02 +#define PTD_CC_DATATOGGLEM 0x03 +#define PTD_CC_STALL 0x04 +#define PTD_DEVNOTRESP 0x05 +#define PTD_PIDCHECKFAIL 0x06 +#define PTD_UNEXPECTEDPID 0x07 +#define PTD_DATAOVERRUN 0x08 +#define PTD_DATAUNDERRUN 0x09 + /* 0x0A, 0x0B reserved for hardware */ +#define PTD_BUFFEROVERRUN 0x0C +#define 
PTD_BUFFERUNDERRUN 0x0D + /* 0x0E, 0x0F reserved for HCD */ +#define PTD_NOTACCESSED 0x0F + + +/* map OHCI TD status codes (CC) to errno values */ +static const int cc_to_error[16] = { + /* No Error */ 0, + /* CRC Error */ -EILSEQ, + /* Bit Stuff */ -EPROTO, + /* Data Togg */ -EILSEQ, + /* Stall */ -EPIPE, + /* DevNotResp */ -ETIMEDOUT, + /* PIDCheck */ -EPROTO, + /* UnExpPID */ -EPROTO, + /* DataOver */ -EOVERFLOW, + /* DataUnder */ -EREMOTEIO, + /* (for hw) */ -EIO, + /* (for hw) */ -EIO, + /* BufferOver */ -ECOMM, + /* BuffUnder */ -ENOSR, + /* (for HCD) */ -EALREADY, + /* (for HCD) */ -EALREADY +}; + + +/* + * HcControl (control) register masks + */ +#define OHCI_CTRL_HCFS (3 << 6) /* host controller functional state */ +#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ +#define OHCI_CTRL_RWE (1 << 10) /* remote wakeup enable */ + +/* pre-shifted values for HCFS */ +# define OHCI_USB_RESET (0 << 6) +# define OHCI_USB_RESUME (1 << 6) +# define OHCI_USB_OPER (2 << 6) +# define OHCI_USB_SUSPEND (3 << 6) + +/* + * HcCommandStatus (cmdstatus) register masks + */ +#define OHCI_HCR (1 << 0) /* host controller reset */ +#define OHCI_SOC (3 << 16) /* scheduling overrun count */ + +/* + * masks used with interrupt registers: + * HcInterruptStatus (intrstatus) + * HcInterruptEnable (intrenable) + * HcInterruptDisable (intrdisable) + */ +#define OHCI_INTR_SO (1 << 0) /* scheduling overrun */ +#define OHCI_INTR_WDH (1 << 1) /* writeback of done_head */ +#define OHCI_INTR_SF (1 << 2) /* start frame */ +#define OHCI_INTR_RD (1 << 3) /* resume detect */ +#define OHCI_INTR_UE (1 << 4) /* unrecoverable error */ +#define OHCI_INTR_FNO (1 << 5) /* frame number overflow */ +#define OHCI_INTR_RHSC (1 << 6) /* root hub status change */ +#define OHCI_INTR_OC (1 << 30) /* ownership change */ +#define OHCI_INTR_MIE (1 << 31) /* master interrupt enable */ + +/* roothub.portstatus [i] bits */ +#define RH_PS_CCS 0x00000001 /* current connect status */ +#define RH_PS_PES 0x00000002 /* port enable status*/ +#define RH_PS_PSS 0x00000004 /* port suspend status */ +#define RH_PS_POCI 0x00000008 /* port over current indicator */ +#define RH_PS_PRS 0x00000010 /* port reset status */ +#define RH_PS_PPS 0x00000100 /* port power status */ +#define RH_PS_LSDA 0x00000200 /* low speed device attached */ +#define RH_PS_CSC 0x00010000 /* connect status change */ +#define RH_PS_PESC 0x00020000 /* port enable status change */ +#define RH_PS_PSSC 0x00040000 /* port suspend status change */ +#define RH_PS_OCIC 0x00080000 /* over current indicator change */ +#define RH_PS_PRSC 0x00100000 /* port reset status change */ + +/* roothub.status bits */ +#define RH_HS_LPS 0x00000001 /* local power status */ +#define RH_HS_OCI 0x00000002 /* over current indicator */ +#define RH_HS_DRWE 0x00008000 /* device remote wakeup enable */ +#define RH_HS_LPSC 0x00010000 /* local power status change */ +#define RH_HS_OCIC 0x00020000 /* over current indicator change */ +#define RH_HS_CRWE 0x80000000 /* clear remote wakeup enable */ + +/* roothub.b masks */ +#define RH_B_DR 0x0000ffff /* device removable flags */ +#define RH_B_PPCM 0xffff0000 /* port power control mask */ + +/* roothub.a masks */ +#define RH_A_NDP (0xff << 0) /* number of downstream ports */ +#define RH_A_PSM (1 << 8) /* power switching mode */ +#define RH_A_NPS (1 << 9) /* no power switching */ +#define RH_A_DT (1 << 10) /* device type (mbz) */ +#define RH_A_OCPM (1 << 11) /* over current protection mode */ +#define RH_A_NOCP (1 << 12) /* no over current protection */ 
+#define RH_A_POTPGT (0xff << 24) /* power on to power good time */ + +#define FI 0x2edf /* 12000 bits per frame (-1) */ +#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7)) +#define LSTHRESH 0x628 /* lowspeed bit threshold */ + +/* ------------------------------------------------------------------------- */ + +/* PTD accessor macros. */ +#define PTD_GET_COUNT(p) (((p)->count & PTD_COUNT_MSK) >> 0) +#define PTD_COUNT(v) (((v) << 0) & PTD_COUNT_MSK) +#define PTD_GET_TOGGLE(p) (((p)->count & PTD_TOGGLE_MSK) >> 10) +#define PTD_TOGGLE(v) (((v) << 10) & PTD_TOGGLE_MSK) +#define PTD_GET_ACTIVE(p) (((p)->count & PTD_ACTIVE_MSK) >> 11) +#define PTD_ACTIVE(v) (((v) << 11) & PTD_ACTIVE_MSK) +#define PTD_GET_CC(p) (((p)->count & PTD_CC_MSK) >> 12) +#define PTD_CC(v) (((v) << 12) & PTD_CC_MSK) +#define PTD_GET_MPS(p) (((p)->mps & PTD_MPS_MSK) >> 0) +#define PTD_MPS(v) (((v) << 0) & PTD_MPS_MSK) +#define PTD_GET_SPD(p) (((p)->mps & PTD_SPD_MSK) >> 10) +#define PTD_SPD(v) (((v) << 10) & PTD_SPD_MSK) +#define PTD_GET_LAST(p) (((p)->mps & PTD_LAST_MSK) >> 11) +#define PTD_LAST(v) (((v) << 11) & PTD_LAST_MSK) +#define PTD_GET_EP(p) (((p)->mps & PTD_EP_MSK) >> 12) +#define PTD_EP(v) (((v) << 12) & PTD_EP_MSK) +#define PTD_GET_LEN(p) (((p)->len & PTD_LEN_MSK) >> 0) +#define PTD_LEN(v) (((v) << 0) & PTD_LEN_MSK) +#define PTD_GET_DIR(p) (((p)->len & PTD_DIR_MSK) >> 10) +#define PTD_DIR(v) (((v) << 10) & PTD_DIR_MSK) +#define PTD_GET_FA(p) (((p)->faddr & PTD_FA_MSK) >> 0) +#define PTD_FA(v) (((v) << 0) & PTD_FA_MSK) +#define PTD_GET_SF_INT(p) (((p)->faddr & PTD_SF_INT_MSK) >> 8) +#define PTD_SF_INT(v) (((v) << 8) & PTD_SF_INT_MSK) +#define PTD_GET_SF_ISO(p) (((p)->faddr & PTD_SF_ISO_MSK) >> 8) +#define PTD_SF_ISO(v) (((v) << 8) & PTD_SF_ISO_MSK) +#define PTD_GET_PR(p) (((p)->faddr & PTD_PR_MSK) >> 13) +#define PTD_PR(v) (((v) << 13) & PTD_PR_MSK) + +#define LOG2_PERIODIC_SIZE 5 /* arbitrary; this matches OHCI */ +#define PERIODIC_SIZE (1 << LOG2_PERIODIC_SIZE) + +struct isp1362_ep { + struct usb_host_endpoint *hep; + struct usb_device *udev; + + /* philips transfer descriptor */ + struct ptd ptd; + + u8 maxpacket; + u8 epnum; + u8 nextpid; + u16 error_count; + u16 length; /* of current packet */ + s16 ptd_offset; /* buffer offset in ISP1362 where + PTD has been stored + (for access thru HCDIRDATA) */ + int ptd_index; + int num_ptds; + void *data; /* to databuf */ + /* queue of active EPs (the ones transmitted to the chip) */ + struct list_head active; + + /* periodic schedule */ + u8 branch; + u16 interval; + u16 load; + u16 last_iso; + + /* async schedule */ + struct list_head schedule; /* list of all EPs that need processing */ + struct list_head remove_list; + int num_req; +}; + +struct isp1362_ep_queue { + struct list_head active; /* list of PTDs currently processed by HC */ + atomic_t finishing; + unsigned long buf_map; + unsigned long skip_map; + int free_ptd; + u16 buf_start; + u16 buf_size; + u16 blk_size; /* PTD buffer block size for ATL and INTL */ + u8 buf_count; + u8 buf_avail; + char name[16]; + + /* for statistical tracking */ + u8 stat_maxptds; /* Max # of ptds seen simultaneously in fifo */ + u8 ptd_count; /* number of ptds submitted to this queue */ +}; + +struct isp1362_hcd { + spinlock_t lock; + void __iomem *addr_reg; + void __iomem *data_reg; + + struct isp1362_platform_data *board; + + struct proc_dir_entry *pde; + unsigned long stat1, stat2, stat4, stat8, stat16; + + /* HC registers */ + u32 intenb; /* "OHCI" interrupts */ + u16 irqenb; /* uP interrupts */ + + /* Root hub registers */ + 
u32 rhdesca; + u32 rhdescb; + u32 rhstatus; + u32 rhport[MAX_ROOT_PORTS]; + unsigned long next_statechange; + + /* HC control reg shadow copy */ + u32 hc_control; + + /* async schedule: control, bulk */ + struct list_head async; + + /* periodic schedule: int */ + u16 load[PERIODIC_SIZE]; + struct list_head periodic; + u16 fmindex; + + /* periodic schedule: isochronous */ + struct list_head isoc; + int istl_flip:1; + int irq_active:1; + + /* Schedules for the current frame */ + struct isp1362_ep_queue atl_queue; + struct isp1362_ep_queue intl_queue; + struct isp1362_ep_queue istl_queue[2]; + + /* list of PTDs retrieved from HC */ + struct list_head remove_list; + enum { + ISP1362_INT_SOF, + ISP1362_INT_ISTL0, + ISP1362_INT_ISTL1, + ISP1362_INT_EOT, + ISP1362_INT_OPR, + ISP1362_INT_SUSP, + ISP1362_INT_CLKRDY, + ISP1362_INT_INTL, + ISP1362_INT_ATL, + ISP1362_INT_OTG, + NUM_ISP1362_IRQS + } IRQ_NAMES; + unsigned int irq_stat[NUM_ISP1362_IRQS]; + int req_serial; +}; + +static inline const char *ISP1362_INT_NAME(int n) +{ + switch (n) { + case ISP1362_INT_SOF: return "SOF"; + case ISP1362_INT_ISTL0: return "ISTL0"; + case ISP1362_INT_ISTL1: return "ISTL1"; + case ISP1362_INT_EOT: return "EOT"; + case ISP1362_INT_OPR: return "OPR"; + case ISP1362_INT_SUSP: return "SUSP"; + case ISP1362_INT_CLKRDY: return "CLKRDY"; + case ISP1362_INT_INTL: return "INTL"; + case ISP1362_INT_ATL: return "ATL"; + case ISP1362_INT_OTG: return "OTG"; + default: return "unknown"; + } +} + +static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr) +{ + unsigned p = (unsigned)ptr; + if (!(p & 0xf)) + isp1362_hcd->stat16++; + else if (!(p & 0x7)) + isp1362_hcd->stat8++; + else if (!(p & 0x3)) + isp1362_hcd->stat4++; + else if (!(p & 0x1)) + isp1362_hcd->stat2++; + else + isp1362_hcd->stat1++; +} + +static inline struct isp1362_hcd *hcd_to_isp1362_hcd(struct usb_hcd *hcd) +{ + return (struct isp1362_hcd *) (hcd->hcd_priv); +} + +static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd) +{ + return container_of((void *)isp1362_hcd, struct usb_hcd, hcd_priv); +} + +#define frame_before(f1, f2) ((s16)((u16)f1 - (u16)f2) < 0) + +/* + * ISP1362 HW Interface + */ + +#ifdef ISP1362_DEBUG +#define DBG(level, fmt...) \ + do { \ + if (dbg_level > level) \ + pr_debug(fmt); \ + } while (0) +#define _DBG(level, fmt...) \ + do { \ + if (dbg_level > level) \ + printk(fmt); \ + } while (0) +#else +#define DBG(fmt...) do {} while (0) +#define _DBG DBG +#endif + +#ifdef VERBOSE +# define VDBG(fmt...) DBG(3, fmt) +#else +# define VDBG(fmt...) do {} while (0) +#endif + +#ifdef REGISTERS +# define RDBG(fmt...) DBG(1, fmt) +#else +# define RDBG(fmt...) do {} while (0) +#endif + +#ifdef URB_TRACE +#define URB_DBG(fmt...) DBG(0, fmt) +#else +#define URB_DBG(fmt...) do {} while (0) +#endif + + +#if USE_PLATFORM_DELAY +#if USE_NDELAY +#error USE_PLATFORM_DELAY and USE_NDELAY defined simultaneously. +#endif +#define isp1362_delay(h, d) (h)->board->delay(isp1362_hcd_to_hcd(h)->self.controller, d) +#elif USE_NDELAY +#define isp1362_delay(h, d) ndelay(d) +#else +#define isp1362_delay(h, d) do {} while (0) +#endif + +#define get_urb(ep) ({ \ + BUG_ON(list_empty(&ep->hep->urb_list)); \ + container_of(ep->hep->urb_list.next, struct urb, urb_list); \ +}) + +/* basic access functions for ISP1362 chip registers */ +/* NOTE: The contents of the address pointer register cannot be read back! 
The driver must ensure, + * that all register accesses are performed with interrupts disabled, since the interrupt + * handler has no way of restoring the previous state. + */ +static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg) +{ + /*_BUG_ON((reg & ISP1362_REG_WRITE_OFFSET) && !(reg & REG_ACCESS_W));*/ + REG_ACCESS_TEST(reg); + _BUG_ON(!irqs_disabled()); + DUMMY_DELAY_ACCESS; + writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg); + DUMMY_DELAY_ACCESS; + isp1362_delay(isp1362_hcd, 1); +} + +static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val) +{ + _BUG_ON(!irqs_disabled()); + DUMMY_DELAY_ACCESS; + writew(val, isp1362_hcd->data_reg); +} + +static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd) +{ + u16 val; + + _BUG_ON(!irqs_disabled()); + DUMMY_DELAY_ACCESS; + val = readw(isp1362_hcd->data_reg); + + return val; +} + +static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val) +{ + _BUG_ON(!irqs_disabled()); +#if USE_32BIT + DUMMY_DELAY_ACCESS; + writel(val, isp1362_hcd->data_reg); +#else + DUMMY_DELAY_ACCESS; + writew((u16)val, isp1362_hcd->data_reg); + DUMMY_DELAY_ACCESS; + writew(val >> 16, isp1362_hcd->data_reg); +#endif +} + +static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd) +{ + u32 val; + + _BUG_ON(!irqs_disabled()); +#if USE_32BIT + DUMMY_DELAY_ACCESS; + val = readl(isp1362_hcd->data_reg); +#else + DUMMY_DELAY_ACCESS; + val = (u32)readw(isp1362_hcd->data_reg); + DUMMY_DELAY_ACCESS; + val |= (u32)readw(isp1362_hcd->data_reg) << 16; +#endif + return val; +} + +/* use readsw/writesw to access the fifo whenever possible */ +/* assume HCDIRDATA or XFERCTR & addr_reg have been set up */ +static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len) +{ + u8 *dp = buf; + u16 data; + + if (!len) + return; + + _BUG_ON(!irqs_disabled()); + + RDBG("%s: Reading %d byte from fifo to mem @ %p\n", __func__, len, buf); +#if USE_32BIT + if (len >= 4) { + RDBG("%s: Using readsl for %d dwords\n", __func__, len >> 2); + readsl(isp1362_hcd->data_reg, dp, len >> 2); + dp += len & ~3; + len &= 3; + } +#endif + if (len >= 2) { + RDBG("%s: Using readsw for %d words\n", __func__, len >> 1); + insw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1); + dp += len & ~1; + len &= 1; + } + + BUG_ON(len & ~1); + if (len > 0) { + data = isp1362_read_data16(isp1362_hcd); + RDBG("%s: Reading trailing byte %02x to mem @ %08x\n", __func__, + (u8)data, (u32)dp); + *dp = (u8)data; + } +} + +static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len) +{ + u8 *dp = buf; + u16 data; + + if (!len) + return; + + if ((unsigned)dp & 0x1) { + /* not aligned */ + for (; len > 1; len -= 2) { + data = *dp++; + data |= *dp++ << 8; + isp1362_write_data16(isp1362_hcd, data); + } + if (len) + isp1362_write_data16(isp1362_hcd, *dp); + return; + } + + _BUG_ON(!irqs_disabled()); + + RDBG("%s: Writing %d byte to fifo from memory @%p\n", __func__, len, buf); +#if USE_32BIT + if (len >= 4) { + RDBG("%s: Using writesl for %d dwords\n", __func__, len >> 2); + writesl(isp1362_hcd->data_reg, dp, len >> 2); + dp += len & ~3; + len &= 3; + } +#endif + if (len >= 2) { + RDBG("%s: Using writesw for %d words\n", __func__, len >> 1); + outsw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1); + dp += len & ~1; + len &= 1; + } + + BUG_ON(len & ~1); + if (len > 0) { + /* finally write any trailing byte; we don't need to care + * about the high byte of the last word written + */ + data = (u16)*dp; + RDBG("%s: Sending 
trailing byte %02x from mem @ %08x\n", __func__, + data, (u32)dp); + isp1362_write_data16(isp1362_hcd, data); + } +} + +#define isp1362_read_reg16(d, r) ({ \ + u16 __v; \ + REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \ + isp1362_write_addr(d, ISP1362_REG_##r); \ + __v = isp1362_read_data16(d); \ + RDBG("%s: Read %04x from %s[%02x]\n", __func__, __v, #r, \ + ISP1362_REG_NO(ISP1362_REG_##r)); \ + __v; \ +}) + +#define isp1362_read_reg32(d, r) ({ \ + u32 __v; \ + REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \ + isp1362_write_addr(d, ISP1362_REG_##r); \ + __v = isp1362_read_data32(d); \ + RDBG("%s: Read %08x from %s[%02x]\n", __func__, __v, #r, \ + ISP1362_REG_NO(ISP1362_REG_##r)); \ + __v; \ +}) + +#define isp1362_write_reg16(d, r, v) { \ + REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \ + isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \ + isp1362_write_data16(d, (u16)(v)); \ + RDBG("%s: Wrote %04x to %s[%02x]\n", __func__, (u16)(v), #r, \ + ISP1362_REG_NO(ISP1362_REG_##r)); \ +} + +#define isp1362_write_reg32(d, r, v) { \ + REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \ + isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \ + isp1362_write_data32(d, (u32)(v)); \ + RDBG("%s: Wrote %08x to %s[%02x]\n", __func__, (u32)(v), #r, \ + ISP1362_REG_NO(ISP1362_REG_##r)); \ +} + +#define isp1362_set_mask16(d, r, m) { \ + u16 __v; \ + __v = isp1362_read_reg16(d, r); \ + if ((__v | m) != __v) \ + isp1362_write_reg16(d, r, __v | m); \ +} + +#define isp1362_clr_mask16(d, r, m) { \ + u16 __v; \ + __v = isp1362_read_reg16(d, r); \ + if ((__v & ~m) != __v) \ + isp1362_write_reg16(d, r, __v & ~m); \ +} + +#define isp1362_set_mask32(d, r, m) { \ + u32 __v; \ + __v = isp1362_read_reg32(d, r); \ + if ((__v | m) != __v) \ + isp1362_write_reg32(d, r, __v | m); \ +} + +#define isp1362_clr_mask32(d, r, m) { \ + u32 __v; \ + __v = isp1362_read_reg32(d, r); \ + if ((__v & ~m) != __v) \ + isp1362_write_reg32(d, r, __v & ~m); \ +} + +#ifdef ISP1362_DEBUG +#define isp1362_show_reg(d, r) { \ + if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \ + DBG(0, "%-12s[%02x]: %08x\n", #r, \ + ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg32(d, r)); \ + else \ + DBG(0, "%-12s[%02x]: %04x\n", #r, \ + ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \ +} +#else +#define isp1362_show_reg(d, r) do {} while (0) +#endif + +static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd) +{ + isp1362_show_reg(isp1362_hcd, HCREVISION); + isp1362_show_reg(isp1362_hcd, HCCONTROL); + isp1362_show_reg(isp1362_hcd, HCCMDSTAT); + isp1362_show_reg(isp1362_hcd, HCINTSTAT); + isp1362_show_reg(isp1362_hcd, HCINTENB); + isp1362_show_reg(isp1362_hcd, HCFMINTVL); + isp1362_show_reg(isp1362_hcd, HCFMREM); + isp1362_show_reg(isp1362_hcd, HCFMNUM); + isp1362_show_reg(isp1362_hcd, HCLSTHRESH); + isp1362_show_reg(isp1362_hcd, HCRHDESCA); + isp1362_show_reg(isp1362_hcd, HCRHDESCB); + isp1362_show_reg(isp1362_hcd, HCRHSTATUS); + isp1362_show_reg(isp1362_hcd, HCRHPORT1); + isp1362_show_reg(isp1362_hcd, HCRHPORT2); + + isp1362_show_reg(isp1362_hcd, HCHWCFG); + isp1362_show_reg(isp1362_hcd, HCDMACFG); + isp1362_show_reg(isp1362_hcd, HCXFERCTR); + isp1362_show_reg(isp1362_hcd, HCuPINT); + + if (in_interrupt()) + DBG(0, "%-12s[%02x]: %04x\n", "HCuPINTENB", + ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), isp1362_hcd->irqenb); + else + isp1362_show_reg(isp1362_hcd, HCuPINTENB); + isp1362_show_reg(isp1362_hcd, HCCHIPID); + isp1362_show_reg(isp1362_hcd, HCSCRATCH); + 
isp1362_show_reg(isp1362_hcd, HCBUFSTAT); + isp1362_show_reg(isp1362_hcd, HCDIRADDR); + /* Access would advance fifo + * isp1362_show_reg(isp1362_hcd, HCDIRDATA); + */ + isp1362_show_reg(isp1362_hcd, HCISTLBUFSZ); + isp1362_show_reg(isp1362_hcd, HCISTLRATE); + isp1362_show_reg(isp1362_hcd, HCINTLBUFSZ); + isp1362_show_reg(isp1362_hcd, HCINTLBLKSZ); + isp1362_show_reg(isp1362_hcd, HCINTLDONE); + isp1362_show_reg(isp1362_hcd, HCINTLSKIP); + isp1362_show_reg(isp1362_hcd, HCINTLLAST); + isp1362_show_reg(isp1362_hcd, HCINTLCURR); + isp1362_show_reg(isp1362_hcd, HCATLBUFSZ); + isp1362_show_reg(isp1362_hcd, HCATLBLKSZ); + /* only valid after ATL_DONE interrupt + * isp1362_show_reg(isp1362_hcd, HCATLDONE); + */ + isp1362_show_reg(isp1362_hcd, HCATLSKIP); + isp1362_show_reg(isp1362_hcd, HCATLLAST); + isp1362_show_reg(isp1362_hcd, HCATLCURR); + isp1362_show_reg(isp1362_hcd, HCATLDTC); + isp1362_show_reg(isp1362_hcd, HCATLDTCTO); +} + +static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len) +{ + _BUG_ON(offset & 1); + _BUG_ON(offset >= ISP1362_BUF_SIZE); + _BUG_ON(len > ISP1362_BUF_SIZE); + _BUG_ON(offset + len > ISP1362_BUF_SIZE); + len = (len + 1) & ~1; + + isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE); + isp1362_write_reg32(isp1362_hcd, HCDIRADDR, + HCDIRADDR_ADDR(offset) | HCDIRADDR_COUNT(len)); +} + +static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len) +{ + _BUG_ON(offset & 1); + + isp1362_write_diraddr(isp1362_hcd, offset, len); + + DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %08x\n", __func__, + len, offset, (u32)buf); + + isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); + _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); + + isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA); + + isp1362_read_fifo(isp1362_hcd, buf, len); + _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); + isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); + _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); +} + +static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len) +{ + _BUG_ON(offset & 1); + + isp1362_write_diraddr(isp1362_hcd, offset, len); + + DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %08x\n", __func__, + len, offset, (u32)buf); + + isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); + _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); + + isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET); + isp1362_write_fifo(isp1362_hcd, buf, len); + + _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); + isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); + _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); +} + +static void __attribute__((unused)) dump_data(char *buf, int len) +{ + if (dbg_level > 0) { + int k; + int lf = 0; + + for (k = 0; k < len; ++k) { + if (!lf) + DBG(0, "%04x:", k); + printk(" %02x", ((u8 *) buf)[k]); + lf = 1; + if (!k) + continue; + if (k % 16 == 15) { + printk("\n"); + lf = 0; + continue; + } + if (k % 8 == 7) + printk(" "); + if (k % 4 == 3) + printk(" "); + } + if (lf) + printk("\n"); + } +} + +#if defined(ISP1362_DEBUG) && defined(PTD_TRACE) + +static void dump_ptd(struct ptd *ptd) +{ + DBG(0, "EP %p: CC=%x EP=%d DIR=%x CNT=%d LEN=%d MPS=%d TGL=%x ACT=%x FA=%d SPD=%x SF=%x PR=%x LST=%x\n", + container_of(ptd, struct isp1362_ep, ptd), + PTD_GET_CC(ptd), PTD_GET_EP(ptd), 
PTD_GET_DIR(ptd), + PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd), + PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd), PTD_GET_FA(ptd), + PTD_GET_SPD(ptd), PTD_GET_SF_INT(ptd), PTD_GET_PR(ptd), PTD_GET_LAST(ptd)); + DBG(0, " %04x %04x %04x %04x\n", ptd->count, ptd->mps, ptd->len, ptd->faddr); +} + +static void dump_ptd_out_data(struct ptd *ptd, u8 *buf) +{ + if (dbg_level > 0) { + if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) { + DBG(0, "--out->\n"); + dump_data(buf, PTD_GET_LEN(ptd)); + } + } +} + +static void dump_ptd_in_data(struct ptd *ptd, u8 *buf) +{ + if (dbg_level > 0) { + if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) { + DBG(0, "<--in--\n"); + dump_data(buf, PTD_GET_COUNT(ptd)); + } + DBG(0, "-----\n"); + } +} + +static void dump_ptd_queue(struct isp1362_ep_queue *epq) +{ + struct isp1362_ep *ep; + int dbg = dbg_level; + + dbg_level = 1; + list_for_each_entry(ep, &epq->active, active) { + dump_ptd(&ep->ptd); + dump_data(ep->data, ep->length); + } + dbg_level = dbg; +} +#else +#define dump_ptd(ptd) do {} while (0) +#define dump_ptd_in_data(ptd, buf) do {} while (0) +#define dump_ptd_out_data(ptd, buf) do {} while (0) +#define dump_ptd_data(ptd, buf) do {} while (0) +#define dump_ptd_queue(epq) do {} while (0) +#endif diff --git a/include/linux/usb/isp1362.h b/include/linux/usb/isp1362.h new file mode 100644 index 00000000000..642684bb929 --- /dev/null +++ b/include/linux/usb/isp1362.h @@ -0,0 +1,46 @@ +/* + * board initialization code should put one of these into dev->platform_data + * and place the isp1362 onto platform_bus. + */ + +#ifndef __LINUX_USB_ISP1362_H__ +#define __LINUX_USB_ISP1362_H__ + +struct isp1362_platform_data { + /* Enable internal pulldown resistors on downstream ports */ + unsigned sel15Kres:1; + /* Clock cannot be stopped */ + unsigned clknotstop:1; + /* On-chip overcurrent protection */ + unsigned oc_enable:1; + /* INT output polarity */ + unsigned int_act_high:1; + /* INT edge or level triggered */ + unsigned int_edge_triggered:1; + /* DREQ output polarity */ + unsigned dreq_act_high:1; + /* DACK input polarity */ + unsigned dack_act_high:1; + /* chip can be resumed via H_WAKEUP pin */ + unsigned remote_wakeup_connected:1; + /* Switch or not to switch (keep always powered) */ + unsigned no_power_switching:1; + /* Ganged port power switching (0) or individual port power switching (1) */ + unsigned power_switching_mode:1; + /* Given port_power, msec/2 after power on till power good */ + u8 potpg; + /* Hardware reset set/clear */ + void (*reset) (struct device *dev, int set); + /* Clock start/stop */ + void (*clock) (struct device *dev, int start); + /* Inter-io delay (ns). The chip is picky about access timings; it + * expects at least: + * 110ns delay between consecutive accesses to DATA_REG, + * 300ns delay between access to ADDR_REG and DATA_REG (registers) + * 462ns delay between access to ADDR_REG and DATA_REG (buffer memory) + * The WE signal MUST NOT be activated during these intervals (even without CS!) + */ + void (*delay) (struct device *dev, unsigned int delay); +}; + +#endif -- cgit v1.2.3-70-g09d2 From c35013087aa9b10e4674b53b7c8f7966de83c194 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 29 Jul 2009 14:23:25 +0300 Subject: USB: audio: guard kernel-only code with __KERNEL__ include/linux/usb/audio.h is exported to userspace, so the part of this file that is for internal kernel usage needs to be guarded with ifdef __KERNEL__. This way make headers_install will strip it out. Signed-off-by: Michael S.
Tsirkin Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/audio.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index 7b33c493917..eaf9dffe0a0 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -276,6 +276,8 @@ struct uac_iso_endpoint_descriptor { #define UAC_FU_BASS_BOOST (1 << (UAC_BASS_BOOST_CONTROL - 1)) #define UAC_FU_LOUDNESS (1 << (UAC_LOUDNESS_CONTROL - 1)) +#ifdef __KERNEL__ + struct usb_audio_control { struct list_head list; const char *name; @@ -294,4 +296,6 @@ struct usb_audio_control_selector { struct usb_descriptor_header *desc; }; +#endif /* __KERNEL__ */ + #endif /* __LINUX_USB_AUDIO_H */ -- cgit v1.2.3-70-g09d2 From 8e8dce065088833fc418bfa5fbf035cb0726c04c Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Fri, 28 Aug 2009 12:54:27 -0700 Subject: USB: use kfifo to buffer usb-generic serial writes When do_output_char() attempts to write a carriage return/line feed sequence, it first checks to see how much buffer room is available. If there are at least two characters free, it will write the carriage return/line feed with two calls to tty_put_char(). It calls the tty_operation functions write() for devices that don't support the tty_operations function put_char(). If the USB generic serial device's write URB is not in use, it will return the buffer size when asked how much room is available. The write() of the carriage return will cause it to mark the write URB busy, so the subsequent write() of the line feed will be ignored. This patch uses the kfifo infrastructure to implement a write FIFO that accurately returns the amount of space available in the buffer. Signed-off-by: David VomLehn Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/generic.c | 206 ++++++++++++++++++++++++---------------- drivers/usb/serial/usb-serial.c | 7 ++ include/linux/usb/serial.h | 2 + 3 files changed, 134 insertions(+), 81 deletions(-) (limited to 'include') diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index d9398e9f30c..deba08c7a01 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -19,7 +19,7 @@ #include #include #include - +#include static int debug; @@ -166,24 +166,6 @@ static void generic_cleanup(struct usb_serial_port *port) } } -int usb_serial_generic_resume(struct usb_serial *serial) -{ - struct usb_serial_port *port; - int i, c = 0, r; - - for (i = 0; i < serial->num_ports; i++) { - port = serial->port[i]; - if (port->port.count && port->read_urb) { - r = usb_submit_urb(port->read_urb, GFP_NOIO); - if (r < 0) - c++; - } - } - - return c ? -EIO : 0; -} -EXPORT_SYMBOL_GPL(usb_serial_generic_resume); - void usb_serial_generic_close(struct usb_serial_port *port) { dbg("%s - port %d", __func__, port->number); @@ -272,12 +254,81 @@ error_no_buffer: return bwrite; } +/** + * usb_serial_generic_write_start - kick off an URB write + * @port: Pointer to the &struct usb_serial_port data + * + * Returns the number of bytes queued on success. This will be zero if there + * was nothing to send. Otherwise, it returns a negative errno value + */ +static int usb_serial_generic_write_start(struct usb_serial_port *port) +{ + struct usb_serial *serial = port->serial; + unsigned char *data; + int result; + int count; + unsigned long flags; + bool start_io; + + /* Atomically determine whether we can and need to start a USB + * operation. 
*/ + spin_lock_irqsave(&port->lock, flags); + if (port->write_urb_busy) + start_io = false; + else { + start_io = (__kfifo_len(port->write_fifo) != 0); + port->write_urb_busy = start_io; + } + spin_unlock_irqrestore(&port->lock, flags); + + if (!start_io) + return 0; + + data = port->write_urb->transfer_buffer; + count = kfifo_get(port->write_fifo, data, port->bulk_out_size); + usb_serial_debug_data(debug, &port->dev, __func__, count, data); + + /* set up our urb */ + usb_fill_bulk_urb(port->write_urb, serial->dev, + usb_sndbulkpipe(serial->dev, + port->bulk_out_endpointAddress), + port->write_urb->transfer_buffer, count, + ((serial->type->write_bulk_callback) ? + serial->type->write_bulk_callback : + usb_serial_generic_write_bulk_callback), + port); + + /* send the data out the bulk port */ + result = usb_submit_urb(port->write_urb, GFP_ATOMIC); + if (result) { + dev_err(&port->dev, + "%s - failed submitting write urb, error %d\n", + __func__, result); + /* don't have to grab the lock here, as we will + retry if != 0 */ + port->write_urb_busy = 0; + } else + result = count; + + return result; +} + +/** + * usb_serial_generic_write - generic write function for serial USB devices + * @tty: Pointer to &struct tty_struct for the device + * @port: Pointer to the &usb_serial_port structure for the device + * @buf: Pointer to the data to write + * @count: Number of bytes to write + * + * Returns the number of characters actually written, which may be anything + * from zero to @count. If an error occurs, it returns the negative errno + * value. + */ int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct usb_serial *serial = port->serial; int result; - unsigned char *data; dbg("%s - port %d", __func__, port->number); @@ -287,57 +338,20 @@ int usb_serial_generic_write(struct tty_struct *tty, } /* only do something if we have a bulk out endpoint */ - if (serial->num_bulk_out) { - unsigned long flags; - - if (serial->type->max_in_flight_urbs) - return usb_serial_multi_urb_write(tty, port, - buf, count); - - spin_lock_irqsave(&port->lock, flags); - if (port->write_urb_busy) { - spin_unlock_irqrestore(&port->lock, flags); - dbg("%s - already writing", __func__); - return 0; - } - port->write_urb_busy = 1; - spin_unlock_irqrestore(&port->lock, flags); - - count = (count > port->bulk_out_size) ? - port->bulk_out_size : count; - - memcpy(port->write_urb->transfer_buffer, buf, count); - data = port->write_urb->transfer_buffer; - usb_serial_debug_data(debug, &port->dev, __func__, count, data); + if (!serial->num_bulk_out) + return 0; - /* set up our urb */ - usb_fill_bulk_urb(port->write_urb, serial->dev, - usb_sndbulkpipe(serial->dev, - port->bulk_out_endpointAddress), - port->write_urb->transfer_buffer, count, - ((serial->type->write_bulk_callback) ? 
- serial->type->write_bulk_callback : - usb_serial_generic_write_bulk_callback), - port); + if (serial->type->max_in_flight_urbs) + return usb_serial_multi_urb_write(tty, port, + buf, count); - /* send the data out the bulk port */ - port->write_urb_busy = 1; - result = usb_submit_urb(port->write_urb, GFP_ATOMIC); - if (result) { - dev_err(&port->dev, - "%s - failed submitting write urb, error %d\n", - __func__, result); - /* don't have to grab the lock here, as we will - retry if != 0 */ - port->write_urb_busy = 0; - } else - result = count; + count = kfifo_put(port->write_fifo, buf, count); + result = usb_serial_generic_write_start(port); - return result; - } + if (result >= 0) + result = count; - /* no bulk out, so return 0 bytes written */ - return 0; + return result; } EXPORT_SYMBOL_GPL(usb_serial_generic_write); @@ -355,9 +369,8 @@ int usb_serial_generic_write_room(struct tty_struct *tty) room = port->bulk_out_size * (serial->type->max_in_flight_urbs - port->urbs_in_flight); - } else if (serial->num_bulk_out && !(port->write_urb_busy)) { - room = port->bulk_out_size; - } + } else if (serial->num_bulk_out) + room = port->write_fifo->size - __kfifo_len(port->write_fifo); spin_unlock_irqrestore(&port->lock, flags); dbg("%s - returns %d", __func__, room); @@ -377,11 +390,8 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty) spin_lock_irqsave(&port->lock, flags); chars = port->tx_bytes_flight; spin_unlock_irqrestore(&port->lock, flags); - } else if (serial->num_bulk_out) { - /* FIXME: Locking */ - if (port->write_urb_busy) - chars = port->write_urb->transfer_buffer_length; - } + } else if (serial->num_bulk_out) + chars = kfifo_len(port->write_fifo); dbg("%s - returns %d", __func__, chars); return chars; @@ -485,16 +495,23 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb) if (port->urbs_in_flight < 0) port->urbs_in_flight = 0; spin_unlock_irqrestore(&port->lock, flags); + + if (status) { + dbg("%s - nonzero multi-urb write bulk status " + "received: %d", __func__, status); + return; + } } else { - /* Handle the case for single urb mode */ port->write_urb_busy = 0; - } - if (status) { - dbg("%s - nonzero write bulk status received: %d", - __func__, status); - return; + if (status) { + dbg("%s - nonzero multi-urb write bulk status " + "received: %d", __func__, status); + kfifo_reset(port->write_fifo); + } else + usb_serial_generic_write_start(port); } + usb_serial_port_softint(port); } EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback); @@ -559,6 +576,33 @@ int usb_serial_handle_break(struct usb_serial_port *port) } EXPORT_SYMBOL_GPL(usb_serial_handle_break); +int usb_serial_generic_resume(struct usb_serial *serial) +{ + struct usb_serial_port *port; + int i, c = 0, r; + + for (i = 0; i < serial->num_ports; i++) { + port = serial->port[i]; + if (!port->port.count) + continue; + + if (port->read_urb) { + r = usb_submit_urb(port->read_urb, GFP_NOIO); + if (r < 0) + c++; + } + + if (port->write_urb) { + r = usb_serial_generic_write_start(port); + if (r < 0) + c++; + } + } + + return c ? 
-EIO : 0; +} +EXPORT_SYMBOL_GPL(usb_serial_generic_resume); + void usb_serial_generic_disconnect(struct usb_serial *serial) { int i; diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 45975b4984e..ff75a3589e7 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "pl2303.h" /* @@ -625,6 +626,8 @@ static void port_release(struct device *dev) usb_free_urb(port->write_urb); usb_free_urb(port->interrupt_in_urb); usb_free_urb(port->interrupt_out_urb); + if (!IS_ERR(port->write_fifo) && port->write_fifo) + kfifo_free(port->write_fifo); kfree(port->bulk_in_buffer); kfree(port->bulk_out_buffer); kfree(port->interrupt_in_buffer); @@ -964,6 +967,10 @@ int usb_serial_probe(struct usb_interface *interface, dev_err(&interface->dev, "No free urbs available\n"); goto probe_error; } + port->write_fifo = kfifo_alloc(PAGE_SIZE, GFP_KERNEL, + &port->lock); + if (IS_ERR(port->write_fifo)) + goto probe_error; buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); port->bulk_out_size = buffer_size; port->bulk_out_endpointAddress = endpoint->bEndpointAddress; diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 7b85e327af9..c17eb64d721 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -59,6 +59,7 @@ enum port_dev_state { * @bulk_out_buffer: pointer to the bulk out buffer for this port. * @bulk_out_size: the size of the bulk_out_buffer, in bytes. * @write_urb: pointer to the bulk out struct urb for this port. + * @write_fifo: kfifo used to buffer outgoing data * @write_urb_busy: port`s writing status * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this * port. @@ -96,6 +97,7 @@ struct usb_serial_port { unsigned char *bulk_out_buffer; int bulk_out_size; struct urb *write_urb; + struct kfifo *write_fifo; int write_urb_busy; __u8 bulk_out_endpointAddress; -- cgit v1.2.3-70-g09d2 From df6c516900d48df3581b23d37d6516a22ec4f2ca Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 20 Aug 2009 15:39:48 -0500 Subject: USB: ehci,dbgp,early_printk: split ehci debug driver from early_printk.c Move the dbgp early printk driver in advance of refactoring and adding new code, so the changes to this code are tracked separately from the move of the code. The drivers/usb/early directory will be the location of the current and future early usb code for driving usb devices prior to initializing the standard interrupt driven USB drivers. Signed-off-by: Jason Wessel Cc: Ingo Molnar Cc: Andrew Morton Cc: Yinghai Lu Cc: "Eric W.
Biederman" Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/early_printk.c | 715 ---------------------------------------- drivers/usb/Makefile | 1 + drivers/usb/early/Makefile | 5 + drivers/usb/early/ehci-dbgp.c | 723 +++++++++++++++++++++++++++++++++++++++++ include/linux/usb/ehci_def.h | 6 + 5 files changed, 735 insertions(+), 715 deletions(-) create mode 100644 drivers/usb/early/Makefile create mode 100644 drivers/usb/early/ehci-dbgp.c (limited to 'include') diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index b11cab3c323..519a5e10be5 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -160,721 +160,6 @@ static struct console early_serial_console = { .index = -1, }; -#ifdef CONFIG_EARLY_PRINTK_DBGP - -static struct ehci_caps __iomem *ehci_caps; -static struct ehci_regs __iomem *ehci_regs; -static struct ehci_dbg_port __iomem *ehci_debug; -static unsigned int dbgp_endpoint_out; - -struct ehci_dev { - u32 bus; - u32 slot; - u32 func; -}; - -static struct ehci_dev ehci_dev; - -#define USB_DEBUG_DEVNUM 127 - -#define DBGP_DATA_TOGGLE 0x8800 - -static inline u32 dbgp_pid_update(u32 x, u32 tok) -{ - return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff); -} - -static inline u32 dbgp_len_update(u32 x, u32 len) -{ - return (x & ~0x0f) | (len & 0x0f); -} - -/* - * USB Packet IDs (PIDs) - */ - -/* token */ -#define USB_PID_OUT 0xe1 -#define USB_PID_IN 0x69 -#define USB_PID_SOF 0xa5 -#define USB_PID_SETUP 0x2d -/* handshake */ -#define USB_PID_ACK 0xd2 -#define USB_PID_NAK 0x5a -#define USB_PID_STALL 0x1e -#define USB_PID_NYET 0x96 -/* data */ -#define USB_PID_DATA0 0xc3 -#define USB_PID_DATA1 0x4b -#define USB_PID_DATA2 0x87 -#define USB_PID_MDATA 0x0f -/* Special */ -#define USB_PID_PREAMBLE 0x3c -#define USB_PID_ERR 0x3c -#define USB_PID_SPLIT 0x78 -#define USB_PID_PING 0xb4 -#define USB_PID_UNDEF_0 0xf0 - -#define USB_PID_DATA_TOGGLE 0x88 -#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE) - -#define PCI_CAP_ID_EHCI_DEBUG 0xa - -#define HUB_ROOT_RESET_TIME 50 /* times are in msec */ -#define HUB_SHORT_RESET_TIME 10 -#define HUB_LONG_RESET_TIME 200 -#define HUB_RESET_TIMEOUT 500 - -#define DBGP_MAX_PACKET 8 - -static int dbgp_wait_until_complete(void) -{ - u32 ctrl; - int loop = 0x100000; - - do { - ctrl = readl(&ehci_debug->control); - /* Stop when the transaction is finished */ - if (ctrl & DBGP_DONE) - break; - } while (--loop > 0); - - if (!loop) - return -1; - - /* - * Now that we have observed the completed transaction, - * clear the done bit. - */ - writel(ctrl | DBGP_DONE, &ehci_debug->control); - return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl); -} - -static void __init dbgp_mdelay(int ms) -{ - int i; - - while (ms--) { - for (i = 0; i < 1000; i++) - outb(0x1, 0x80); - } -} - -static void dbgp_breath(void) -{ - /* Sleep to give the debug port a chance to breathe */ -} - -static int dbgp_wait_until_done(unsigned ctrl) -{ - u32 pids, lpid; - int ret; - int loop = 3; - -retry: - writel(ctrl | DBGP_GO, &ehci_debug->control); - ret = dbgp_wait_until_complete(); - pids = readl(&ehci_debug->pids); - lpid = DBGP_PID_GET(pids); - - if (ret < 0) - return ret; - - /* - * If the port is getting full or it has dropped data - * start pacing ourselves, not necessary but it's friendly. 
- */ - if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET)) - dbgp_breath(); - - /* If I get a NACK reissue the transmission */ - if (lpid == USB_PID_NAK) { - if (--loop > 0) - goto retry; - } - - return ret; -} - -static void dbgp_set_data(const void *buf, int size) -{ - const unsigned char *bytes = buf; - u32 lo, hi; - int i; - - lo = hi = 0; - for (i = 0; i < 4 && i < size; i++) - lo |= bytes[i] << (8*i); - for (; i < 8 && i < size; i++) - hi |= bytes[i] << (8*(i - 4)); - writel(lo, &ehci_debug->data03); - writel(hi, &ehci_debug->data47); -} - -static void __init dbgp_get_data(void *buf, int size) -{ - unsigned char *bytes = buf; - u32 lo, hi; - int i; - - lo = readl(&ehci_debug->data03); - hi = readl(&ehci_debug->data47); - for (i = 0; i < 4 && i < size; i++) - bytes[i] = (lo >> (8*i)) & 0xff; - for (; i < 8 && i < size; i++) - bytes[i] = (hi >> (8*(i - 4))) & 0xff; -} - -static int dbgp_bulk_write(unsigned devnum, unsigned endpoint, - const char *bytes, int size) -{ - u32 pids, addr, ctrl; - int ret; - - if (size > DBGP_MAX_PACKET) - return -1; - - addr = DBGP_EPADDR(devnum, endpoint); - - pids = readl(&ehci_debug->pids); - pids = dbgp_pid_update(pids, USB_PID_OUT); - - ctrl = readl(&ehci_debug->control); - ctrl = dbgp_len_update(ctrl, size); - ctrl |= DBGP_OUT; - ctrl |= DBGP_GO; - - dbgp_set_data(bytes, size); - writel(addr, &ehci_debug->address); - writel(pids, &ehci_debug->pids); - - ret = dbgp_wait_until_done(ctrl); - if (ret < 0) - return ret; - - return ret; -} - -static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, - int size) -{ - u32 pids, addr, ctrl; - int ret; - - if (size > DBGP_MAX_PACKET) - return -1; - - addr = DBGP_EPADDR(devnum, endpoint); - - pids = readl(&ehci_debug->pids); - pids = dbgp_pid_update(pids, USB_PID_IN); - - ctrl = readl(&ehci_debug->control); - ctrl = dbgp_len_update(ctrl, size); - ctrl &= ~DBGP_OUT; - ctrl |= DBGP_GO; - - writel(addr, &ehci_debug->address); - writel(pids, &ehci_debug->pids); - ret = dbgp_wait_until_done(ctrl); - if (ret < 0) - return ret; - - if (size > ret) - size = ret; - dbgp_get_data(data, size); - return ret; -} - -static int __init dbgp_control_msg(unsigned devnum, int requesttype, - int request, int value, int index, void *data, int size) -{ - u32 pids, addr, ctrl; - struct usb_ctrlrequest req; - int read; - int ret; - - read = (requesttype & USB_DIR_IN) != 0; - if (size > (read ? 
DBGP_MAX_PACKET:0)) - return -1; - - /* Compute the control message */ - req.bRequestType = requesttype; - req.bRequest = request; - req.wValue = cpu_to_le16(value); - req.wIndex = cpu_to_le16(index); - req.wLength = cpu_to_le16(size); - - pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP); - addr = DBGP_EPADDR(devnum, 0); - - ctrl = readl(&ehci_debug->control); - ctrl = dbgp_len_update(ctrl, sizeof(req)); - ctrl |= DBGP_OUT; - ctrl |= DBGP_GO; - - /* Send the setup message */ - dbgp_set_data(&req, sizeof(req)); - writel(addr, &ehci_debug->address); - writel(pids, &ehci_debug->pids); - ret = dbgp_wait_until_done(ctrl); - if (ret < 0) - return ret; - - /* Read the result */ - return dbgp_bulk_read(devnum, 0, data, size); -} - - -/* Find a PCI capability */ -static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap) -{ - u8 pos; - int bytes; - - if (!(read_pci_config_16(num, slot, func, PCI_STATUS) & - PCI_STATUS_CAP_LIST)) - return 0; - - pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST); - for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { - u8 id; - - pos &= ~3; - id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID); - if (id == 0xff) - break; - if (id == cap) - return pos; - - pos = read_pci_config_byte(num, slot, func, - pos+PCI_CAP_LIST_NEXT); - } - return 0; -} - -static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func) -{ - u32 class; - - class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); - if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI) - return 0; - - return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG); -} - -static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc) -{ - u32 bus, slot, func; - - for (bus = 0; bus < 256; bus++) { - for (slot = 0; slot < 32; slot++) { - for (func = 0; func < 8; func++) { - unsigned cap; - - cap = __find_dbgp(bus, slot, func); - - if (!cap) - continue; - if (ehci_num-- != 0) - continue; - *rbus = bus; - *rslot = slot; - *rfunc = func; - return cap; - } - } - } - return 0; -} - -static int __init ehci_reset_port(int port) -{ - u32 portsc; - u32 delay_time, delay; - int loop; - - /* Reset the usb debug port */ - portsc = readl(&ehci_regs->port_status[port - 1]); - portsc &= ~PORT_PE; - portsc |= PORT_RESET; - writel(portsc, &ehci_regs->port_status[port - 1]); - - delay = HUB_ROOT_RESET_TIME; - for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT; - delay_time += delay) { - dbgp_mdelay(delay); - - portsc = readl(&ehci_regs->port_status[port - 1]); - if (portsc & PORT_RESET) { - /* force reset to complete */ - loop = 2; - writel(portsc & ~(PORT_RWC_BITS | PORT_RESET), - &ehci_regs->port_status[port - 1]); - do { - portsc = readl(&ehci_regs->port_status[port-1]); - } while ((portsc & PORT_RESET) && (--loop > 0)); - } - - /* Device went away? */ - if (!(portsc & PORT_CONNECT)) - return -ENOTCONN; - - /* bomb out completely if something weird happend */ - if ((portsc & PORT_CSC)) - return -EINVAL; - - /* If we've finished resetting, then break out of the loop */ - if (!(portsc & PORT_RESET) && (portsc & PORT_PE)) - return 0; - } - return -EBUSY; -} - -static int __init ehci_wait_for_port(int port) -{ - u32 status; - int ret, reps; - - for (reps = 0; reps < 3; reps++) { - dbgp_mdelay(100); - status = readl(&ehci_regs->status); - if (status & STS_PCD) { - ret = ehci_reset_port(port); - if (ret == 0) - return 0; - } - } - return -ENOTCONN; -} - -#ifdef DBGP_DEBUG -# define dbgp_printk early_printk -#else -static inline void dbgp_printk(const char *fmt, ...) 
{ } -#endif - -typedef void (*set_debug_port_t)(int port); - -static void __init default_set_debug_port(int port) -{ -} - -static set_debug_port_t __initdata set_debug_port = default_set_debug_port; - -static void __init nvidia_set_debug_port(int port) -{ - u32 dword; - dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, - 0x74); - dword &= ~(0x0f<<12); - dword |= ((port & 0x0f)<<12); - write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74, - dword); - dbgp_printk("set debug port to %d\n", port); -} - -static void __init detect_set_debug_port(void) -{ - u32 vendorid; - - vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, - 0x00); - - if ((vendorid & 0xffff) == 0x10de) { - dbgp_printk("using nvidia set_debug_port\n"); - set_debug_port = nvidia_set_debug_port; - } -} - -static int __init ehci_setup(void) -{ - struct usb_debug_descriptor dbgp_desc; - u32 cmd, ctrl, status, portsc, hcs_params; - u32 debug_port, new_debug_port = 0, n_ports; - u32 devnum; - int ret, i; - int loop; - int port_map_tried; - int playtimes = 3; - -try_next_time: - port_map_tried = 0; - -try_next_port: - - hcs_params = readl(&ehci_caps->hcs_params); - debug_port = HCS_DEBUG_PORT(hcs_params); - n_ports = HCS_N_PORTS(hcs_params); - - dbgp_printk("debug_port: %d\n", debug_port); - dbgp_printk("n_ports: %d\n", n_ports); - - for (i = 1; i <= n_ports; i++) { - portsc = readl(&ehci_regs->port_status[i-1]); - dbgp_printk("portstatus%d: %08x\n", i, portsc); - } - - if (port_map_tried && (new_debug_port != debug_port)) { - if (--playtimes) { - set_debug_port(new_debug_port); - goto try_next_time; - } - return -1; - } - - loop = 100000; - /* Reset the EHCI controller */ - cmd = readl(&ehci_regs->command); - cmd |= CMD_RESET; - writel(cmd, &ehci_regs->command); - do { - cmd = readl(&ehci_regs->command); - } while ((cmd & CMD_RESET) && (--loop > 0)); - - if (!loop) { - dbgp_printk("can not reset ehci\n"); - return -1; - } - dbgp_printk("ehci reset done\n"); - - /* Claim ownership, but do not enable yet */ - ctrl = readl(&ehci_debug->control); - ctrl |= DBGP_OWNER; - ctrl &= ~(DBGP_ENABLED | DBGP_INUSE); - writel(ctrl, &ehci_debug->control); - - /* Start the ehci running */ - cmd = readl(&ehci_regs->command); - cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET); - cmd |= CMD_RUN; - writel(cmd, &ehci_regs->command); - - /* Ensure everything is routed to the EHCI */ - writel(FLAG_CF, &ehci_regs->configured_flag); - - /* Wait until the controller is no longer halted */ - loop = 10; - do { - status = readl(&ehci_regs->status); - } while ((status & STS_HALT) && (--loop > 0)); - - if (!loop) { - dbgp_printk("ehci can be started\n"); - return -1; - } - dbgp_printk("ehci started\n"); - - /* Wait for a device to show up in the debug port */ - ret = ehci_wait_for_port(debug_port); - if (ret < 0) { - dbgp_printk("No device found in debug port\n"); - goto next_debug_port; - } - dbgp_printk("ehci wait for port done\n"); - - /* Enable the debug port */ - ctrl = readl(&ehci_debug->control); - ctrl |= DBGP_CLAIM; - writel(ctrl, &ehci_debug->control); - ctrl = readl(&ehci_debug->control); - if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) { - dbgp_printk("No device in debug port\n"); - writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control); - goto err; - } - dbgp_printk("debug ported enabled\n"); - - /* Completely transfer the debug device to the debug controller */ - portsc = readl(&ehci_regs->port_status[debug_port - 1]); - portsc &= ~PORT_PE; - writel(portsc, &ehci_regs->port_status[debug_port - 1]); 
- - dbgp_mdelay(100); - - /* Find the debug device and make it device number 127 */ - for (devnum = 0; devnum <= 127; devnum++) { - ret = dbgp_control_msg(devnum, - USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0, - &dbgp_desc, sizeof(dbgp_desc)); - if (ret > 0) - break; - } - if (devnum > 127) { - dbgp_printk("Could not find attached debug device\n"); - goto err; - } - if (ret < 0) { - dbgp_printk("Attached device is not a debug device\n"); - goto err; - } - dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint; - - /* Move the device to 127 if it isn't already there */ - if (devnum != USB_DEBUG_DEVNUM) { - ret = dbgp_control_msg(devnum, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0); - if (ret < 0) { - dbgp_printk("Could not move attached device to %d\n", - USB_DEBUG_DEVNUM); - goto err; - } - devnum = USB_DEBUG_DEVNUM; - dbgp_printk("debug device renamed to 127\n"); - } - - /* Enable the debug interface */ - ret = dbgp_control_msg(USB_DEBUG_DEVNUM, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0); - if (ret < 0) { - dbgp_printk(" Could not enable the debug device\n"); - goto err; - } - dbgp_printk("debug interface enabled\n"); - - /* Perform a small write to get the even/odd data state in sync - */ - ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1); - if (ret < 0) { - dbgp_printk("dbgp_bulk_write failed: %d\n", ret); - goto err; - } - dbgp_printk("small write doned\n"); - - return 0; -err: - /* Things didn't work so remove my claim */ - ctrl = readl(&ehci_debug->control); - ctrl &= ~(DBGP_CLAIM | DBGP_OUT); - writel(ctrl, &ehci_debug->control); - return -1; - -next_debug_port: - port_map_tried |= (1<<(debug_port - 1)); - new_debug_port = ((debug_port-1+1)%n_ports) + 1; - if (port_map_tried != ((1<> 29) & 0x7; - bar = (bar * 4) + 0xc; - offset = (debug_port >> 16) & 0xfff; - dbgp_printk("bar: %02x offset: %03x\n", bar, offset); - if (bar != PCI_BASE_ADDRESS_0) { - dbgp_printk("only debug ports on bar 1 handled.\n"); - - return -1; - } - - bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0); - dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset); - if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) { - dbgp_printk("only simple 32bit mmio bars supported\n"); - - return -1; - } - - /* double check if the mem space is enabled */ - byte = read_pci_config_byte(bus, slot, func, 0x04); - if (!(byte & 0x2)) { - byte |= 0x02; - write_pci_config_byte(bus, slot, func, 0x04, byte); - dbgp_printk("mmio for ehci enabled\n"); - } - - /* - * FIXME I don't have the bar size so just guess PAGE_SIZE is more - * than enough. 1K is the biggest I have seen. 
- */ - set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK); - ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE); - ehci_bar += bar_val & ~PAGE_MASK; - dbgp_printk("ehci_bar: %p\n", ehci_bar); - - ehci_caps = ehci_bar; - ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase)); - ehci_debug = ehci_bar + offset; - ehci_dev.bus = bus; - ehci_dev.slot = slot; - ehci_dev.func = func; - - detect_set_debug_port(); - - ret = ehci_setup(); - if (ret < 0) { - dbgp_printk("ehci_setup failed\n"); - ehci_debug = NULL; - - return -1; - } - - return 0; -} - -static void early_dbgp_write(struct console *con, const char *str, u32 n) -{ - int chunk, ret; - - if (!ehci_debug) - return; - while (n > 0) { - chunk = n; - if (chunk > DBGP_MAX_PACKET) - chunk = DBGP_MAX_PACKET; - ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, - dbgp_endpoint_out, str, chunk); - str += chunk; - n -= chunk; - } -} - -static struct console early_dbgp_console = { - .name = "earlydbg", - .write = early_dbgp_write, - .flags = CON_PRINTBUFFER, - .index = -1, -}; -#endif - /* Direct interface for emergencies */ static struct console *early_console = &early_vga_console; static int __initdata early_console_initialized; diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index dbe4fb694ad..be3c9b80bc9 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/ obj-$(CONFIG_USB_SERIAL) += serial/ obj-$(CONFIG_USB) += misc/ +obj-y += early/ obj-$(CONFIG_USB_ATM) += atm/ obj-$(CONFIG_USB_SPEEDTOUCH) += atm/ diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile new file mode 100644 index 00000000000..dfedee8c45b --- /dev/null +++ b/drivers/usb/early/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for early USB devices +# + +obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c new file mode 100644 index 00000000000..821b7b21c29 --- /dev/null +++ b/drivers/usb/early/ehci-dbgp.c @@ -0,0 +1,723 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct ehci_caps __iomem *ehci_caps; +static struct ehci_regs __iomem *ehci_regs; +static struct ehci_dbg_port __iomem *ehci_debug; +static unsigned int dbgp_endpoint_out; + +struct ehci_dev { + u32 bus; + u32 slot; + u32 func; +}; + +static struct ehci_dev ehci_dev; + +#define USB_DEBUG_DEVNUM 127 + +#define DBGP_DATA_TOGGLE 0x8800 + +static inline u32 dbgp_pid_update(u32 x, u32 tok) +{ + return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff); +} + +static inline u32 dbgp_len_update(u32 x, u32 len) +{ + return (x & ~0x0f) | (len & 0x0f); +} + +/* + * USB Packet IDs (PIDs) + */ + +/* token */ +#define USB_PID_OUT 0xe1 +#define USB_PID_IN 0x69 +#define USB_PID_SOF 0xa5 +#define USB_PID_SETUP 0x2d +/* handshake */ +#define USB_PID_ACK 0xd2 +#define USB_PID_NAK 0x5a +#define USB_PID_STALL 0x1e +#define USB_PID_NYET 0x96 +/* data */ +#define USB_PID_DATA0 0xc3 +#define USB_PID_DATA1 0x4b +#define USB_PID_DATA2 0x87 +#define USB_PID_MDATA 0x0f +/* Special */ +#define USB_PID_PREAMBLE 0x3c +#define USB_PID_ERR 0x3c +#define USB_PID_SPLIT 0x78 +#define USB_PID_PING 0xb4 +#define USB_PID_UNDEF_0 0xf0 + +#define USB_PID_DATA_TOGGLE 0x88 +#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE) + +#define PCI_CAP_ID_EHCI_DEBUG 0xa + +#define HUB_ROOT_RESET_TIME 50 /* times are in msec */ +#define HUB_SHORT_RESET_TIME 10 +#define HUB_LONG_RESET_TIME 200 +#define HUB_RESET_TIMEOUT 500 + +#define 
DBGP_MAX_PACKET 8 + +static int dbgp_wait_until_complete(void) +{ + u32 ctrl; + int loop = 0x100000; + + do { + ctrl = readl(&ehci_debug->control); + /* Stop when the transaction is finished */ + if (ctrl & DBGP_DONE) + break; + } while (--loop > 0); + + if (!loop) + return -1; + + /* + * Now that we have observed the completed transaction, + * clear the done bit. + */ + writel(ctrl | DBGP_DONE, &ehci_debug->control); + return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl); +} + +static void __init dbgp_mdelay(int ms) +{ + int i; + + while (ms--) { + for (i = 0; i < 1000; i++) + outb(0x1, 0x80); + } +} + +static void dbgp_breath(void) +{ + /* Sleep to give the debug port a chance to breathe */ +} + +static int dbgp_wait_until_done(unsigned ctrl) +{ + u32 pids, lpid; + int ret; + int loop = 3; + +retry: + writel(ctrl | DBGP_GO, &ehci_debug->control); + ret = dbgp_wait_until_complete(); + pids = readl(&ehci_debug->pids); + lpid = DBGP_PID_GET(pids); + + if (ret < 0) + return ret; + + /* + * If the port is getting full or it has dropped data + * start pacing ourselves, not necessary but it's friendly. + */ + if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET)) + dbgp_breath(); + + /* If I get a NACK reissue the transmission */ + if (lpid == USB_PID_NAK) { + if (--loop > 0) + goto retry; + } + + return ret; +} + +static void dbgp_set_data(const void *buf, int size) +{ + const unsigned char *bytes = buf; + u32 lo, hi; + int i; + + lo = hi = 0; + for (i = 0; i < 4 && i < size; i++) + lo |= bytes[i] << (8*i); + for (; i < 8 && i < size; i++) + hi |= bytes[i] << (8*(i - 4)); + writel(lo, &ehci_debug->data03); + writel(hi, &ehci_debug->data47); +} + +static void __init dbgp_get_data(void *buf, int size) +{ + unsigned char *bytes = buf; + u32 lo, hi; + int i; + + lo = readl(&ehci_debug->data03); + hi = readl(&ehci_debug->data47); + for (i = 0; i < 4 && i < size; i++) + bytes[i] = (lo >> (8*i)) & 0xff; + for (; i < 8 && i < size; i++) + bytes[i] = (hi >> (8*(i - 4))) & 0xff; +} + +static int dbgp_bulk_write(unsigned devnum, unsigned endpoint, + const char *bytes, int size) +{ + u32 pids, addr, ctrl; + int ret; + + if (size > DBGP_MAX_PACKET) + return -1; + + addr = DBGP_EPADDR(devnum, endpoint); + + pids = readl(&ehci_debug->pids); + pids = dbgp_pid_update(pids, USB_PID_OUT); + + ctrl = readl(&ehci_debug->control); + ctrl = dbgp_len_update(ctrl, size); + ctrl |= DBGP_OUT; + ctrl |= DBGP_GO; + + dbgp_set_data(bytes, size); + writel(addr, &ehci_debug->address); + writel(pids, &ehci_debug->pids); + + ret = dbgp_wait_until_done(ctrl); + if (ret < 0) + return ret; + + return ret; +} + +static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, + int size) +{ + u32 pids, addr, ctrl; + int ret; + + if (size > DBGP_MAX_PACKET) + return -1; + + addr = DBGP_EPADDR(devnum, endpoint); + + pids = readl(&ehci_debug->pids); + pids = dbgp_pid_update(pids, USB_PID_IN); + + ctrl = readl(&ehci_debug->control); + ctrl = dbgp_len_update(ctrl, size); + ctrl &= ~DBGP_OUT; + ctrl |= DBGP_GO; + + writel(addr, &ehci_debug->address); + writel(pids, &ehci_debug->pids); + ret = dbgp_wait_until_done(ctrl); + if (ret < 0) + return ret; + + if (size > ret) + size = ret; + dbgp_get_data(data, size); + return ret; +} + +static int __init dbgp_control_msg(unsigned devnum, int requesttype, + int request, int value, int index, void *data, int size) +{ + u32 pids, addr, ctrl; + struct usb_ctrlrequest req; + int read; + int ret; + + read = (requesttype & USB_DIR_IN) != 0; + if (size > (read ? 
DBGP_MAX_PACKET:0)) + return -1; + + /* Compute the control message */ + req.bRequestType = requesttype; + req.bRequest = request; + req.wValue = cpu_to_le16(value); + req.wIndex = cpu_to_le16(index); + req.wLength = cpu_to_le16(size); + + pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP); + addr = DBGP_EPADDR(devnum, 0); + + ctrl = readl(&ehci_debug->control); + ctrl = dbgp_len_update(ctrl, sizeof(req)); + ctrl |= DBGP_OUT; + ctrl |= DBGP_GO; + + /* Send the setup message */ + dbgp_set_data(&req, sizeof(req)); + writel(addr, &ehci_debug->address); + writel(pids, &ehci_debug->pids); + ret = dbgp_wait_until_done(ctrl); + if (ret < 0) + return ret; + + /* Read the result */ + return dbgp_bulk_read(devnum, 0, data, size); +} + + +/* Find a PCI capability */ +static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap) +{ + u8 pos; + int bytes; + + if (!(read_pci_config_16(num, slot, func, PCI_STATUS) & + PCI_STATUS_CAP_LIST)) + return 0; + + pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST); + for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { + u8 id; + + pos &= ~3; + id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID); + if (id == 0xff) + break; + if (id == cap) + return pos; + + pos = read_pci_config_byte(num, slot, func, + pos+PCI_CAP_LIST_NEXT); + } + return 0; +} + +static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func) +{ + u32 class; + + class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); + if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI) + return 0; + + return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG); +} + +static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc) +{ + u32 bus, slot, func; + + for (bus = 0; bus < 256; bus++) { + for (slot = 0; slot < 32; slot++) { + for (func = 0; func < 8; func++) { + unsigned cap; + + cap = __find_dbgp(bus, slot, func); + + if (!cap) + continue; + if (ehci_num-- != 0) + continue; + *rbus = bus; + *rslot = slot; + *rfunc = func; + return cap; + } + } + } + return 0; +} + +static int __init ehci_reset_port(int port) +{ + u32 portsc; + u32 delay_time, delay; + int loop; + + /* Reset the usb debug port */ + portsc = readl(&ehci_regs->port_status[port - 1]); + portsc &= ~PORT_PE; + portsc |= PORT_RESET; + writel(portsc, &ehci_regs->port_status[port - 1]); + + delay = HUB_ROOT_RESET_TIME; + for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT; + delay_time += delay) { + dbgp_mdelay(delay); + + portsc = readl(&ehci_regs->port_status[port - 1]); + if (portsc & PORT_RESET) { + /* force reset to complete */ + loop = 2; + writel(portsc & ~(PORT_RWC_BITS | PORT_RESET), + &ehci_regs->port_status[port - 1]); + do { + portsc = readl(&ehci_regs->port_status[port-1]); + } while ((portsc & PORT_RESET) && (--loop > 0)); + } + + /* Device went away? */ + if (!(portsc & PORT_CONNECT)) + return -ENOTCONN; + + /* bomb out completely if something weird happend */ + if ((portsc & PORT_CSC)) + return -EINVAL; + + /* If we've finished resetting, then break out of the loop */ + if (!(portsc & PORT_RESET) && (portsc & PORT_PE)) + return 0; + } + return -EBUSY; +} + +static int __init ehci_wait_for_port(int port) +{ + u32 status; + int ret, reps; + + for (reps = 0; reps < 3; reps++) { + dbgp_mdelay(100); + status = readl(&ehci_regs->status); + if (status & STS_PCD) { + ret = ehci_reset_port(port); + if (ret == 0) + return 0; + } + } + return -ENOTCONN; +} + +#ifdef DBGP_DEBUG +# define dbgp_printk early_printk +#else +static inline void dbgp_printk(const char *fmt, ...) 
{ } +#endif + +typedef void (*set_debug_port_t)(int port); + +static void __init default_set_debug_port(int port) +{ +} + +static set_debug_port_t __initdata set_debug_port = default_set_debug_port; + +static void __init nvidia_set_debug_port(int port) +{ + u32 dword; + dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, + 0x74); + dword &= ~(0x0f<<12); + dword |= ((port & 0x0f)<<12); + write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74, + dword); + dbgp_printk("set debug port to %d\n", port); +} + +static void __init detect_set_debug_port(void) +{ + u32 vendorid; + + vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, + 0x00); + + if ((vendorid & 0xffff) == 0x10de) { + dbgp_printk("using nvidia set_debug_port\n"); + set_debug_port = nvidia_set_debug_port; + } +} + +static int __init ehci_setup(void) +{ + struct usb_debug_descriptor dbgp_desc; + u32 cmd, ctrl, status, portsc, hcs_params; + u32 debug_port, new_debug_port = 0, n_ports; + u32 devnum; + int ret, i; + int loop; + int port_map_tried; + int playtimes = 3; + +try_next_time: + port_map_tried = 0; + +try_next_port: + + hcs_params = readl(&ehci_caps->hcs_params); + debug_port = HCS_DEBUG_PORT(hcs_params); + n_ports = HCS_N_PORTS(hcs_params); + + dbgp_printk("debug_port: %d\n", debug_port); + dbgp_printk("n_ports: %d\n", n_ports); + + for (i = 1; i <= n_ports; i++) { + portsc = readl(&ehci_regs->port_status[i-1]); + dbgp_printk("portstatus%d: %08x\n", i, portsc); + } + + if (port_map_tried && (new_debug_port != debug_port)) { + if (--playtimes) { + set_debug_port(new_debug_port); + goto try_next_time; + } + return -1; + } + + loop = 100000; + /* Reset the EHCI controller */ + cmd = readl(&ehci_regs->command); + cmd |= CMD_RESET; + writel(cmd, &ehci_regs->command); + do { + cmd = readl(&ehci_regs->command); + } while ((cmd & CMD_RESET) && (--loop > 0)); + + if (!loop) { + dbgp_printk("can not reset ehci\n"); + return -1; + } + dbgp_printk("ehci reset done\n"); + + /* Claim ownership, but do not enable yet */ + ctrl = readl(&ehci_debug->control); + ctrl |= DBGP_OWNER; + ctrl &= ~(DBGP_ENABLED | DBGP_INUSE); + writel(ctrl, &ehci_debug->control); + + /* Start the ehci running */ + cmd = readl(&ehci_regs->command); + cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET); + cmd |= CMD_RUN; + writel(cmd, &ehci_regs->command); + + /* Ensure everything is routed to the EHCI */ + writel(FLAG_CF, &ehci_regs->configured_flag); + + /* Wait until the controller is no longer halted */ + loop = 10; + do { + status = readl(&ehci_regs->status); + } while ((status & STS_HALT) && (--loop > 0)); + + if (!loop) { + dbgp_printk("ehci can be started\n"); + return -1; + } + dbgp_printk("ehci started\n"); + + /* Wait for a device to show up in the debug port */ + ret = ehci_wait_for_port(debug_port); + if (ret < 0) { + dbgp_printk("No device found in debug port\n"); + goto next_debug_port; + } + dbgp_printk("ehci wait for port done\n"); + + /* Enable the debug port */ + ctrl = readl(&ehci_debug->control); + ctrl |= DBGP_CLAIM; + writel(ctrl, &ehci_debug->control); + ctrl = readl(&ehci_debug->control); + if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) { + dbgp_printk("No device in debug port\n"); + writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control); + goto err; + } + dbgp_printk("debug ported enabled\n"); + + /* Completely transfer the debug device to the debug controller */ + portsc = readl(&ehci_regs->port_status[debug_port - 1]); + portsc &= ~PORT_PE; + writel(portsc, &ehci_regs->port_status[debug_port - 1]); 
+ + dbgp_mdelay(100); + + /* Find the debug device and make it device number 127 */ + for (devnum = 0; devnum <= 127; devnum++) { + ret = dbgp_control_msg(devnum, + USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0, + &dbgp_desc, sizeof(dbgp_desc)); + if (ret > 0) + break; + } + if (devnum > 127) { + dbgp_printk("Could not find attached debug device\n"); + goto err; + } + if (ret < 0) { + dbgp_printk("Attached device is not a debug device\n"); + goto err; + } + dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint; + + /* Move the device to 127 if it isn't already there */ + if (devnum != USB_DEBUG_DEVNUM) { + ret = dbgp_control_msg(devnum, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0); + if (ret < 0) { + dbgp_printk("Could not move attached device to %d\n", + USB_DEBUG_DEVNUM); + goto err; + } + devnum = USB_DEBUG_DEVNUM; + dbgp_printk("debug device renamed to 127\n"); + } + + /* Enable the debug interface */ + ret = dbgp_control_msg(USB_DEBUG_DEVNUM, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0); + if (ret < 0) { + dbgp_printk(" Could not enable the debug device\n"); + goto err; + } + dbgp_printk("debug interface enabled\n"); + + /* Perform a small write to get the even/odd data state in sync + */ + ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1); + if (ret < 0) { + dbgp_printk("dbgp_bulk_write failed: %d\n", ret); + goto err; + } + dbgp_printk("small write doned\n"); + + return 0; +err: + /* Things didn't work so remove my claim */ + ctrl = readl(&ehci_debug->control); + ctrl &= ~(DBGP_CLAIM | DBGP_OUT); + writel(ctrl, &ehci_debug->control); + return -1; + +next_debug_port: + port_map_tried |= (1<<(debug_port - 1)); + new_debug_port = ((debug_port-1+1)%n_ports) + 1; + if (port_map_tried != ((1<> 29) & 0x7; + bar = (bar * 4) + 0xc; + offset = (debug_port >> 16) & 0xfff; + dbgp_printk("bar: %02x offset: %03x\n", bar, offset); + if (bar != PCI_BASE_ADDRESS_0) { + dbgp_printk("only debug ports on bar 1 handled.\n"); + + return -1; + } + + bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0); + dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset); + if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) { + dbgp_printk("only simple 32bit mmio bars supported\n"); + + return -1; + } + + /* double check if the mem space is enabled */ + byte = read_pci_config_byte(bus, slot, func, 0x04); + if (!(byte & 0x2)) { + byte |= 0x02; + write_pci_config_byte(bus, slot, func, 0x04, byte); + dbgp_printk("mmio for ehci enabled\n"); + } + + /* + * FIXME I don't have the bar size so just guess PAGE_SIZE is more + * than enough. 1K is the biggest I have seen. 
+ */ + set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK); + ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE); + ehci_bar += bar_val & ~PAGE_MASK; + dbgp_printk("ehci_bar: %p\n", ehci_bar); + + ehci_caps = ehci_bar; + ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase)); + ehci_debug = ehci_bar + offset; + ehci_dev.bus = bus; + ehci_dev.slot = slot; + ehci_dev.func = func; + + detect_set_debug_port(); + + ret = ehci_setup(); + if (ret < 0) { + dbgp_printk("ehci_setup failed\n"); + ehci_debug = NULL; + + return -1; + } + + return 0; +} + +static void early_dbgp_write(struct console *con, const char *str, u32 n) +{ + int chunk, ret; + + if (!ehci_debug) + return; + while (n > 0) { + chunk = n; + if (chunk > DBGP_MAX_PACKET) + chunk = DBGP_MAX_PACKET; + ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, + dbgp_endpoint_out, str, chunk); + str += chunk; + n -= chunk; + } +} + +struct console early_dbgp_console = { + .name = "earlydbg", + .write = early_dbgp_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 4c4b701ff26..1deac2a7b12 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -170,4 +170,10 @@ struct ehci_dbg_port { #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) } __attribute__ ((packed)); +#ifdef CONFIG_EARLY_PRINTK_DBGP +#include +extern int __init early_dbgp_init(char *s); +extern struct console early_dbgp_console; +#endif /* CONFIG_EARLY_PRINTK_DBGP */ + #endif /* __LINUX_USB_EHCI_DEF_H */ -- cgit v1.2.3-70-g09d2 From 917778267fbe67703ab7d5c6f0b7a05d4c3df485 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 20 Aug 2009 15:39:53 -0500 Subject: USB: ehci-dbgp: stability improvements and external re-init This patch implements several changes: 1) Improve the capability to debug the dbgp driver The dbgp_ehci_status() was added in a number of places to report the critical ehci registers to diagnose the cause of a failure of the ehci-dbgp driver. 2) Capability to survive the host controller initialization The dbgp_external_startup(), dbgp_not_safe, and dbgp_phys_port were added to allow the ehci-dbgp driver to re-initialize after the ehci host controller is reset by the standard host controller driver. This same routine is common for the early startup or re-initialization. This resulted in the need to move some of the initialization code out of the __init section because the ehci driver may be loaded later on as a kernel module. 3) Stability improvements for device initialization The device enumeration from 0 to 127 may fail the first time after a warm reset on some older EHCI debug controllers. The enumeration will be tried up to 3 times to account for this failure case. The dbgp_wait_until_complete() was changed to wait up to 250 ms before failing, which only comes into play during device initialization. The maximum delay will never get hit during the course of normal operation of the driver, unless the device got unplugged or there was an ehci controller failure, in which case the dbgp device driver will shut itself down. Signed-off-by: Jason Wessel Cc: Ingo Molnar Cc: Andrew Morton Cc: dbrownell@users.sourceforge.net Cc: Yinghai Lu Cc: "Eric W.
Biederman" Signed-off-by: Greg Kroah-Hartman --- drivers/usb/early/ehci-dbgp.c | 442 ++++++++++++++++++++++++++++-------------- include/linux/usb/ehci_def.h | 5 + 2 files changed, 299 insertions(+), 148 deletions(-) (limited to 'include') diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index 6198ebded3a..06e05ea1787 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -1,5 +1,19 @@ +/* + * Standalone EHCI usb debug driver + * + * Originally written by: + * Eric W. Biederman" and + * Yinghai Lu + * + * Changes for early/late printk and HW errata: + * Jason Wessel + * Copyright (C) 2009 Wind River Systems, Inc. + * + */ + #include #include +#include #include #include #include @@ -9,15 +23,37 @@ #include #include -#ifdef DBGP_DEBUG -# define dbgp_printk printk -#else -static inline void dbgp_printk(const char *fmt, ...) { } -#endif +/* The code here is intended to talk directly to the EHCI debug port + * and does not require that you have any kind of USB host controller + * drivers or USB device drivers compiled into the kernel. + * + * If you make a change to anything in here, the following test cases + * need to pass where a USB debug device works in the following + * configurations. + * + * 1. boot args: earlyprintk=dbgp + * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set + * o kernel compiled with CONFIG_USB_EHCI_HCD=y + * 2. boot args: earlyprintk=dbgp,keep + * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set + * o kernel compiled with CONFIG_USB_EHCI_HCD=y + * 3. boot args: earlyprintk=dbgp console=ttyUSB0 + * o kernel has CONFIG_USB_EHCI_HCD=y and + * CONFIG_USB_SERIAL_DEBUG=y + * 4. boot args: earlyprintk=vga,dbgp + * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set + * o kernel compiled with CONFIG_USB_EHCI_HCD=y + * + * For the 4th configuration you can turn on or off the DBGP_DEBUG + * such that you can debug the dbgp device's driver code. + */ + +static int dbgp_phys_port = 1; static struct ehci_caps __iomem *ehci_caps; static struct ehci_regs __iomem *ehci_regs; static struct ehci_dbg_port __iomem *ehci_debug; +static int dbgp_not_safe; /* Cannot use debug device during ehci reset */ static unsigned int dbgp_endpoint_out; struct ehci_dev { @@ -32,6 +68,26 @@ static struct ehci_dev ehci_dev; #define DBGP_DATA_TOGGLE 0x8800 +#ifdef DBGP_DEBUG +#define dbgp_printk printk +static void dbgp_ehci_status(char *str) +{ + if (!ehci_debug) + return; + dbgp_printk("dbgp: %s\n", str); + dbgp_printk(" Debug control: %08x", readl(&ehci_debug->control)); + dbgp_printk(" ehci cmd : %08x", readl(&ehci_regs->command)); + dbgp_printk(" ehci conf flg: %08x\n", + readl(&ehci_regs->configured_flag)); + dbgp_printk(" ehci status : %08x", readl(&ehci_regs->status)); + dbgp_printk(" ehci portsc : %08x\n", + readl(&ehci_regs->port_status[dbgp_phys_port - 1])); +} +#else +static inline void dbgp_ehci_status(char *str) { } +static inline void dbgp_printk(const char *fmt, ...) 
{ } +#endif + static inline u32 dbgp_pid_update(u32 x, u32 tok) { return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff); @@ -79,21 +135,23 @@ static inline u32 dbgp_len_update(u32 x, u32 len) #define HUB_RESET_TIMEOUT 500 #define DBGP_MAX_PACKET 8 +#define DBGP_TIMEOUT (250 * 1000) static int dbgp_wait_until_complete(void) { u32 ctrl; - int loop = 0x100000; + int loop = DBGP_TIMEOUT; do { ctrl = readl(&ehci_debug->control); /* Stop when the transaction is finished */ if (ctrl & DBGP_DONE) break; + udelay(1); } while (--loop > 0); if (!loop) - return -1; + return -DBGP_TIMEOUT; /* * Now that we have observed the completed transaction, @@ -103,7 +161,7 @@ static int dbgp_wait_until_complete(void) return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl); } -static void __init dbgp_mdelay(int ms) +static inline void dbgp_mdelay(int ms) { int i; @@ -130,8 +188,17 @@ retry: pids = readl(&ehci_debug->pids); lpid = DBGP_PID_GET(pids); - if (ret < 0) + if (ret < 0) { + /* A -DBGP_TIMEOUT failure here means the device has + * failed, perhaps because it was unplugged, in which + * case we do not want to hang the system so the dbgp + * will be marked as unsafe to use. EHCI reset is the + * only way to recover if you unplug the dbgp device. + */ + if (ret == -DBGP_TIMEOUT && !dbgp_not_safe) + dbgp_not_safe = 1; return ret; + } /* * If the port is getting full or it has dropped data @@ -149,7 +216,7 @@ retry: return ret; } -static void dbgp_set_data(const void *buf, int size) +static inline void dbgp_set_data(const void *buf, int size) { const unsigned char *bytes = buf; u32 lo, hi; @@ -164,7 +231,7 @@ static void dbgp_set_data(const void *buf, int size) writel(hi, &ehci_debug->data47); } -static void __init dbgp_get_data(void *buf, int size) +static inline void dbgp_get_data(void *buf, int size) { unsigned char *bytes = buf; u32 lo, hi; @@ -208,7 +275,7 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint, return ret; } -static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, +static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, int size) { u32 pids, addr, ctrl; @@ -239,7 +306,7 @@ static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, return ret; } -static int __init dbgp_control_msg(unsigned devnum, int requesttype, +static int dbgp_control_msg(unsigned devnum, int requesttype, int request, int value, int index, void *data, int size) { u32 pids, addr, ctrl; @@ -342,13 +409,179 @@ static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc) return 0; } +static int dbgp_ehci_startup(void) +{ + u32 ctrl, cmd, status; + int loop; + + /* Claim ownership, but do not enable yet */ + ctrl = readl(&ehci_debug->control); + ctrl |= DBGP_OWNER; + ctrl &= ~(DBGP_ENABLED | DBGP_INUSE); + writel(ctrl, &ehci_debug->control); + udelay(1); + + dbgp_ehci_status("EHCI startup"); + /* Start the ehci running */ + cmd = readl(&ehci_regs->command); + cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET); + cmd |= CMD_RUN; + writel(cmd, &ehci_regs->command); + + /* Ensure everything is routed to the EHCI */ + writel(FLAG_CF, &ehci_regs->configured_flag); + + /* Wait until the controller is no longer halted */ + loop = 10; + do { + status = readl(&ehci_regs->status); + if (!(status & STS_HALT)) + break; + udelay(1); + } while (--loop > 0); + + if (!loop) { + dbgp_printk("ehci can not be started\n"); + return -ENODEV; + } + dbgp_printk("ehci started\n"); + return 0; +} + +static int 
dbgp_ehci_controller_reset(void) +{ + int loop = 250 * 1000; + u32 cmd; + + /* Reset the EHCI controller */ + cmd = readl(&ehci_regs->command); + cmd |= CMD_RESET; + writel(cmd, &ehci_regs->command); + do { + cmd = readl(&ehci_regs->command); + } while ((cmd & CMD_RESET) && (--loop > 0)); + + if (!loop) { + dbgp_printk("can not reset ehci\n"); + return -1; + } + dbgp_ehci_status("ehci reset done"); + return 0; +} +static int ehci_wait_for_port(int port); +/* Return 0 on success + * Return -ENODEV for any general failure + * Return -EIO if wait for port fails + */ +int dbgp_external_startup(void) +{ + int devnum; + struct usb_debug_descriptor dbgp_desc; + int ret; + u32 ctrl, portsc; + int dbg_port = dbgp_phys_port; + int tries = 3; + + ret = dbgp_ehci_startup(); + if (ret) + return ret; + + /* Wait for a device to show up in the debug port */ + ret = ehci_wait_for_port(dbg_port); + if (ret < 0) { + portsc = readl(&ehci_regs->port_status[dbg_port - 1]); + dbgp_printk("No device found in debug port\n"); + return -EIO; + } + dbgp_ehci_status("wait for port done"); + + /* Enable the debug port */ + ctrl = readl(&ehci_debug->control); + ctrl |= DBGP_CLAIM; + writel(ctrl, &ehci_debug->control); + ctrl = readl(&ehci_debug->control); + if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) { + dbgp_printk("No device in debug port\n"); + writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control); + return -ENODEV; + } + dbgp_ehci_status("debug ported enabled"); + + /* Completely transfer the debug device to the debug controller */ + portsc = readl(&ehci_regs->port_status[dbg_port - 1]); + portsc &= ~PORT_PE; + writel(portsc, &ehci_regs->port_status[dbg_port - 1]); + + dbgp_mdelay(100); + +try_again: + /* Find the debug device and make it device number 127 */ + for (devnum = 0; devnum <= 127; devnum++) { + ret = dbgp_control_msg(devnum, + USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0, + &dbgp_desc, sizeof(dbgp_desc)); + if (ret > 0) + break; + } + if (devnum > 127) { + dbgp_printk("Could not find attached debug device\n"); + goto err; + } + if (ret < 0) { + dbgp_printk("Attached device is not a debug device\n"); + goto err; + } + dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint; + + /* Move the device to 127 if it isn't already there */ + if (devnum != USB_DEBUG_DEVNUM) { + ret = dbgp_control_msg(devnum, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0); + if (ret < 0) { + dbgp_printk("Could not move attached device to %d\n", + USB_DEBUG_DEVNUM); + goto err; + } + devnum = USB_DEBUG_DEVNUM; + dbgp_printk("debug device renamed to 127\n"); + } + + /* Enable the debug interface */ + ret = dbgp_control_msg(USB_DEBUG_DEVNUM, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0); + if (ret < 0) { + dbgp_printk(" Could not enable the debug device\n"); + goto err; + } + dbgp_printk("debug interface enabled\n"); + /* Perform a small write to get the even/odd data state in sync + */ + ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1); + if (ret < 0) { + dbgp_printk("dbgp_bulk_write failed: %d\n", ret); + goto err; + } + dbgp_printk("small write doned\n"); + dbgp_not_safe = 0; + + return 0; +err: + if (tries--) + goto try_again; + return -ENODEV; +} +EXPORT_SYMBOL_GPL(dbgp_external_startup); + static int __init ehci_reset_port(int port) { u32 portsc; u32 delay_time, delay; int loop; - dbgp_printk("ehci_reset_port %i\n", port); + dbgp_ehci_status("reset 
port"); /* Reset the usb debug port */ portsc = readl(&ehci_regs->port_status[port - 1]); portsc &= ~PORT_PE; @@ -388,7 +621,7 @@ static int __init ehci_reset_port(int port) return -EBUSY; } -static int __init ehci_wait_for_port(int port) +static int ehci_wait_for_port(int port) { u32 status; int ret, reps; @@ -487,12 +720,9 @@ static void __init early_ehci_bios_handoff(void) static int __init ehci_setup(void) { - struct usb_debug_descriptor dbgp_desc; - u32 cmd, ctrl, status, portsc, hcs_params; + u32 ctrl, portsc, hcs_params; u32 debug_port, new_debug_port = 0, n_ports; - u32 devnum; int ret, i; - int loop; int port_map_tried; int playtimes = 3; @@ -505,10 +735,12 @@ try_next_port: hcs_params = readl(&ehci_caps->hcs_params); debug_port = HCS_DEBUG_PORT(hcs_params); + dbgp_phys_port = debug_port; n_ports = HCS_N_PORTS(hcs_params); dbgp_printk("debug_port: %d\n", debug_port); dbgp_printk("n_ports: %d\n", n_ports); + dbgp_ehci_status(""); for (i = 1; i <= n_ports; i++) { portsc = readl(&ehci_regs->port_status[i-1]); @@ -523,138 +755,27 @@ try_next_port: return -1; } - loop = 250 * 1000; - /* Reset the EHCI controller */ - cmd = readl(&ehci_regs->command); - cmd |= CMD_RESET; - writel(cmd, &ehci_regs->command); - do { - cmd = readl(&ehci_regs->command); - } while ((cmd & CMD_RESET) && (--loop > 0)); - - if (!loop) { - dbgp_printk("can not reset ehci\n"); - return -1; - } - dbgp_printk("ehci reset done\n"); - - /* Claim ownership, but do not enable yet */ - ctrl = readl(&ehci_debug->control); - ctrl |= DBGP_OWNER; - ctrl &= ~(DBGP_ENABLED | DBGP_INUSE); - writel(ctrl, &ehci_debug->control); - udelay(1); - - /* Start the ehci running */ - cmd = readl(&ehci_regs->command); - cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET); - cmd |= CMD_RUN; - writel(cmd, &ehci_regs->command); - - /* Ensure everything is routed to the EHCI */ - writel(FLAG_CF, &ehci_regs->configured_flag); - - /* Wait until the controller is no longer halted */ - loop = 10; - do { - status = readl(&ehci_regs->status); - if (!(status & STS_HALT)) - break; - udelay(1); - } while (--loop > 0); - - if (!loop) { - dbgp_printk("ehci can not be started\n"); - return -1; + /* Only reset the controller if it is not already in the + * configured state */ + if (!(readl(&ehci_regs->configured_flag) & FLAG_CF)) { + if (dbgp_ehci_controller_reset() != 0) + return -1; + } else { + dbgp_ehci_status("ehci skip - already configured"); } - dbgp_printk("ehci started\n"); - /* Wait for a device to show up in the debug port */ - ret = ehci_wait_for_port(debug_port); - if (ret < 0) { - dbgp_printk("No device found in debug port\n"); + ret = dbgp_external_startup(); + if (ret == -EIO) goto next_debug_port; - } - dbgp_printk("ehci wait for port done\n"); - - /* Enable the debug port */ - ctrl = readl(&ehci_debug->control); - ctrl |= DBGP_CLAIM; - writel(ctrl, &ehci_debug->control); - ctrl = readl(&ehci_debug->control); - if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) { - dbgp_printk("No device in debug port\n"); - writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control); - goto err; - } - dbgp_printk("debug ported enabled\n"); - /* Completely transfer the debug device to the debug controller */ - portsc = readl(&ehci_regs->port_status[debug_port - 1]); - portsc &= ~PORT_PE; - writel(portsc, &ehci_regs->port_status[debug_port - 1]); - - dbgp_mdelay(100); - - /* Find the debug device and make it device number 127 */ - for (devnum = 0; devnum <= 127; devnum++) { - ret = dbgp_control_msg(devnum, - USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - 
USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0, - &dbgp_desc, sizeof(dbgp_desc)); - if (ret > 0) - break; - } - if (devnum > 127) { - dbgp_printk("Could not find attached debug device\n"); - goto err; - } if (ret < 0) { - dbgp_printk("Attached device is not a debug device\n"); - goto err; - } - dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint; - - /* Move the device to 127 if it isn't already there */ - if (devnum != USB_DEBUG_DEVNUM) { - ret = dbgp_control_msg(devnum, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0); - if (ret < 0) { - dbgp_printk("Could not move attached device to %d\n", - USB_DEBUG_DEVNUM); - goto err; - } - devnum = USB_DEBUG_DEVNUM; - dbgp_printk("debug device renamed to 127\n"); - } - - /* Enable the debug interface */ - ret = dbgp_control_msg(USB_DEBUG_DEVNUM, - USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0); - if (ret < 0) { - dbgp_printk(" Could not enable the debug device\n"); - goto err; - } - dbgp_printk("debug interface enabled\n"); - - /* Perform a small write to get the even/odd data state in sync - */ - ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1); - if (ret < 0) { - dbgp_printk("dbgp_bulk_write failed: %d\n", ret); - goto err; + /* Things didn't work so remove my claim */ + ctrl = readl(&ehci_debug->control); + ctrl &= ~(DBGP_CLAIM | DBGP_OUT); + writel(ctrl, &ehci_debug->control); + return -1; } - dbgp_printk("small write doned\n"); - return 0; -err: - /* Things didn't work so remove my claim */ - ctrl = readl(&ehci_debug->control); - ctrl &= ~(DBGP_CLAIM | DBGP_OUT); - writel(ctrl, &ehci_debug->control); - return -1; next_debug_port: port_map_tried |= (1<<(debug_port - 1)); @@ -749,6 +870,7 @@ int __init early_dbgp_init(char *s) return -1; } + dbgp_ehci_status("early_init_complete"); return 0; } @@ -758,9 +880,27 @@ static void early_dbgp_write(struct console *con, const char *str, u32 n) int chunk, ret; char buf[DBGP_MAX_PACKET]; int use_cr = 0; + u32 cmd, ctrl; + int reset_run = 0; - if (!ehci_debug) + if (!ehci_debug || dbgp_not_safe) return; + + cmd = readl(&ehci_regs->command); + if (unlikely(!(cmd & CMD_RUN))) { + /* If the ehci controller is not in the run state do extended + * checks to see if the acpi or some other initialization also + * reset the ehci debug port */ + ctrl = readl(&ehci_debug->control); + if (!(ctrl & DBGP_ENABLED)) { + dbgp_not_safe = 1; + dbgp_external_startup(); + } else { + cmd |= CMD_RUN; + writel(cmd, &ehci_regs->command); + reset_run = 1; + } + } while (n > 0) { for (chunk = 0; chunk < DBGP_MAX_PACKET && n > 0; str++, chunk++, n--) { @@ -775,8 +915,15 @@ static void early_dbgp_write(struct console *con, const char *str, u32 n) use_cr = 0; buf[chunk] = *str; } - ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, - dbgp_endpoint_out, buf, chunk); + if (chunk > 0) { + ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, + dbgp_endpoint_out, buf, chunk); + } + } + if (unlikely(reset_run)) { + cmd = readl(&ehci_regs->command); + cmd &= ~CMD_RUN; + writel(cmd, &ehci_regs->command); } } @@ -786,4 +933,3 @@ struct console early_dbgp_console = { .flags = CON_PRINTBUFFER, .index = -1, }; - diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 1deac2a7b12..07851e05d76 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -176,4 +176,9 @@ extern int __init early_dbgp_init(char *s); extern struct console early_dbgp_console; #endif /* CONFIG_EARLY_PRINTK_DBGP */ 
+#ifdef CONFIG_EARLY_PRINTK_DBGP +/* Call backs from ehci host driver to ehci debug driver */ +extern int dbgp_external_startup(void); +#endif + #endif /* __LINUX_USB_EHCI_DEF_H */ -- cgit v1.2.3-70-g09d2 From 8d053c79f22462f55c02c8083580730b922cf7b4 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 20 Aug 2009 15:39:54 -0500 Subject: USB: ehci-dbgp,ehci: Allow early or late use of the dbgp device If the EHCI debug port is initialized and in use, the EHCI host controller driver must follow two rules. 1) If the EHCI host driver issues a controller reset, the debug controller driver re-initialization must get called after the reset is completed. 2) The EHCI host driver should ignore any requests to the physical EHCI debug port when the EHCI debug port is in use. The code to check for the debug port was moved from ehci_pci_reinit() to ehci_pci_setup because it must get called prior to ehci_reset() which will clear the debug port registers. Signed-off-by: Jason Wessel Cc: Alan Stern Cc: dbrownell@users.sourceforge.net Cc: Ingo Molnar Cc: Andrew Morton Cc: Yinghai Lu Cc: "Eric W. Biederman" Signed-off-by: Greg Kroah-Hartman --- drivers/usb/early/ehci-dbgp.c | 23 +++++++++++++++++++++++ drivers/usb/host/ehci-hcd.c | 8 ++++++++ drivers/usb/host/ehci-hub.c | 10 ++++++++++ drivers/usb/host/ehci-pci.c | 39 +++++++++++++++++++-------------------- include/linux/usb/ehci_def.h | 10 ++++++++++ 5 files changed, 70 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index 06e05ea1787..b88cb65b64e 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -933,3 +933,26 @@ struct console early_dbgp_console = { .flags = CON_PRINTBUFFER, .index = -1, }; + +int dbgp_reset_prep(void) +{ + u32 ctrl; + + dbgp_not_safe = 1; + if (!ehci_debug) + return 0; + + if (early_dbgp_console.index != -1 && + !(early_dbgp_console.flags & CON_BOOT)) + return 1; + /* This means the console is not initialized, or should get + * shutdown so as to allow for reuse of the usb device, which + * means it is time to shutdown the usb debug port. 
*/ + ctrl = readl(&ehci_debug->control); + if (ctrl & DBGP_ENABLED) { + ctrl &= ~(DBGP_CLAIM); + writel(ctrl, &ehci_debug->control); + } + return 0; +} +EXPORT_SYMBOL_GPL(dbgp_reset_prep); diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 4f89d7ffd53..9835e071394 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -240,6 +240,11 @@ static int ehci_reset (struct ehci_hcd *ehci) int retval; u32 command = ehci_readl(ehci, &ehci->regs->command); + /* If the EHCI debug controller is active, special care must be + * taken before and after a host controller reset */ + if (ehci->debug && !dbgp_reset_prep()) + ehci->debug = NULL; + command |= CMD_RESET; dbg_cmd (ehci, "reset", command); ehci_writel(ehci, command, &ehci->regs->command); @@ -260,6 +265,9 @@ static int ehci_reset (struct ehci_hcd *ehci) if (ehci_is_TDI(ehci)) tdi_reset (ehci); + if (ehci->debug) + dbgp_external_startup(); + return retval; } diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 818647c33da..6b5e4d18d4b 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -855,6 +855,15 @@ static int ehci_hub_control ( case SetPortFeature: selector = wIndex >> 8; wIndex &= 0xff; + if (unlikely(ehci->debug)) { + /* If the debug port is active any port + * feature requests should get denied */ + if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) && + (readl(&ehci->debug->control) & DBGP_ENABLED)) { + retval = -ENODEV; + goto error_exit; + } + } if (!wIndex || wIndex > ports) goto error; wIndex--; @@ -951,6 +960,7 @@ error: /* "stall" on error */ retval = -EPIPE; } +error_exit: spin_unlock_irqrestore (&ehci->lock, flags); return retval; } diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index a88ad517ec5..378861b9d79 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -27,28 +27,8 @@ /* called after powerup, by probe or system-pm "wakeup" */ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) { - u32 temp; int retval; - /* optional debug port, normally in the first BAR */ - temp = pci_find_capability(pdev, 0x0a); - if (temp) { - pci_read_config_dword(pdev, temp, &temp); - temp >>= 16; - if ((temp & (3 << 13)) == (1 << 13)) { - temp &= 0x1fff; - ehci->debug = ehci_to_hcd(ehci)->regs + temp; - temp = ehci_readl(ehci, &ehci->debug->control); - ehci_info(ehci, "debug port %d%s\n", - HCS_DEBUG_PORT(ehci->hcs_params), - (temp & DBGP_ENABLED) - ? " IN USE" - : ""); - if (!(temp & DBGP_ENABLED)) - ehci->debug = NULL; - } - } - /* we expect static quirk code to handle the "extended capabilities" * (currently just BIOS handoff) allowed starting with EHCI 0.96 */ @@ -195,6 +175,25 @@ static int ehci_pci_setup(struct usb_hcd *hcd) break; } + /* optional debug port, normally in the first BAR */ + temp = pci_find_capability(pdev, 0x0a); + if (temp) { + pci_read_config_dword(pdev, temp, &temp); + temp >>= 16; + if ((temp & (3 << 13)) == (1 << 13)) { + temp &= 0x1fff; + ehci->debug = ehci_to_hcd(ehci)->regs + temp; + temp = ehci_readl(ehci, &ehci->debug->control); + ehci_info(ehci, "debug port %d%s\n", + HCS_DEBUG_PORT(ehci->hcs_params), + (temp & DBGP_ENABLED) + ? 
" IN USE" + : ""); + if (!(temp & DBGP_ENABLED)) + ehci->debug = NULL; + } + } + ehci_reset(ehci); /* at least the Genesys GL880S needs fixup here */ diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 07851e05d76..1909d924f81 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -179,6 +179,16 @@ extern struct console early_dbgp_console; #ifdef CONFIG_EARLY_PRINTK_DBGP /* Call backs from ehci host driver to ehci debug driver */ extern int dbgp_external_startup(void); +extern int dbgp_reset_prep(void); +#else +static inline int dbgp_reset_prep(void) +{ + return 1; +} +static inline int dbgp_external_startup(void) +{ + return -1; +} #endif #endif /* __LINUX_USB_EHCI_DEF_H */ -- cgit v1.2.3-70-g09d2 From aab2d4086a1876fcff282aa36e2d4a92aa9935c9 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 20 Aug 2009 15:39:55 -0500 Subject: USB: ehci-dbgp: errata for EHCI debug controller initialization On some EHCI usb debug controllers, the EHCI debug device will fail to be seen after a port reset, after a warm reset. Two options exist to get the device to initialize correctly. Option 1 is to unplug and plug in the device. Option 2 is to use the EHCI port test to get the usb debug device to start talking again. At that point the debug controller port reset will succeed. Signed-off-by: Jason Wessel Cc: Ingo Molnar Cc: Andrew Morton Cc: Yinghai Lu Cc: "Eric W. Biederman" CC: dbrownell@users.sourceforge.net Signed-off-by: Greg Kroah-Hartman --- drivers/usb/early/ehci-dbgp.c | 23 ++++++++++++++++++++++- include/linux/usb/ehci_def.h | 1 + 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index b88cb65b64e..f0a41c647be 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -478,10 +478,13 @@ int dbgp_external_startup(void) int devnum; struct usb_debug_descriptor dbgp_desc; int ret; - u32 ctrl, portsc; + u32 ctrl, portsc, cmd; int dbg_port = dbgp_phys_port; int tries = 3; + int reset_port_tries = 1; + int try_hard_once = 1; +try_port_reset_again: ret = dbgp_ehci_startup(); if (ret) return ret; @@ -490,6 +493,24 @@ int dbgp_external_startup(void) ret = ehci_wait_for_port(dbg_port); if (ret < 0) { portsc = readl(&ehci_regs->port_status[dbg_port - 1]); + if (!(portsc & PORT_CONNECT) && try_hard_once) { + /* Last ditch effort to try to force enable + * the debug device by using the packet test + * ehci command to try and wake it up. 
*/ + try_hard_once = 0; + cmd = readl(&ehci_regs->command); + cmd &= ~CMD_RUN; + writel(cmd, &ehci_regs->command); + portsc = readl(&ehci_regs->port_status[dbg_port - 1]); + portsc |= PORT_TEST_PKT; + writel(portsc, &ehci_regs->port_status[dbg_port - 1]); + dbgp_ehci_status("Trying to force debug port online"); + mdelay(50); + dbgp_ehci_controller_reset(); + goto try_port_reset_again; + } else if (reset_port_tries--) { + goto try_port_reset_again; + } dbgp_printk("No device found in debug port\n"); return -EIO; } diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 1909d924f81..af4b86f3aca 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -105,6 +105,7 @@ struct ehci_regs { #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */ #define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */ /* 19:16 for port testing */ +#define PORT_TEST_PKT (0x4<<16) /* Port Test Control - packet test */ #define PORT_LED_OFF (0<<14) #define PORT_LED_AMBER (1<<14) #define PORT_LED_GREEN (2<<14) -- cgit v1.2.3-70-g09d2 From 01c6460f968d7b57fc6f98adb587952628c6e099 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 1 Sep 2009 11:09:56 -0400 Subject: USB: usbfs: add USBDEVFS_URB_BULK_CONTINUATION flag This patch (as1283) adds a new flag, USBDEVFS_URB_BULK_CONTINUATION, to usbfs. It is intended for userspace libraries such as libusb and openusb. When they have to break up a single usbfs bulk transfer into multiple URBs, they will set the flag on all but the first URB of the series. If an error other than an unlink occurs, the kernel will automatically cancel all the following URBs for the same endpoint and refuse to accept new submissions, until an URB is encountered that is not marked as a BULK_CONTINUATION. Such an URB would indicate the start of a new transfer or the presence of an older library, so the kernel returns to normal operation. This enables libraries to delimit bulk transfers correctly, even in the presence of early termination as indicated by short packets. Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 78 +++++++++++++++++++++++++++++++++++++++++++- include/linux/usbdevice_fs.h | 1 + 2 files changed, 78 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 71514be8b71..181f78c8410 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -74,6 +74,7 @@ struct dev_state { void __user *disccontext; unsigned long ifclaimed; u32 secid; + u32 disabled_bulk_eps; }; struct async { @@ -88,6 +89,8 @@ struct async { struct urb *urb; int status; u32 secid; + u8 bulk_addr; + u8 bulk_status; }; static int usbfs_snoop; @@ -343,6 +346,43 @@ static void snoop_urb(struct usb_device *udev, } } +#define AS_CONTINUATION 1 +#define AS_UNLINK 2 + +static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr) +__releases(ps->lock) +__acquires(ps->lock) +{ + struct async *as; + + /* Mark all the pending URBs that match bulk_addr, up to but not + * including the first one without AS_CONTINUATION. If such an + * URB is encountered then a new transfer has already started so + * the endpoint doesn't need to be disabled; otherwise it does. 
+ */ + list_for_each_entry(as, &ps->async_pending, asynclist) { + if (as->bulk_addr == bulk_addr) { + if (as->bulk_status != AS_CONTINUATION) + goto rescan; + as->bulk_status = AS_UNLINK; + as->bulk_addr = 0; + } + } + ps->disabled_bulk_eps |= (1 << bulk_addr); + + /* Now carefully unlink all the marked pending URBs */ + rescan: + list_for_each_entry(as, &ps->async_pending, asynclist) { + if (as->bulk_status == AS_UNLINK) { + as->bulk_status = 0; /* Only once */ + spin_unlock(&ps->lock); /* Allow completions */ + usb_unlink_urb(as->urb); + spin_lock(&ps->lock); + goto rescan; + } + } +} + static void async_completed(struct urb *urb) { struct async *as = urb->context; @@ -371,6 +411,9 @@ static void async_completed(struct urb *urb) snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, as->status, COMPLETE); + if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && + as->status != -ENOENT) + cancel_bulk_urbs(ps, as->bulk_addr); spin_unlock(&ps->lock); if (signr) @@ -993,6 +1036,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP | USBDEVFS_URB_SHORT_NOT_OK | + USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_NO_INTERRUPT)) @@ -1194,7 +1238,39 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, snoop_urb(ps->dev, as->userurb, as->urb->pipe, as->urb->transfer_buffer_length, 0, SUBMIT); async_newpending(as); - if ((ret = usb_submit_urb(as->urb, GFP_KERNEL))) { + + if (usb_endpoint_xfer_bulk(&ep->desc)) { + spin_lock_irq(&ps->lock); + + /* Not exactly the endpoint address; the direction bit is + * shifted to the 0x10 position so that the value will be + * between 0 and 31. + */ + as->bulk_addr = usb_endpoint_num(&ep->desc) | + ((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) + >> 3); + + /* If this bulk URB is the start of a new transfer, re-enable + * the endpoint. Otherwise mark it as a continuation URB. + */ + if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION) + as->bulk_status = AS_CONTINUATION; + else + ps->disabled_bulk_eps &= ~(1 << as->bulk_addr); + + /* Don't accept continuation URBs if the endpoint is + * disabled because of an earlier error. 
+ */ + if (ps->disabled_bulk_eps & (1 << as->bulk_addr)) + ret = -EREMOTEIO; + else + ret = usb_submit_urb(as->urb, GFP_ATOMIC); + spin_unlock_irq(&ps->lock); + } else { + ret = usb_submit_urb(as->urb, GFP_KERNEL); + } + + if (ret) { dev_printk(KERN_DEBUG, &ps->dev->dev, "usbfs: usb_submit_urb returned %d\n", ret); snoop_urb(ps->dev, as->userurb, as->urb->pipe, diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h index 00ceebeb9e5..b2a7d8ba6ee 100644 --- a/include/linux/usbdevice_fs.h +++ b/include/linux/usbdevice_fs.h @@ -77,6 +77,7 @@ struct usbdevfs_connectinfo { #define USBDEVFS_URB_SHORT_NOT_OK 0x01 #define USBDEVFS_URB_ISO_ASAP 0x02 +#define USBDEVFS_URB_BULK_CONTINUATION 0x04 #define USBDEVFS_URB_NO_FSBR 0x20 #define USBDEVFS_URB_ZERO_PACKET 0x40 #define USBDEVFS_URB_NO_INTERRUPT 0x80 -- cgit v1.2.3-70-g09d2 From fa081b00a80ef3f4575c99af6e97d29e1628cf51 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 22 Sep 2009 16:43:27 -0700 Subject: include/linux/kmemcheck.h: fix a trillion warnings of the form include/net/inet_sock.h:208: warning: ISO C90 forbids mixed declarations and code Cc: Johannes Berg Acked-by: Vegard Nossum Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kmemcheck.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index c8006607f94..136cdcdf92e 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -145,10 +145,12 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) #define kmemcheck_annotate_bitfield(ptr, name) \ do { \ + int _n; \ + \ if (!ptr) \ break; \ \ - int _n = (long) &((ptr)->name##_end) \ + _n = (long) &((ptr)->name##_end) \ - (long) &((ptr)->name##_begin); \ BUILD_BUG_ON(_n < 0); \ \ -- cgit v1.2.3-70-g09d2 From af91322ef3f29ae4114e736e2a72e28b4d619cf9 Mon Sep 17 00:00:00 2001 From: Dave Young Date: Tue, 22 Sep 2009 16:43:33 -0700 Subject: printk: add printk_delay to make messages readable for some scenarios When syslog is not possible, at the same time there's no serial/net console available, it will be hard to read the printk messages. For example oops/panic/warning messages in shutdown phase. Add a printk delay feature, we can make each printk message delay some milliseconds. Setting the delay by proc/sysctl interface: /proc/sys/kernel/printk_delay The value range from 0 - 10000, default value is 0 [akpm@linux-foundation.org: fix a few things] Signed-off-by: Dave Young Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/kernel.txt | 8 ++++++++ include/linux/kernel.h | 2 ++ kernel/printk.c | 15 +++++++++++++++ kernel/sysctl.c | 14 ++++++++++++++ 4 files changed, 39 insertions(+) (limited to 'include') diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 3e5b63ebb82..b3d8b492274 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -313,6 +313,14 @@ send before ratelimiting kicks in. ============================================================== +printk_delay: + +Delay each printk message in printk_delay milliseconds + +Value from 0 - 10000 is allowed. 
+ +============================================================== + randomize-va-space: This option can be used to select the type of process address diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2b5b1e0899a..0a2a1908786 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -246,6 +246,8 @@ extern int printk_ratelimit(void); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); +extern int printk_delay_msec; + /* * Print a one-time message (analogous to WARN_ONCE() et al): */ diff --git a/kernel/printk.c b/kernel/printk.c index 932ea21feb1..f38b07f78a4 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -653,6 +653,20 @@ static int recursion_bug; static int new_text_line = 1; static char printk_buf[1024]; +int printk_delay_msec __read_mostly; + +static inline void printk_delay(void) +{ + if (unlikely(printk_delay_msec)) { + int m = printk_delay_msec; + + while (m--) { + mdelay(1); + touch_nmi_watchdog(); + } + } +} + asmlinkage int vprintk(const char *fmt, va_list args) { int printed_len = 0; @@ -662,6 +676,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) char *p; boot_delay_msec(); + printk_delay(); preempt_disable(); /* This stops the holder of console_sem just where we want him */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6ba49c7cb12..0dfaa47d7cb 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -106,6 +106,9 @@ static int __maybe_unused one = 1; static int __maybe_unused two = 2; static unsigned long one_ul = 1; static int one_hundred = 100; +#ifdef CONFIG_PRINTK +static int ten_thousand = 10000; +#endif /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; @@ -722,6 +725,17 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "printk_delay", + .data = &printk_delay_msec, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &ten_thousand, + }, #endif { .ctl_name = KERN_NGROUPS_MAX, -- cgit v1.2.3-70-g09d2 From 1fd7317d02ec03c6fdf072317841287933d06d24 Mon Sep 17 00:00:00 2001 From: Nick Black Date: Tue, 22 Sep 2009 16:43:33 -0700 Subject: Move magic numbers into magic.h Move various magic-number definitions into magic.h. Signed-off-by: Nick Black Acked-by: Pekka Enberg Cc: Al Viro Cc: "David S. 
Miller" Cc: Casey Schaufler Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 3 +-- fs/hugetlbfs/inode.c | 4 +--- include/linux/magic.h | 5 +++++ net/socket.c | 3 +-- security/smack/smack_lsm.c | 8 +------- 5 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 75efb028974..d5f8c96964b 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -18,14 +18,13 @@ #include #include #include +#include #include #include #include #include #include -#define DEVPTS_SUPER_MAGIC 0x1cd1 - #define DEVPTS_DEFAULT_MODE 0600 /* * ptmx is a new node in /dev/pts and will be unused in legacy (single- diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 06b7c2623f9..eba6d552d9c 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -31,12 +31,10 @@ #include #include #include +#include #include -/* some random number */ -#define HUGETLBFS_MAGIC 0x958458f6 - static const struct super_operations hugetlbfs_ops; static const struct address_space_operations hugetlbfs_aops; const struct file_operations hugetlbfs_file_operations; diff --git a/include/linux/magic.h b/include/linux/magic.h index 1923327b986..bce37786a0a 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -13,6 +13,7 @@ #define SECURITYFS_MAGIC 0x73636673 #define SELINUX_MAGIC 0xf97cff8c #define TMPFS_MAGIC 0x01021994 +#define HUGETLBFS_MAGIC 0x958458f6 /* some random number */ #define SQUASHFS_MAGIC 0x73717368 #define EFS_SUPER_MAGIC 0x414A53 #define EXT2_SUPER_MAGIC 0xEF53 @@ -53,4 +54,8 @@ #define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA #define STACK_END_MAGIC 0x57AC6E9D + +#define DEVPTS_SUPER_MAGIC 0x1cd1 +#define SOCKFS_MAGIC 0x534F434B + #endif /* __LINUX_MAGIC_H__ */ diff --git a/net/socket.c b/net/socket.c index 0ad02ae61a9..49917a1cac7 100644 --- a/net/socket.c +++ b/net/socket.c @@ -86,6 +86,7 @@ #include #include #include +#include #include #include @@ -235,8 +236,6 @@ int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, return __put_user(klen, ulen); } -#define SOCKFS_MAGIC 0x534F434B - static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index acae7ef4092..c33b6bb9b6d 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -30,17 +30,11 @@ #include #include #include +#include #include "smack.h" #define task_security(task) (task_cred_xxx((task), security)) -/* - * I hope these are the hokeyist lines of code in the module. Casey. - */ -#define DEVPTS_SUPER_MAGIC 0x1cd1 -#define SOCKFS_MAGIC 0x534F434B -#define TMPFS_MAGIC 0x01021994 - /** * smk_fetch - Fetch the smack label from a file. * @ip: a pointer to the inode -- cgit v1.2.3-70-g09d2 From 54fdade1c3332391948ec43530c02c4794a38172 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 22 Sep 2009 16:43:39 -0700 Subject: generic-ipi: make struct call_function_data lockless This patch can remove spinlock from struct call_function_data, the reasons are below: 1: add a new interface for cpumask named cpumask_test_and_clear_cpu(), it can atomically test and clear specific cpu, we can use it instead of cpumask_test_cpu() and cpumask_clear_cpu() and no need data->lock to protect those in generic_smp_call_function_interrupt(). 
2: in smp_call_function_many(), after csd_lock() return, the current's cfd_data is deleted from call_function list, so it not have race between other cpus, then cfs_data is only used in smp_call_function_many() that must disable preemption and not from a hardware interrupthandler or from a bottom half handler to call, only the correspond cpu can use it, so it not have race in current cpu, no need cfs_data->lock to protect it. 3: after 1 and 2, cfs_data->lock is only use to protect cfs_data->refs in generic_smp_call_function_interrupt(), so we can define cfs_data->refs to atomic_t, and no need cfs_data->lock any more. Signed-off-by: Xiao Guangrong Cc: Ingo Molnar Cc: Jens Axboe Cc: Nick Piggin Cc: Peter Zijlstra Acked-by: Rusty Russell [akpm@linux-foundation.org: use atomic_dec_return()] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpumask.h | 12 ++++++++++++ kernel/smp.c | 29 ++++++++--------------------- 2 files changed, 20 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 796df12091b..9b1d458aac6 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -714,6 +714,18 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } +/** + * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * test_and_clear_bit wrapper for cpumasks. + */ +static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); +} + /** * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask * @dstp: the cpumask pointer diff --git a/kernel/smp.c b/kernel/smp.c index 8e218500ab1..fd47a256a24 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -29,8 +29,7 @@ enum { struct call_function_data { struct call_single_data csd; - spinlock_t lock; - unsigned int refs; + atomic_t refs; cpumask_var_t cpumask; }; @@ -39,9 +38,7 @@ struct call_single_queue { spinlock_t lock; }; -static DEFINE_PER_CPU(struct call_function_data, cfd_data) = { - .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock), -}; +static DEFINE_PER_CPU(struct call_function_data, cfd_data); static int hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -196,25 +193,18 @@ void generic_smp_call_function_interrupt(void) list_for_each_entry_rcu(data, &call_function.queue, csd.list) { int refs; - spin_lock(&data->lock); - if (!cpumask_test_cpu(cpu, data->cpumask)) { - spin_unlock(&data->lock); + if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) continue; - } - cpumask_clear_cpu(cpu, data->cpumask); - spin_unlock(&data->lock); data->csd.func(data->csd.info); - spin_lock(&data->lock); - WARN_ON(data->refs == 0); - refs = --data->refs; + refs = atomic_dec_return(&data->refs); + WARN_ON(refs < 0); if (!refs) { spin_lock(&call_function.lock); list_del_rcu(&data->csd.list); spin_unlock(&call_function.lock); } - spin_unlock(&data->lock); if (refs) continue; @@ -419,23 +409,20 @@ void smp_call_function_many(const struct cpumask *mask, data = &__get_cpu_var(cfd_data); csd_lock(&data->csd); - spin_lock_irqsave(&data->lock, flags); data->csd.func = func; data->csd.info = info; cpumask_and(data->cpumask, mask, cpu_online_mask); cpumask_clear_cpu(this_cpu, data->cpumask); - data->refs = cpumask_weight(data->cpumask); + atomic_set(&data->refs, cpumask_weight(data->cpumask)); 
- spin_lock(&call_function.lock); + spin_lock_irqsave(&call_function.lock, flags); /* * Place entry at the _HEAD_ of the list, so that any cpu still * observing the entry in generic_smp_call_function_interrupt() * will not miss any other list entries: */ list_add_rcu(&data->csd.list, &call_function.queue); - spin_unlock(&call_function.lock); - - spin_unlock_irqrestore(&data->lock, flags); + spin_unlock_irqrestore(&call_function.lock, flags); /* * Make the list addition visible before sending the ipi. -- cgit v1.2.3-70-g09d2 From 88e9d34c727883d7d6f02cf1475b3ec98b8480c7 Mon Sep 17 00:00:00 2001 From: James Morris Date: Tue, 22 Sep 2009 16:43:43 -0700 Subject: seq_file: constify seq_operations Make all seq_operations structs const, to help mitigate against revectoring user-triggerable function pointers. This is derived from the grsecurity patch, although generated from scratch because it's simpler than extracting the changes from there. Signed-off-by: James Morris Acked-by: Serge Hallyn Acked-by: Casey Schaufler Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mn10300/kernel/setup.c | 2 +- arch/powerpc/kernel/setup-common.c | 2 +- arch/powerpc/platforms/pseries/hvCall_inst.c | 2 +- drivers/block/cciss.c | 2 +- drivers/char/misc.c | 2 +- drivers/char/tpm/tpm_bios.c | 4 ++-- drivers/isdn/capi/kcapi_proc.c | 10 +++++----- drivers/scsi/sg.c | 6 +++--- fs/afs/proc.c | 8 ++++---- fs/dlm/debug_fs.c | 12 ++++++------ fs/jbd2/journal.c | 4 ++-- fs/nfs/client.c | 4 ++-- fs/nfsd/export.c | 2 +- fs/ocfs2/cluster/netdebug.c | 4 ++-- fs/ocfs2/dlm/dlmdebug.c | 2 +- fs/proc/nommu.c | 2 +- include/linux/nfsd/nfsd.h | 2 +- ipc/util.c | 2 +- kernel/cgroup.c | 2 +- kernel/kprobes.c | 2 +- kernel/lockdep_proc.c | 2 +- kernel/trace/ftrace.c | 4 ++-- kernel/trace/trace.c | 4 ++-- net/ipv6/ip6mr.c | 2 +- security/integrity/ima/ima_fs.c | 4 ++-- security/smack/smackfs.c | 6 +++--- 26 files changed, 49 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c index 79890edfd67..3f24c298a3a 100644 --- a/arch/mn10300/kernel/setup.c +++ b/arch/mn10300/kernel/setup.c @@ -285,7 +285,7 @@ static void c_stop(struct seq_file *m, void *v) { } -struct seq_operations cpuinfo_op = { +const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 02fed27af7f..1d5570a1e45 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -328,7 +328,7 @@ static void c_stop(struct seq_file *m, void *v) { } -struct seq_operations cpuinfo_op = { +const struct seq_operations cpuinfo_op = { .start =c_start, .next = c_next, .stop = c_stop, diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index eae51ef9af2..3631a4f277e 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -71,7 +71,7 @@ static int hc_show(struct seq_file *m, void *p) return 0; } -static struct seq_operations hcall_inst_seq_ops = { +static const struct seq_operations hcall_inst_seq_ops = { .start = hc_start, .next = hc_next, .stop = hc_stop, diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 4f19105f755..24c3e21ab26 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -363,7 +363,7 @@ static void cciss_seq_stop(struct seq_file *seq, void *v) h->busy_configuring = 0; } -static struct seq_operations 
cciss_seq_ops = { +static const struct seq_operations cciss_seq_ops = { .start = cciss_seq_start, .show = cciss_seq_show, .next = cciss_seq_next, diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 1ee27cc2342..07fa612a58d 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -91,7 +91,7 @@ static int misc_seq_show(struct seq_file *seq, void *v) } -static struct seq_operations misc_seq_ops = { +static const struct seq_operations misc_seq_ops = { .start = misc_seq_start, .next = misc_seq_next, .stop = misc_seq_stop, diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c index 0c2f55a38b9..bf2170fb1cd 100644 --- a/drivers/char/tpm/tpm_bios.c +++ b/drivers/char/tpm/tpm_bios.c @@ -343,14 +343,14 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations tpm_ascii_b_measurments_seqops = { +static const struct seq_operations tpm_ascii_b_measurments_seqops = { .start = tpm_bios_measurements_start, .next = tpm_bios_measurements_next, .stop = tpm_bios_measurements_stop, .show = tpm_ascii_bios_measurements_show, }; -static struct seq_operations tpm_binary_b_measurments_seqops = { +static const struct seq_operations tpm_binary_b_measurments_seqops = { .start = tpm_bios_measurements_start, .next = tpm_bios_measurements_next, .stop = tpm_bios_measurements_stop, diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c index 50ed778f63f..09d4db764d2 100644 --- a/drivers/isdn/capi/kcapi_proc.c +++ b/drivers/isdn/capi/kcapi_proc.c @@ -89,14 +89,14 @@ static int contrstats_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations seq_controller_ops = { +static const struct seq_operations seq_controller_ops = { .start = controller_start, .next = controller_next, .stop = controller_stop, .show = controller_show, }; -static struct seq_operations seq_contrstats_ops = { +static const struct seq_operations seq_contrstats_ops = { .start = controller_start, .next = controller_next, .stop = controller_stop, @@ -194,14 +194,14 @@ applstats_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations seq_applications_ops = { +static const struct seq_operations seq_applications_ops = { .start = applications_start, .next = applications_next, .stop = applications_stop, .show = applications_show, }; -static struct seq_operations seq_applstats_ops = { +static const struct seq_operations seq_applstats_ops = { .start = applications_start, .next = applications_next, .stop = applications_stop, @@ -264,7 +264,7 @@ static int capi_driver_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations seq_capi_driver_ops = { +static const struct seq_operations seq_capi_driver_ops = { .start = capi_driver_start, .next = capi_driver_next, .stop = capi_driver_stop, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 4968c4ced38..848b5946685 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2233,7 +2233,7 @@ static struct file_operations dev_fops = { .open = sg_proc_open_dev, .release = seq_release, }; -static struct seq_operations dev_seq_ops = { +static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, @@ -2246,7 +2246,7 @@ static struct file_operations devstrs_fops = { .open = sg_proc_open_devstrs, .release = seq_release, }; -static struct seq_operations devstrs_seq_ops = { +static const struct seq_operations devstrs_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, 
@@ -2259,7 +2259,7 @@ static struct file_operations debug_fops = { .open = sg_proc_open_debug, .release = seq_release, }; -static struct seq_operations debug_seq_ops = { +static const struct seq_operations debug_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 8630615e57f..852739d262a 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -28,7 +28,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v); static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, size_t size, loff_t *_pos); -static struct seq_operations afs_proc_cells_ops = { +static const struct seq_operations afs_proc_cells_ops = { .start = afs_proc_cells_start, .next = afs_proc_cells_next, .stop = afs_proc_cells_stop, @@ -70,7 +70,7 @@ static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v); static int afs_proc_cell_volumes_show(struct seq_file *m, void *v); -static struct seq_operations afs_proc_cell_volumes_ops = { +static const struct seq_operations afs_proc_cell_volumes_ops = { .start = afs_proc_cell_volumes_start, .next = afs_proc_cell_volumes_next, .stop = afs_proc_cell_volumes_stop, @@ -95,7 +95,7 @@ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v); static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v); -static struct seq_operations afs_proc_cell_vlservers_ops = { +static const struct seq_operations afs_proc_cell_vlservers_ops = { .start = afs_proc_cell_vlservers_start, .next = afs_proc_cell_vlservers_next, .stop = afs_proc_cell_vlservers_stop, @@ -119,7 +119,7 @@ static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, static void afs_proc_cell_servers_stop(struct seq_file *p, void *v); static int afs_proc_cell_servers_show(struct seq_file *m, void *v); -static struct seq_operations afs_proc_cell_servers_ops = { +static const struct seq_operations afs_proc_cell_servers_ops = { .start = afs_proc_cell_servers_start, .next = afs_proc_cell_servers_next, .stop = afs_proc_cell_servers_stop, diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 1d1d2744223..1c8bb8c3a82 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -386,9 +386,9 @@ static int table_seq_show(struct seq_file *seq, void *iter_ptr) return rv; } -static struct seq_operations format1_seq_ops; -static struct seq_operations format2_seq_ops; -static struct seq_operations format3_seq_ops; +static const struct seq_operations format1_seq_ops; +static const struct seq_operations format2_seq_ops; +static const struct seq_operations format3_seq_ops; static void *table_seq_start(struct seq_file *seq, loff_t *pos) { @@ -534,21 +534,21 @@ static void table_seq_stop(struct seq_file *seq, void *iter_ptr) } } -static struct seq_operations format1_seq_ops = { +static const struct seq_operations format1_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; -static struct seq_operations format2_seq_ops = { +static const struct seq_operations format2_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; -static struct seq_operations format3_seq_ops = { +static const struct seq_operations format3_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 
a8a358bc0f2..53b86e16e5f 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -768,7 +768,7 @@ static void jbd2_seq_history_stop(struct seq_file *seq, void *v) { } -static struct seq_operations jbd2_seq_history_ops = { +static const struct seq_operations jbd2_seq_history_ops = { .start = jbd2_seq_history_start, .next = jbd2_seq_history_next, .stop = jbd2_seq_history_stop, @@ -872,7 +872,7 @@ static void jbd2_seq_info_stop(struct seq_file *seq, void *v) { } -static struct seq_operations jbd2_seq_info_ops = { +static const struct seq_operations jbd2_seq_info_ops = { .start = jbd2_seq_info_start, .next = jbd2_seq_info_next, .stop = jbd2_seq_info_stop, diff --git a/fs/nfs/client.c b/fs/nfs/client.c index a7ce15d3c24..152025358da 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1531,7 +1531,7 @@ static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos); static void nfs_server_list_stop(struct seq_file *p, void *v); static int nfs_server_list_show(struct seq_file *m, void *v); -static struct seq_operations nfs_server_list_ops = { +static const struct seq_operations nfs_server_list_ops = { .start = nfs_server_list_start, .next = nfs_server_list_next, .stop = nfs_server_list_stop, @@ -1552,7 +1552,7 @@ static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos); static void nfs_volume_list_stop(struct seq_file *p, void *v); static int nfs_volume_list_show(struct seq_file *m, void *v); -static struct seq_operations nfs_volume_list_ops = { +static const struct seq_operations nfs_volume_list_ops = { .start = nfs_volume_list_start, .next = nfs_volume_list_next, .stop = nfs_volume_list_stop, diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 984a5ebcc1d..c1c9e035d4a 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1517,7 +1517,7 @@ static int e_show(struct seq_file *m, void *p) return svc_export_show(m, &svc_export_cache, cp); } -struct seq_operations nfs_exports_op = { +const struct seq_operations nfs_exports_op = { .start = e_start, .next = e_next, .stop = e_stop, diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index f8424874fa0..cfb2be708ab 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -163,7 +163,7 @@ static void nst_seq_stop(struct seq_file *seq, void *v) { } -static struct seq_operations nst_seq_ops = { +static const struct seq_operations nst_seq_ops = { .start = nst_seq_start, .next = nst_seq_next, .stop = nst_seq_stop, @@ -344,7 +344,7 @@ static void sc_seq_stop(struct seq_file *seq, void *v) { } -static struct seq_operations sc_seq_ops = { +static const struct seq_operations sc_seq_ops = { .start = sc_seq_start, .next = sc_seq_next, .stop = sc_seq_stop, diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index df52f706f66..c5c88124096 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -683,7 +683,7 @@ static int lockres_seq_show(struct seq_file *s, void *v) return 0; } -static struct seq_operations debug_lockres_ops = { +static const struct seq_operations debug_lockres_ops = { .start = lockres_seq_start, .stop = lockres_seq_stop, .next = lockres_seq_next, diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 7e14d1a0400..9fe7d7ebe11 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c @@ -109,7 +109,7 @@ static void *nommu_region_list_next(struct seq_file *m, void *v, loff_t *pos) return rb_next((struct rb_node *) v); } -static struct seq_operations proc_nommu_region_list_seqop = { +static const struct seq_operations proc_nommu_region_list_seqop = { .start 
= nommu_region_list_start, .next = nommu_region_list_next, .stop = nommu_region_list_stop, diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 03bbe903910..510ffdd5020 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -60,7 +60,7 @@ extern spinlock_t nfsd_drc_lock; extern unsigned int nfsd_drc_max_mem; extern unsigned int nfsd_drc_mem_used; -extern struct seq_operations nfs_exports_op; +extern const struct seq_operations nfs_exports_op; /* * Function prototypes. diff --git a/ipc/util.c b/ipc/util.c index b8e4ba92f6d..79ce84e890f 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -942,7 +942,7 @@ static int sysvipc_proc_show(struct seq_file *s, void *it) return iface->show(s, it); } -static struct seq_operations sysvipc_proc_seqops = { +static const struct seq_operations sysvipc_proc_seqops = { .start = sysvipc_proc_start, .stop = sysvipc_proc_stop, .next = sysvipc_proc_next, diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 213b7f92fcd..cd83d9933b6 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2314,7 +2314,7 @@ static int cgroup_tasks_show(struct seq_file *s, void *v) return seq_printf(s, "%d\n", *(int *)v); } -static struct seq_operations cgroup_tasks_seq_operations = { +static const struct seq_operations cgroup_tasks_seq_operations = { .start = cgroup_tasks_start, .stop = cgroup_tasks_stop, .next = cgroup_tasks_next, diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ef177d653b2..cfadc1291d0 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1321,7 +1321,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) return 0; } -static struct seq_operations kprobes_seq_ops = { +static const struct seq_operations kprobes_seq_ops = { .start = kprobe_seq_start, .next = kprobe_seq_next, .stop = kprobe_seq_stop, diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index d4b3dbc79fd..d4aba4f3584 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -594,7 +594,7 @@ static int ls_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations lockstat_ops = { +static const struct seq_operations lockstat_ops = { .start = ls_start, .next = ls_next, .stop = ls_stop, diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c71e91bf737..23df7771c93 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1520,7 +1520,7 @@ static int t_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations show_ftrace_seq_ops = { +static const struct seq_operations show_ftrace_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, @@ -2459,7 +2459,7 @@ static int g_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations ftrace_graph_seq_ops = { +static const struct seq_operations ftrace_graph_seq_ops = { .start = g_start, .next = g_next, .stop = g_stop, diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a35925d222b..6c0f6a8a22e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1949,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations tracer_seq_ops = { +static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, @@ -2163,7 +2163,7 @@ static int t_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations show_traces_seq_ops = { +static const struct seq_operations show_traces_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 3907510c2ce..090675e269e 
100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -324,7 +324,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations ipmr_mfc_seq_ops = { +static const struct seq_operations ipmr_mfc_seq_ops = { .start = ipmr_mfc_seq_start, .next = ipmr_mfc_seq_next, .stop = ipmr_mfc_seq_stop, diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index 6bfc7eaebfd..8e9777b7640 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -146,7 +146,7 @@ static int ima_measurements_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations ima_measurments_seqops = { +static const struct seq_operations ima_measurments_seqops = { .start = ima_measurements_start, .next = ima_measurements_next, .stop = ima_measurements_stop, @@ -221,7 +221,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v) return 0; } -static struct seq_operations ima_ascii_measurements_seqops = { +static const struct seq_operations ima_ascii_measurements_seqops = { .start = ima_measurements_start, .next = ima_measurements_next, .stop = ima_measurements_stop, diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index f83a8098072..aeead758509 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -187,7 +187,7 @@ static void load_seq_stop(struct seq_file *s, void *v) /* No-op */ } -static struct seq_operations load_seq_ops = { +static const struct seq_operations load_seq_ops = { .start = load_seq_start, .next = load_seq_next, .show = load_seq_show, @@ -503,7 +503,7 @@ static void cipso_seq_stop(struct seq_file *s, void *v) /* No-op */ } -static struct seq_operations cipso_seq_ops = { +static const struct seq_operations cipso_seq_ops = { .start = cipso_seq_start, .stop = cipso_seq_stop, .next = cipso_seq_next, @@ -697,7 +697,7 @@ static void netlbladdr_seq_stop(struct seq_file *s, void *v) /* No-op */ } -static struct seq_operations netlbladdr_seq_ops = { +static const struct seq_operations netlbladdr_seq_ops = { .start = netlbladdr_seq_start, .stop = netlbladdr_seq_stop, .next = netlbladdr_seq_next, -- cgit v1.2.3-70-g09d2 From 02b51df1b07b4e9ca823c89284e704cadb323cd1 Mon Sep 17 00:00:00 2001 From: Scott James Remnant Date: Tue, 22 Sep 2009 16:43:44 -0700 Subject: proc connector: add event for process becoming session leader The act of a process becoming a session leader is a useful signal to a supervising init daemon such as Upstart. While a daemon will normally do this as part of the process of becoming a daemon, it is rare for its children to do so. When the children do, it is nearly always a sign that the child should be considered detached from the parent and not supervised along with it. The poster-child example is OpenSSH; the per-login children call setsid() so that they may control the pty connected to them. If the primary daemon dies or is restarted, we do not want to consider the per-login children and want to respawn the primary daemon without killing the children. This patch adds a new PROC_SID_EVENT and associated structure to the proc_event event_data union, it arranges for this to be emitted when the special PIDTYPE_SID pid is set. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Scott James Remnant Acked-by: Matt Helsley Cc: Oleg Nesterov Cc: Evgeniy Polyakov Acked-by: "David S. 
Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/connector/cn_proc.c | 25 +++++++++++++++++++++++++ include/linux/cn_proc.h | 10 ++++++++++ kernel/exit.c | 4 +++- 3 files changed, 38 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 85e5dc0431f..abf4a2529f8 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -139,6 +139,31 @@ void proc_id_connector(struct task_struct *task, int which_id) cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } +void proc_sid_connector(struct task_struct *task) +{ + struct cn_msg *msg; + struct proc_event *ev; + struct timespec ts; + __u8 buffer[CN_PROC_MSG_SIZE]; + + if (atomic_read(&proc_event_num_listeners) < 1) + return; + + msg = (struct cn_msg *)buffer; + ev = (struct proc_event *)msg->data; + get_seq(&msg->seq, &ev->cpu); + ktime_get_ts(&ts); /* get high res monotonic timestamp */ + put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->what = PROC_EVENT_SID; + ev->event_data.sid.process_pid = task->pid; + ev->event_data.sid.process_tgid = task->tgid; + + memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); + msg->ack = 0; /* not used */ + msg->len = sizeof(*ev); + cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); +} + void proc_exit_connector(struct task_struct *task) { struct cn_msg *msg; diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h index b8125b2eb66..47dac5ea8d3 100644 --- a/include/linux/cn_proc.h +++ b/include/linux/cn_proc.h @@ -52,6 +52,7 @@ struct proc_event { PROC_EVENT_EXEC = 0x00000002, PROC_EVENT_UID = 0x00000004, PROC_EVENT_GID = 0x00000040, + PROC_EVENT_SID = 0x00000080, /* "next" should be 0x00000400 */ /* "last" is the last process event: exit */ PROC_EVENT_EXIT = 0x80000000 @@ -89,6 +90,11 @@ struct proc_event { } e; } id; + struct sid_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + } sid; + struct exit_proc_event { __kernel_pid_t process_pid; __kernel_pid_t process_tgid; @@ -102,6 +108,7 @@ struct proc_event { void proc_fork_connector(struct task_struct *task); void proc_exec_connector(struct task_struct *task); void proc_id_connector(struct task_struct *task, int which_id); +void proc_sid_connector(struct task_struct *task); void proc_exit_connector(struct task_struct *task); #else static inline void proc_fork_connector(struct task_struct *task) @@ -114,6 +121,9 @@ static inline void proc_id_connector(struct task_struct *task, int which_id) {} +static inline void proc_sid_connector(struct task_struct *task) +{} + static inline void proc_exit_connector(struct task_struct *task) {} #endif /* CONFIG_PROC_EVENTS */ diff --git a/kernel/exit.c b/kernel/exit.c index e47ee8a0613..61bb1761c7b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -359,8 +359,10 @@ void __set_special_pids(struct pid *pid) { struct task_struct *curr = current->group_leader; - if (task_session(curr) != pid) + if (task_session(curr) != pid) { change_pid(curr, PIDTYPE_SID, pid); + proc_sid_connector(curr); + } if (task_pgrp(curr) != pid) change_pid(curr, PIDTYPE_PGID, pid); -- cgit v1.2.3-70-g09d2 From 70867453092297be9afb2249e712a1f960ec0a09 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 22 Sep 2009 16:43:46 -0700 Subject: printk_once(): use bool for boolean flag Using the type bool (instead of int) for the __print_once flag in the printk_once() macro matches the intent of the code better, and allows the compiler to generate smaller code; eg a typical callsite with gcc 4.3.3 
on i386: add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-6 (-6) function old new delta static.__print_once 4 1 -3 get_cpu_vendor 146 143 -3 Saving 6 bytes of object size per callsite by slightly improving the readability of the source seems like a win to me. Signed-off-by: Roland Dreier Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0a2a1908786..55723afa097 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -252,10 +252,10 @@ extern int printk_delay_msec; * Print a one-time message (analogous to WARN_ONCE() et al): */ #define printk_once(x...) ({ \ - static int __print_once = 1; \ + static bool __print_once = true; \ \ if (__print_once) { \ - __print_once = 0; \ + __print_once = false; \ printk(x); \ } \ }) -- cgit v1.2.3-70-g09d2 From 8c87df457cb58fe75b9b893007917cf8095660a0 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 22 Sep 2009 16:43:52 -0700 Subject: BUILD_BUG_ON(): fix it and a couple of bogus uses of it gcc permitting variable length arrays makes the current construct used for BUILD_BUG_ON() useless, as that doesn't produce any diagnostic if the controlling expression isn't really constant. Instead, this patch makes it so that a bit field gets used here. Consequently, those uses where the condition isn't really constant now also need fixing. Note that in the gfp.h, kmemcheck.h, and virtio_config.h cases MAYBE_BUILD_BUG_ON() really just serves documentation purposes - even if the expression is compile time constant (__builtin_constant_p() yields true), the array is still deemed of variable length by gcc, and hence the whole expression doesn't have the intended effect. [akpm@linux-foundation.org: make arch/sparc/include/asm/vio.h compile] [akpm@linux-foundation.org: more nonsensical assertions in tpm.c..] Signed-off-by: Jan Beulich Cc: Andi Kleen Cc: Rusty Russell Cc: Catalin Marinas Cc: "David S. 
Miller" Cc: Rajiv Andrade Cc: Mimi Zohar Cc: James Morris Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/vio.h | 2 +- drivers/char/tpm/tpm.c | 4 ++-- drivers/net/niu.c | 2 +- include/linux/gfp.h | 2 +- include/linux/kernel.h | 8 ++++++-- include/linux/kmemcheck.h | 2 +- include/linux/virtio_config.h | 3 +-- 7 files changed, 13 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index d4de32f0f8a..6cdbf7e7351 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -258,7 +258,7 @@ static inline void *vio_dring_entry(struct vio_dring_state *dr, static inline u32 vio_dring_avail(struct vio_dring_state *dr, unsigned int ring_size) { - BUILD_BUG_ON(!is_power_of_2(ring_size)); + MAYBE_BUILD_BUG_ON(!is_power_of_2(ring_size)); return (dr->pending - ((dr->prod - dr->cons) & (ring_size - 1))); diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index b0603b2e568..32b957efa42 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -696,7 +696,7 @@ int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) cmd.header.in = pcrread_header; cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx); - BUILD_BUG_ON(cmd.header.in.length > READ_PCR_RESULT_SIZE); + BUG_ON(cmd.header.in.length > READ_PCR_RESULT_SIZE); rc = transmit_cmd(chip, &cmd, cmd.header.in.length, "attempting to read a pcr value"); @@ -760,7 +760,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) return -ENODEV; cmd.header.in = pcrextend_header; - BUILD_BUG_ON(be32_to_cpu(cmd.header.in.length) > EXTEND_PCR_SIZE); + BUG_ON(be32_to_cpu(cmd.header.in.length) > EXTEND_PCR_SIZE); cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx); memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE); rc = transmit_cmd(chip, &cmd, cmd.header.in.length, diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 76cc2614f48..f9364d0678f 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -5615,7 +5615,7 @@ static void niu_init_tx_mac(struct niu *np) /* The XMAC_MIN register only accepts values for TX min which * have the low 3 bits cleared. */ - BUILD_BUG_ON(min & 0x7); + BUG_ON(min & 0x7); if (np->flags & NIU_FLAGS_XMAC) niu_init_tx_xmac(np, min, max); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f53e9b868c2..557bdad320b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -220,7 +220,7 @@ static inline enum zone_type gfp_zone(gfp_t flags) ((1 << ZONES_SHIFT) - 1); if (__builtin_constant_p(bit)) - BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1); + MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1); else { #ifdef CONFIG_DEBUG_VM BUG_ON((GFP_ZONE_BAD >> bit) & 1); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 55723afa097..63dcaece1ac 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -678,13 +678,17 @@ struct sysinfo { }; /* Force a compilation error if condition is true */ -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition)) + +/* Force a compilation error if condition is constant and true */ +#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)])) /* Force a compilation error if condition is true, but also produce a result (of value 0 and type size_t), so the expression can be used e.g. in a structure initializer (or where-ever else comma expressions aren't permitted). 
*/ -#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) +#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) /* Trap pasters of __FUNCTION__ at compile-time */ #define __FUNCTION__ (__func__) diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index 136cdcdf92e..e880d4cf9e2 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -152,7 +152,7 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) \ _n = (long) &((ptr)->name##_end) \ - (long) &((ptr)->name##_begin); \ - BUILD_BUG_ON(_n < 0); \ + MAYBE_BUILD_BUG_ON(_n < 0); \ \ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ } while (0) diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index e547e3c8ee9..0093dd7c1d6 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -109,8 +109,7 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? */ - if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 32); + MAYBE_BUILD_BUG_ON(fbit >= 32); if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); -- cgit v1.2.3-70-g09d2 From 562787a5c32ccdf182de27793a83a9f2ee86cd77 Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Tue, 22 Sep 2009 16:43:57 -0700 Subject: anonfd: split interface into file creation and install Split the anonfd interface into a bare file pointer creation one, and a file pointer creation plus install one. There are cases, like the usage of eventfds inside other kernel interfaces, where the file pointer created by anonfd needs to be used inside the initialization of other structures. As it is right now, as soon as anon_inode_getfd() returns, the kenrle can race with userspace closing the newly installed file descriptor. This patch, while keeping the old anon_inode_getfd(), introduces a new anon_inode_getfile() (whose services are reused in anon_inode_getfd()) that allows to split the file creation phase and the fd install one. Once all the kernel structures are initialized, the code can call the proper fd_install(). Gregory manifested the need for something like this inside KVM. Signed-off-by: Davide Libenzi Cc: Alexander Viro Cc: James Morris Cc: Peter Zijlstra Cc: Gregory Haskins Acked-by: Serge Hallyn Acked-by: Roland Dreier Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/anon_inodes.c | 68 +++++++++++++++++++++++++++++++++------------ fs/eventfd.c | 67 +++++++++++++++++++++++++++++++++++--------- include/linux/anon_inodes.h | 3 ++ include/linux/eventfd.h | 6 ++++ 4 files changed, 114 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 47d4a01c539..d11c51fc2a3 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -77,28 +77,24 @@ static const struct address_space_operations anon_aops = { * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. - * All the files created with anon_inode_getfd() will share a single inode, + * All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry - * setup. Returns new descriptor or -error. + * setup. Returns the newly created file* or an error pointer. 
*/ -int anon_inode_getfd(const char *name, const struct file_operations *fops, - void *priv, int flags) +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags) { struct qstr this; struct dentry *dentry; struct file *file; - int error, fd; + int error; if (IS_ERR(anon_inode_inode)) - return -ENODEV; + return ERR_PTR(-ENODEV); if (fops->owner && !try_module_get(fops->owner)) - return -ENOENT; - - error = get_unused_fd_flags(flags); - if (error < 0) - goto err_module; - fd = error; + return ERR_PTR(-ENOENT); /* * Link the inode to a directory entry by creating a unique name @@ -110,7 +106,7 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops, this.hash = 0; dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this); if (!dentry) - goto err_put_unused_fd; + goto err_module; /* * We know the anon_inode inode count is always greater than zero, @@ -136,16 +132,54 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops, file->f_version = 0; file->private_data = priv; + return file; + +err_dput: + dput(dentry); +err_module: + module_put(fops->owner); + return ERR_PTR(error); +} +EXPORT_SYMBOL_GPL(anon_inode_getfile); + +/** + * anon_inode_getfd - creates a new file instance by hooking it up to an + * anonymous inode, and a dentry that describe the "class" + * of the file + * + * @name: [in] name of the "class" of the new file + * @fops: [in] file operations for the new file + * @priv: [in] private data for the new file (will be file's private_data) + * @flags: [in] flags + * + * Creates a new file by hooking it on a single inode. This is useful for files + * that do not need to have a full-fledged inode in order to operate correctly. + * All the files created with anon_inode_getfd() will share a single inode, + * hence saving memory and avoiding code duplication for the file/inode/dentry + * setup. Returns new descriptor or an error code. + */ +int anon_inode_getfd(const char *name, const struct file_operations *fops, + void *priv, int flags) +{ + int error, fd; + struct file *file; + + error = get_unused_fd_flags(flags); + if (error < 0) + return error; + fd = error; + + file = anon_inode_getfile(name, fops, priv, flags); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto err_put_unused_fd; + } fd_install(fd, file); return fd; -err_dput: - dput(dentry); err_put_unused_fd: put_unused_fd(fd); -err_module: - module_put(fops->owner); return error; } EXPORT_SYMBOL_GPL(anon_inode_getfd); diff --git a/fs/eventfd.c b/fs/eventfd.c index 31d12de83a2..8b47e4200e6 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -68,11 +68,16 @@ int eventfd_signal(struct eventfd_ctx *ctx, int n) } EXPORT_SYMBOL_GPL(eventfd_signal); +static void eventfd_free_ctx(struct eventfd_ctx *ctx) +{ + kfree(ctx); +} + static void eventfd_free(struct kref *kref) { struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref); - kfree(ctx); + eventfd_free_ctx(ctx); } /** @@ -298,9 +303,23 @@ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file) } EXPORT_SYMBOL_GPL(eventfd_ctx_fileget); -SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) +/** + * eventfd_file_create - Creates an eventfd file pointer. + * @count: Initial eventfd counter value. + * @flags: Flags for the eventfd file. + * + * This function creates an eventfd file pointer, w/out installing it into + * the fd table. 
This is useful when the eventfd file is used during the + * initialization of data structures that require extra setup after the eventfd + * creation. So the eventfd creation is split into the file pointer creation + * phase, and the file descriptor installation phase. + * In this way races with userspace closing the newly installed file descriptor + * can be avoided. + * Returns an eventfd file pointer, or a proper error pointer. + */ +struct file *eventfd_file_create(unsigned int count, int flags) { - int fd; + struct file *file; struct eventfd_ctx *ctx; /* Check the EFD_* constants for consistency. */ @@ -308,26 +327,48 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK); if (flags & ~EFD_FLAGS_SET) - return -EINVAL; + return ERR_PTR(-EINVAL); ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) - return -ENOMEM; + return ERR_PTR(-ENOMEM); kref_init(&ctx->kref); init_waitqueue_head(&ctx->wqh); ctx->count = count; ctx->flags = flags; - /* - * When we call this, the initialization must be complete, since - * anon_inode_getfd() will install the fd. - */ - fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx, - flags & EFD_SHARED_FCNTL_FLAGS); - if (fd < 0) - kfree(ctx); + file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, + flags & EFD_SHARED_FCNTL_FLAGS); + if (IS_ERR(file)) + eventfd_free_ctx(ctx); + + return file; +} + +SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) +{ + int fd, error; + struct file *file; + + error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS); + if (error < 0) + return error; + fd = error; + + file = eventfd_file_create(count, flags); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto err_put_unused_fd; + } + fd_install(fd, file); + return fd; + +err_put_unused_fd: + put_unused_fd(fd); + + return error; } SYSCALL_DEFINE1(eventfd, unsigned int, count) diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index e0a0cdc2da4..69a21e0ebd3 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -8,6 +8,9 @@ #ifndef _LINUX_ANON_INODES_H #define _LINUX_ANON_INODES_H +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags); int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags); diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 3b85ba6479f..94dd10366a7 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -27,6 +27,7 @@ #ifdef CONFIG_EVENTFD +struct file *eventfd_file_create(unsigned int count, int flags); struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); void eventfd_ctx_put(struct eventfd_ctx *ctx); struct file *eventfd_fget(int fd); @@ -40,6 +41,11 @@ int eventfd_signal(struct eventfd_ctx *ctx, int n); * Ugly ugly ugly error layer to support modules that uses eventfd but * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. */ +static inline struct file *eventfd_file_create(unsigned int count, int flags) +{ + return ERR_PTR(-ENOSYS); +} + static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) { return ERR_PTR(-ENOSYS); -- cgit v1.2.3-70-g09d2 From a49c59c042c63b432307c1bbf7dac5a104c786e6 Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Tue, 22 Sep 2009 16:44:03 -0700 Subject: Make sure the value in abs() does not get truncated if it is greater than 2^32 abs() will truncate the input if is it outside the 2^32 range. Fix that by assuming `long' input. 
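A quick userspace sketch of the truncation (illustrative only, assuming a typical LP64 GCC target; abs_old/abs_new mirror the before and after definitions, they are not part of the patch):

#include <stdio.h>

#define abs_old(x) ({ int __x = (x); (__x < 0) ? -__x : __x; })
#define abs_new(x) ({ long __x = (x); (__x < 0) ? -__x : __x; })

int main(void)
{
	long v = -5000000000L;	/* magnitude larger than 2^32 */

	/* old macro narrows to int first: prints 705032704 on LP64 */
	printf("old: %ld\n", (long)abs_old(v));
	/* new macro keeps the full value: prints 5000000000 */
	printf("new: %ld\n", abs_new(v));
	return 0;
}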
This might generate worse code in the common case. Signed-off-by: Rolf Eike Beer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 63dcaece1ac..d3cd23f3003 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -146,7 +146,7 @@ extern int _cond_resched(void); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) #define abs(x) ({ \ - int __x = (x); \ + long __x = (x); \ (__x < 0) ? -__x : __x; \ }) -- cgit v1.2.3-70-g09d2 From b28cfd2c0616e1b42acc6ee3c77ef6cc3873c510 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 22 Sep 2009 16:44:05 -0700 Subject: kmap_types.h: rename D macro I tend to use a 'D' debugging macro a lot during debugging. When I define it before includes I often get conflicts with kmap_types.h's use of 'D' too. It's not very nice when a global include pollutes the name space like this. Rename the kmap_types.h D to KMAP_D. It is only used temporarily in the header so has no effect on anything else. Signed-off-by: Andi Kleen Reviewed-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/kmap_types.h | 47 ++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h index eddbce0f9fb..e5f234a0854 100644 --- a/include/asm-generic/kmap_types.h +++ b/include/asm-generic/kmap_types.h @@ -2,34 +2,35 @@ #define _ASM_GENERIC_KMAP_TYPES_H #ifdef __WITH_KM_FENCE -# define D(n) __KM_FENCE_##n , +# define KMAP_D(n) __KM_FENCE_##n , #else -# define D(n) +# define KMAP_D(n) #endif enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_SYNC_ICACHE, -D(14) KM_SYNC_DCACHE, -D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */ -D(16) KM_IRQ_PTE, -D(17) KM_NMI, -D(18) KM_NMI_PTE, -D(19) KM_TYPE_NR +KMAP_D(0) KM_BOUNCE_READ, +KMAP_D(1) KM_SKB_SUNRPC_DATA, +KMAP_D(2) KM_SKB_DATA_SOFTIRQ, +KMAP_D(3) KM_USER0, +KMAP_D(4) KM_USER1, +KMAP_D(5) KM_BIO_SRC_IRQ, +KMAP_D(6) KM_BIO_DST_IRQ, +KMAP_D(7) KM_PTE0, +KMAP_D(8) KM_PTE1, +KMAP_D(9) KM_IRQ0, +KMAP_D(10) KM_IRQ1, +KMAP_D(11) KM_SOFTIRQ0, +KMAP_D(12) KM_SOFTIRQ1, +KMAP_D(13) KM_SYNC_ICACHE, +KMAP_D(14) KM_SYNC_DCACHE, +/* UML specific, for copy_*_user - used in do_op_one_page */ +KMAP_D(15) KM_UML_USERCOPY, +KMAP_D(16) KM_IRQ_PTE, +KMAP_D(17) KM_NMI, +KMAP_D(18) KM_NMI_PTE, +KMAP_D(19) KM_TYPE_NR }; -#undef D +#undef KMAP_D #endif -- cgit v1.2.3-70-g09d2 From 1f10206cf8e945220f7220a809d8bfc15c21f9a5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 22 Sep 2009 16:44:10 -0700 Subject: getrusage: fill ru_maxrss value Make ->ru_maxrss value in struct rusage filled accordingly to rss hiwater mark. This struct is filled as a parameter to getrusage syscall. ->ru_maxrss value is set to KBs which is the way it is done in BSD systems. /usr/bin/time (gnu time) application converts ->ru_maxrss to KBs which seems to be incorrect behavior. Maintainer of this util was notified by me with the patch which corrects it and cc'ed. To make this happen we extend struct signal_struct by two fields. 
The first one is ->maxrss which we use to store rss hiwater of the task. The second one is ->cmaxrss which we use to store highest rss hiwater of all task childs. These values are used in k_getrusage() to actually fill ->ru_maxrss. k_getrusage() uses current rss hiwater value directly if mm struct exists. Note: exec() clear mm->hiwater_rss, but doesn't clear sig->maxrss. it is intetionally behavior. *BSD getrusage have exec() inheriting. test programs ======================================================== getrusage.c =========== #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #define err(str) perror(str), exit(1) int main(int argc, char** argv) { int status; printf("allocate 100MB\n"); consume(100); printf("testcase1: fork inherit? \n"); printf(" expect: initial.self ~= child.self\n"); show_rusage("initial"); if (__fork()) { wait(&status); } else { show_rusage("fork child"); _exit(0); } printf("\n"); printf("testcase2: fork inherit? (cont.) \n"); printf(" expect: initial.children ~= 100MB, but child.children = 0\n"); show_rusage("initial"); if (__fork()) { wait(&status); } else { show_rusage("child"); _exit(0); } printf("\n"); printf("testcase3: fork + malloc \n"); printf(" expect: child.self ~= initial.self + 50MB\n"); show_rusage("initial"); if (__fork()) { wait(&status); } else { printf("allocate +50MB\n"); consume(50); show_rusage("fork child"); _exit(0); } printf("\n"); printf("testcase4: grandchild maxrss\n"); printf(" expect: post_wait.children ~= 300MB\n"); show_rusage("initial"); if (__fork()) { wait(&status); show_rusage("post_wait"); } else { system("./child -n 0 -g 300"); _exit(0); } printf("\n"); printf("testcase5: zombie\n"); printf(" expect: pre_wait ~= initial, IOW the zombie process is not accounted.\n"); printf(" post_wait ~= 400MB, IOW wait() collect child's max_rss. 
\n"); show_rusage("initial"); if (__fork()) { sleep(1); /* children become zombie */ show_rusage("pre_wait"); wait(&status); show_rusage("post_wait"); } else { system("./child -n 400"); _exit(0); } printf("\n"); printf("testcase6: SIG_IGN\n"); printf(" expect: initial ~= after_zombie (child's 500MB alloc should be ignored).\n"); show_rusage("initial"); signal(SIGCHLD, SIG_IGN); if (__fork()) { sleep(1); /* children become zombie */ show_rusage("after_zombie"); } else { system("./child -n 500"); _exit(0); } printf("\n"); signal(SIGCHLD, SIG_DFL); printf("testcase7: exec (without fork) \n"); printf(" expect: initial ~= exec \n"); show_rusage("initial"); execl("./child", "child", "-v", NULL); return 0; } child.c ======= #include #include #include #include #include #include #include #include #include #include #include "common.h" int main(int argc, char** argv) { int status; int c; long consume_size = 0; long grandchild_consume_size = 0; int show = 0; while ((c = getopt(argc, argv, "n:g:v")) != -1) { switch (c) { case 'n': consume_size = atol(optarg); break; case 'v': show = 1; break; case 'g': grandchild_consume_size = atol(optarg); break; default: break; } } if (show) show_rusage("exec"); if (consume_size) { printf("child alloc %ldMB\n", consume_size); consume(consume_size); } if (grandchild_consume_size) { if (fork()) { wait(&status); } else { printf("grandchild alloc %ldMB\n", grandchild_consume_size); consume(grandchild_consume_size); exit(0); } } return 0; } common.c ======== #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #define err(str) perror(str), exit(1) void show_rusage(char *prefix) { int err, err2; struct rusage rusage_self; struct rusage rusage_children; printf("%s: ", prefix); err = getrusage(RUSAGE_SELF, &rusage_self); if (!err) printf("self %ld ", rusage_self.ru_maxrss); err2 = getrusage(RUSAGE_CHILDREN, &rusage_children); if (!err2) printf("children %ld ", rusage_children.ru_maxrss); printf("\n"); } /* Some buggy OS need this worthless CPU waste. */ void make_pagefault(void) { void *addr; int size = getpagesize(); int i; for (i=0; i<1000; i++) { addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); if (addr == MAP_FAILED) err("make_pagefault"); memset(addr, 0, size); munmap(addr, size); } } void consume(int mega) { size_t sz = mega * 1024 * 1024; void *ptr; ptr = malloc(sz); memset(ptr, 0, sz); make_pagefault(); } pid_t __fork(void) { pid_t pid; pid = fork(); make_pagefault(); return pid; } common.h ======== void show_rusage(char *prefix); void make_pagefault(void); void consume(int mega); pid_t __fork(void); FreeBSD result (expected result) ======================================================== allocate 100MB testcase1: fork inherit? expect: initial.self ~= child.self initial: self 103492 children 0 fork child: self 103540 children 0 testcase2: fork inherit? (cont.) expect: initial.children ~= 100MB, but child.children = 0 initial: self 103540 children 103540 child: self 103564 children 0 testcase3: fork + malloc expect: child.self ~= initial.self + 50MB initial: self 103564 children 103564 allocate +50MB fork child: self 154860 children 0 testcase4: grandchild maxrss expect: post_wait.children ~= 300MB initial: self 103564 children 154860 grandchild alloc 300MB post_wait: self 103564 children 308720 testcase5: zombie expect: pre_wait ~= initial, IOW the zombie process is not accounted. post_wait ~= 400MB, IOW wait() collect child's max_rss. 
initial: self 103564 children 308720 child alloc 400MB pre_wait: self 103564 children 308720 post_wait: self 103564 children 411312 testcase6: SIG_IGN expect: initial ~= after_zombie (child's 500MB alloc should be ignored). initial: self 103564 children 411312 child alloc 500MB after_zombie: self 103624 children 411312 testcase7: exec (without fork) expect: initial ~= exec initial: self 103624 children 411312 exec: self 103624 children 411312 Linux result (actual test result) ======================================================== allocate 100MB testcase1: fork inherit? expect: initial.self ~= child.self initial: self 102848 children 0 fork child: self 102572 children 0 testcase2: fork inherit? (cont.) expect: initial.children ~= 100MB, but child.children = 0 initial: self 102876 children 102644 child: self 102572 children 0 testcase3: fork + malloc expect: child.self ~= initial.self + 50MB initial: self 102876 children 102644 allocate +50MB fork child: self 153804 children 0 testcase4: grandchild maxrss expect: post_wait.children ~= 300MB initial: self 102876 children 153864 grandchild alloc 300MB post_wait: self 102876 children 307536 testcase5: zombie expect: pre_wait ~= initial, IOW the zombie process is not accounted. post_wait ~= 400MB, IOW wait() collect child's max_rss. initial: self 102876 children 307536 child alloc 400MB pre_wait: self 102876 children 307536 post_wait: self 102876 children 410076 testcase6: SIG_IGN expect: initial ~= after_zombie (child's 500MB alloc should be ignored). initial: self 102876 children 410076 child alloc 500MB after_zombie: self 102880 children 410076 testcase7: exec (without fork) expect: initial ~= exec initial: self 102880 children 410076 exec: self 102880 children 410076 Signed-off-by: Jiri Pirko Signed-off-by: KOSAKI Motohiro Cc: Oleg Nesterov Cc: Hugh Dickins Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 3 +++ include/linux/sched.h | 10 ++++++++++ kernel/exit.c | 6 ++++++ kernel/fork.c | 1 + kernel/sys.c | 14 ++++++++++++++ 5 files changed, 34 insertions(+) (limited to 'include') diff --git a/fs/exec.c b/fs/exec.c index 434dba778cc..69bb9d89979 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -845,6 +845,9 @@ static int de_thread(struct task_struct *tsk) sig->notify_count = 0; no_thread_group: + if (current->mm) + setmax_mm_hiwater_rss(&sig->maxrss, current->mm); + exit_itimers(sig); flush_itimer_signals(); diff --git a/include/linux/sched.h b/include/linux/sched.h index 97b10da0a3e..6448bbc6406 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -426,6 +426,15 @@ static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) return max(mm->hiwater_rss, get_mm_rss(mm)); } +static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, + struct mm_struct *mm) +{ + unsigned long hiwater_rss = get_mm_hiwater_rss(mm); + + if (*maxrss < hiwater_rss) + *maxrss = hiwater_rss; +} + static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) { return max(mm->hiwater_vm, mm->total_vm); @@ -612,6 +621,7 @@ struct signal_struct { unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; struct task_io_accounting ioac; /* diff --git a/kernel/exit.c b/kernel/exit.c index 61bb1761c7b..60d6fdcc926 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -947,6 +947,8 @@ NORET_TYPE void do_exit(long code) if (group_dead) { hrtimer_cancel(&tsk->signal->real_timer); 
exit_itimers(tsk->signal); + if (tsk->mm) + setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); } acct_collect(code, group_dead); if (group_dead) @@ -1210,6 +1212,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) if (likely(!traced) && likely(!task_detached(p))) { struct signal_struct *psig; struct signal_struct *sig; + unsigned long maxrss; /* * The resource counters for the group leader are in its @@ -1258,6 +1261,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) psig->coublock += task_io_get_oublock(p) + sig->oublock + sig->coublock; + maxrss = max(sig->maxrss, sig->cmaxrss); + if (psig->cmaxrss < maxrss) + psig->cmaxrss = maxrss; task_io_accounting_add(&psig->ioac, &p->ioac); task_io_accounting_add(&psig->ioac, &sig->ioac); spin_unlock_irq(&p->real_parent->sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c index 1020977b57c..7cf45812ce8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -866,6 +866,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; + sig->maxrss = sig->cmaxrss = 0; task_io_accounting_init(&sig->ioac); sig->sum_sched_runtime = 0; taskstats_tgid_init(sig); diff --git a/kernel/sys.c b/kernel/sys.c index ea5c3bcac88..ebcb1561172 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1338,6 +1338,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) unsigned long flags; cputime_t utime, stime; struct task_cputime cputime; + unsigned long maxrss = 0; memset((char *) r, 0, sizeof *r); utime = stime = cputime_zero; @@ -1346,6 +1347,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) utime = task_utime(current); stime = task_stime(current); accumulate_thread_rusage(p, r); + maxrss = p->signal->maxrss; goto out; } @@ -1363,6 +1365,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) r->ru_majflt = p->signal->cmaj_flt; r->ru_inblock = p->signal->cinblock; r->ru_oublock = p->signal->coublock; + maxrss = p->signal->cmaxrss; if (who == RUSAGE_CHILDREN) break; @@ -1377,6 +1380,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) r->ru_majflt += p->signal->maj_flt; r->ru_inblock += p->signal->inblock; r->ru_oublock += p->signal->oublock; + if (maxrss < p->signal->maxrss) + maxrss = p->signal->maxrss; t = p; do { accumulate_thread_rusage(t, r); @@ -1392,6 +1397,15 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) out: cputime_to_timeval(utime, &r->ru_utime); cputime_to_timeval(stime, &r->ru_stime); + + if (who != RUSAGE_CHILDREN) { + struct mm_struct *mm = get_task_mm(p); + if (mm) { + setmax_mm_hiwater_rss(&maxrss, mm); + mmput(mm); + } + } + r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ } int getrusage(struct task_struct *p, int who, struct rusage __user *ru) -- cgit v1.2.3-70-g09d2 From 00afe029aab03bd95eba210b5e74a252017c4692 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 22 Sep 2009 16:44:14 -0700 Subject: asm/sections: add text/data checking functions for arches to override Some ports (like the Blackfin arch) have a discontiguous memory map which means there may be text or data that falls outside of the standard range of the start/end text/data symbols. 
Creating some helper functions allows these non-standard ports to declare these regions without adversely affecting anyone else. Signed-off-by: Mike Frysinger Cc: Ingo Molnar Cc: Robin Getz Cc: Sam Ravnborg Cc: Peter Zijlstra Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/sections.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include') diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d083561337f..b3bfabc258f 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -23,4 +23,20 @@ extern char __ctors_start[], __ctors_end[]; #define dereference_function_descriptor(p) (p) #endif +/* random extra sections (if any). Override + * in asm/sections.h */ +#ifndef arch_is_kernel_text +static inline int arch_is_kernel_text(unsigned long addr) +{ + return 0; +} +#endif + +#ifndef arch_is_kernel_data +static inline int arch_is_kernel_data(unsigned long addr) +{ + return 0; +} +#endif + #endif /* _ASM_GENERIC_SECTIONS_H_ */ -- cgit v1.2.3-70-g09d2 From 8ea926b22e2d13238e4d65d8f61c48fe424e6f4f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Sep 2009 16:44:29 -0700 Subject: mmc: add 'enable' and 'disable' methods to mmc host MMC hosts that support power saving can use the 'enable' and 'disable' methods to exit and enter power saving states. An explanation of their use is provided in the comments added to include/linux/mmc/host.h. Signed-off-by: Adrian Hunter Acked-by: Matt Fleming Cc: Ian Molton Cc: "Roberto A. Foglietta" Cc: Jarkko Lavinen Cc: Denis Karpov Cc: Pierre Ossman Cc: Philip Langdale Cc: "Madhusudhan" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/core.c | 177 +++++++++++++++++++++++++++++++++++++++++++++-- drivers/mmc/core/host.c | 1 + drivers/mmc/core/host.h | 2 + include/linux/mmc/host.h | 47 +++++++++++++ 4 files changed, 221 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index e22d2b5576e..fb24a096dba 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -343,6 +343,101 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) } EXPORT_SYMBOL(mmc_align_data_size); +/** + * mmc_host_enable - enable a host. + * @host: mmc host to enable + * + * Hosts that support power saving can use the 'enable' and 'disable' + * methods to exit and enter power saving states. For more information + * see comments for struct mmc_host_ops. 
+ */ +int mmc_host_enable(struct mmc_host *host) +{ + if (!(host->caps & MMC_CAP_DISABLE)) + return 0; + + if (host->en_dis_recurs) + return 0; + + if (host->nesting_cnt++) + return 0; + + cancel_delayed_work_sync(&host->disable); + + if (host->enabled) + return 0; + + if (host->ops->enable) { + int err; + + host->en_dis_recurs = 1; + err = host->ops->enable(host); + host->en_dis_recurs = 0; + + if (err) { + pr_debug("%s: enable error %d\n", + mmc_hostname(host), err); + return err; + } + } + host->enabled = 1; + return 0; +} +EXPORT_SYMBOL(mmc_host_enable); + +static int mmc_host_do_disable(struct mmc_host *host, int lazy) +{ + if (host->ops->disable) { + int err; + + host->en_dis_recurs = 1; + err = host->ops->disable(host, lazy); + host->en_dis_recurs = 0; + + if (err < 0) { + pr_debug("%s: disable error %d\n", + mmc_hostname(host), err); + return err; + } + if (err > 0) { + unsigned long delay = msecs_to_jiffies(err); + + mmc_schedule_delayed_work(&host->disable, delay); + } + } + host->enabled = 0; + return 0; +} + +/** + * mmc_host_disable - disable a host. + * @host: mmc host to disable + * + * Hosts that support power saving can use the 'enable' and 'disable' + * methods to exit and enter power saving states. For more information + * see comments for struct mmc_host_ops. + */ +int mmc_host_disable(struct mmc_host *host) +{ + int err; + + if (!(host->caps & MMC_CAP_DISABLE)) + return 0; + + if (host->en_dis_recurs) + return 0; + + if (--host->nesting_cnt) + return 0; + + if (!host->enabled) + return 0; + + err = mmc_host_do_disable(host, 0); + return err; +} +EXPORT_SYMBOL(mmc_host_disable); + /** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim @@ -379,11 +474,81 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) wake_up(&host->wq); spin_unlock_irqrestore(&host->lock, flags); remove_wait_queue(&host->wq, &wait); + if (!stop) + mmc_host_enable(host); return stop; } EXPORT_SYMBOL(__mmc_claim_host); +static int mmc_try_claim_host(struct mmc_host *host) +{ + int claimed_host = 0; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (!host->claimed) { + host->claimed = 1; + claimed_host = 1; + } + spin_unlock_irqrestore(&host->lock, flags); + return claimed_host; +} + +static void mmc_do_release_host(struct mmc_host *host) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->claimed = 0; + spin_unlock_irqrestore(&host->lock, flags); + + wake_up(&host->wq); +} + +void mmc_host_deeper_disable(struct work_struct *work) +{ + struct mmc_host *host = + container_of(work, struct mmc_host, disable.work); + + /* If the host is claimed then we do not want to disable it anymore */ + if (!mmc_try_claim_host(host)) + return; + mmc_host_do_disable(host, 1); + mmc_do_release_host(host); +} + +/** + * mmc_host_lazy_disable - lazily disable a host. + * @host: mmc host to disable + * + * Hosts that support power saving can use the 'enable' and 'disable' + * methods to exit and enter power saving states. For more information + * see comments for struct mmc_host_ops. 
+ */ +int mmc_host_lazy_disable(struct mmc_host *host) +{ + if (!(host->caps & MMC_CAP_DISABLE)) + return 0; + + if (host->en_dis_recurs) + return 0; + + if (--host->nesting_cnt) + return 0; + + if (!host->enabled) + return 0; + + if (host->disable_delay) { + mmc_schedule_delayed_work(&host->disable, + msecs_to_jiffies(host->disable_delay)); + return 0; + } else + return mmc_host_do_disable(host, 1); +} +EXPORT_SYMBOL(mmc_host_lazy_disable); + /** * mmc_release_host - release a host * @host: mmc host to release @@ -393,15 +558,11 @@ EXPORT_SYMBOL(__mmc_claim_host); */ void mmc_release_host(struct mmc_host *host) { - unsigned long flags; - WARN_ON(!host->claimed); - spin_lock_irqsave(&host->lock, flags); - host->claimed = 0; - spin_unlock_irqrestore(&host->lock, flags); + mmc_host_lazy_disable(host); - wake_up(&host->wq); + mmc_do_release_host(host); } EXPORT_SYMBOL(mmc_release_host); @@ -953,6 +1114,8 @@ void mmc_stop_host(struct mmc_host *host) spin_unlock_irqrestore(&host->lock, flags); #endif + if (host->caps & MMC_CAP_DISABLE) + cancel_delayed_work(&host->disable); cancel_delayed_work(&host->detect); mmc_flush_scheduled_work(); @@ -981,6 +1144,8 @@ void mmc_stop_host(struct mmc_host *host) */ int mmc_suspend_host(struct mmc_host *host, pm_message_t state) { + if (host->caps & MMC_CAP_DISABLE) + cancel_delayed_work(&host->disable); cancel_delayed_work(&host->detect); mmc_flush_scheduled_work(); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 5e945e64ead..a268d12f1af 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -83,6 +83,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); + INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); /* * By default, hosts do not support SGIO or large requests. diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index c2dc3d2d9f9..8c87e1109a3 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -14,5 +14,7 @@ int mmc_register_host_class(void); void mmc_unregister_host_class(void); +void mmc_host_deeper_disable(struct work_struct *work); + #endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 3e7615e9087..338a9b3d51e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -51,6 +51,35 @@ struct mmc_ios { }; struct mmc_host_ops { + /* + * Hosts that support power saving can use the 'enable' and 'disable' + * methods to exit and enter power saving states. 'enable' is called + * when the host is claimed and 'disable' is called (or scheduled with + * a delay) when the host is released. The 'disable' is scheduled if + * the disable delay set by 'mmc_set_disable_delay()' is non-zero, + * otherwise 'disable' is called immediately. 'disable' may be + * scheduled repeatedly, to permit ever greater power saving at the + * expense of ever greater latency to re-enable. Rescheduling is + * determined by the return value of the 'disable' method. A positive + * value gives the delay in milliseconds. + * + * In the case where a host function (like set_ios) may be called + * with or without the host claimed, enabling and disabling can be + * done directly and will nest correctly. Call 'mmc_host_enable()' and + * 'mmc_host_lazy_disable()' for this purpose, but note that these + * functions must be paired. + * + * Alternatively, 'mmc_host_enable()' may be paired with + * 'mmc_host_disable()' which calls 'disable' immediately. 
In this + * case the 'disable' method will be called with 'lazy' set to 0. + * This is mainly useful for error paths. + * + * Because lazy disable may be called from a work queue, the 'disable' + * method must claim the host when 'lazy' != 0, which will work + * correctly because recursion is detected and handled. + */ + int (*enable)(struct mmc_host *host); + int (*disable)(struct mmc_host *host, int lazy); void (*request)(struct mmc_host *host, struct mmc_request *req); /* * Avoid calling these three functions too often or in a "fast path", @@ -118,6 +147,7 @@ struct mmc_host { #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ +#define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ @@ -142,6 +172,13 @@ struct mmc_host { unsigned int removed:1; /* host is being removed */ #endif + /* Only used with MMC_CAP_DISABLE */ + int enabled; /* host is enabled */ + int nesting_cnt; /* "enable" nesting count */ + int en_dis_recurs; /* detect recursion */ + unsigned int disable_delay; /* disable delay in msecs */ + struct delayed_work disable; /* disabling work */ + struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; @@ -197,5 +234,15 @@ struct regulator; int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); +int mmc_host_enable(struct mmc_host *host); +int mmc_host_disable(struct mmc_host *host); +int mmc_host_lazy_disable(struct mmc_host *host); + +static inline void mmc_set_disable_delay(struct mmc_host *host, + unsigned int disable_delay) +{ + host->disable_delay = disable_delay; +} + #endif -- cgit v1.2.3-70-g09d2 From 319a3f1429c91147058ac26c5f5bac8ec1730bc6 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Sep 2009 16:44:30 -0700 Subject: mmc: allow host claim / release nesting This change allows the MMC host to be claimed in situations where the host may or may not have already been claimed. Also 'mmc_try_claim_host()' is now exported. Signed-off-by: Adrian Hunter Acked-by: Matt Fleming Cc: Ian Molton Cc: "Roberto A. Foglietta" Cc: Jarkko Lavinen Cc: Denis Karpov Cc: Pierre Ossman Cc: Philip Langdale Cc: "Madhusudhan" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/core.c | 34 +++++++++++++++++++++++++--------- include/linux/mmc/core.h | 1 + include/linux/mmc/host.h | 2 ++ 3 files changed, 28 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index fb24a096dba..02f2b1871a3 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -461,16 +461,18 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) while (1) { set_current_state(TASK_UNINTERRUPTIBLE); stop = abort ? 
atomic_read(abort) : 0; - if (stop || !host->claimed) + if (stop || !host->claimed || host->claimer == current) break; spin_unlock_irqrestore(&host->lock, flags); schedule(); spin_lock_irqsave(&host->lock, flags); } set_current_state(TASK_RUNNING); - if (!stop) + if (!stop) { host->claimed = 1; - else + host->claimer = current; + host->claim_cnt += 1; + } else wake_up(&host->wq); spin_unlock_irqrestore(&host->lock, flags); remove_wait_queue(&host->wq, &wait); @@ -481,29 +483,43 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) EXPORT_SYMBOL(__mmc_claim_host); -static int mmc_try_claim_host(struct mmc_host *host) +/** + * mmc_try_claim_host - try exclusively to claim a host + * @host: mmc host to claim + * + * Returns %1 if the host is claimed, %0 otherwise. + */ +int mmc_try_claim_host(struct mmc_host *host) { int claimed_host = 0; unsigned long flags; spin_lock_irqsave(&host->lock, flags); - if (!host->claimed) { + if (!host->claimed || host->claimer == current) { host->claimed = 1; + host->claimer = current; + host->claim_cnt += 1; claimed_host = 1; } spin_unlock_irqrestore(&host->lock, flags); return claimed_host; } +EXPORT_SYMBOL(mmc_try_claim_host); static void mmc_do_release_host(struct mmc_host *host) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); - host->claimed = 0; - spin_unlock_irqrestore(&host->lock, flags); - - wake_up(&host->wq); + if (--host->claim_cnt) { + /* Release for nested claim */ + spin_unlock_irqrestore(&host->lock, flags); + } else { + host->claimed = 0; + host->claimer = NULL; + spin_unlock_irqrestore(&host->lock, flags); + wake_up(&host->wq); + } } void mmc_host_deeper_disable(struct work_struct *work) diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 7ac8b500d55..e4898e9eeb5 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -139,6 +139,7 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); extern void mmc_release_host(struct mmc_host *host); +extern int mmc_try_claim_host(struct mmc_host *host); /** * mmc_claim_host - exclusively claim a host diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 338a9b3d51e..631a2fea526 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -182,6 +182,8 @@ struct mmc_host { struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; + struct task_struct *claimer; /* task that has host claimed */ + int claim_cnt; /* "claim" nesting count */ struct delayed_work detect; -- cgit v1.2.3-70-g09d2 From 9feae246963c648b212abad0f0eb8938de5f5fe5 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Sep 2009 16:44:32 -0700 Subject: mmc: add MMC_CAP_NONREMOVABLE host capability eMMC's are not removable, so unsafe resume is OK always. To permit this a new host capability MMC_CAP_NONREMOVABLE has been added and suspend / resume updated accordingly. Signed-off-by: Adrian Hunter Acked-by: Matt Fleming Cc: Ian Molton Cc: "Roberto A. 
Foglietta" Cc: Jarkko Lavinen Cc: Denis Karpov Cc: Pierre Ossman Cc: Philip Langdale Cc: "Madhusudhan" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/mmc.c | 41 ++++++++++++++++++++++++++++++++++------- drivers/mmc/core/sd.c | 41 ++++++++++++++++++++++++++++++++++------- include/linux/mmc/host.h | 1 + 3 files changed, 69 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 2fb9d5f271e..995db1853a8 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -507,8 +507,6 @@ static void mmc_detect(struct mmc_host *host) } } -#ifdef CONFIG_MMC_UNSAFE_RESUME - /* * Suspend callback from host. */ @@ -551,20 +549,49 @@ static void mmc_resume(struct mmc_host *host) } -#else +#ifdef CONFIG_MMC_UNSAFE_RESUME -#define mmc_suspend NULL -#define mmc_resume NULL +static const struct mmc_bus_ops mmc_ops = { + .remove = mmc_remove, + .detect = mmc_detect, + .suspend = mmc_suspend, + .resume = mmc_resume, +}; -#endif +static void mmc_attach_bus_ops(struct mmc_host *host) +{ + mmc_attach_bus(host, &mmc_ops); +} + +#else static const struct mmc_bus_ops mmc_ops = { + .remove = mmc_remove, + .detect = mmc_detect, + .suspend = NULL, + .resume = NULL, +}; + +static const struct mmc_bus_ops mmc_ops_unsafe = { .remove = mmc_remove, .detect = mmc_detect, .suspend = mmc_suspend, .resume = mmc_resume, }; +static void mmc_attach_bus_ops(struct mmc_host *host) +{ + const struct mmc_bus_ops *bus_ops; + + if (host->caps & MMC_CAP_NONREMOVABLE) + bus_ops = &mmc_ops_unsafe; + else + bus_ops = &mmc_ops; + mmc_attach_bus(host, bus_ops); +} + +#endif + /* * Starting point for MMC card init. */ @@ -575,7 +602,7 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr) BUG_ON(!host); WARN_ON(!host->claimed); - mmc_attach_bus(host, &mmc_ops); + mmc_attach_bus_ops(host); /* * We need to get OCR a different way for SPI. diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 7ad646fe077..92fa9dceca7 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -561,8 +561,6 @@ static void mmc_sd_detect(struct mmc_host *host) } } -#ifdef CONFIG_MMC_UNSAFE_RESUME - /* * Suspend callback from host. */ @@ -605,20 +603,49 @@ static void mmc_sd_resume(struct mmc_host *host) } -#else +#ifdef CONFIG_MMC_UNSAFE_RESUME -#define mmc_sd_suspend NULL -#define mmc_sd_resume NULL +static const struct mmc_bus_ops mmc_sd_ops = { + .remove = mmc_sd_remove, + .detect = mmc_sd_detect, + .suspend = mmc_sd_suspend, + .resume = mmc_sd_resume, +}; -#endif +static void mmc_sd_attach_bus_ops(struct mmc_host *host) +{ + mmc_attach_bus(host, &mmc_sd_ops); +} + +#else static const struct mmc_bus_ops mmc_sd_ops = { + .remove = mmc_sd_remove, + .detect = mmc_sd_detect, + .suspend = NULL, + .resume = NULL, +}; + +static const struct mmc_bus_ops mmc_sd_ops_unsafe = { .remove = mmc_sd_remove, .detect = mmc_sd_detect, .suspend = mmc_sd_suspend, .resume = mmc_sd_resume, }; +static void mmc_sd_attach_bus_ops(struct mmc_host *host) +{ + const struct mmc_bus_ops *bus_ops; + + if (host->caps & MMC_CAP_NONREMOVABLE) + bus_ops = &mmc_sd_ops_unsafe; + else + bus_ops = &mmc_sd_ops; + mmc_attach_bus(host, bus_ops); +} + +#endif + /* * Starting point for SD card init. */ @@ -629,7 +656,7 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr) BUG_ON(!host); WARN_ON(!host->claimed); - mmc_attach_bus(host, &mmc_sd_ops); + mmc_sd_attach_bus_ops(host); /* * We need to get OCR a different way for SPI. 
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 631a2fea526..bb867d2c26b 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -148,6 +148,7 @@ struct mmc_host { #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ #define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ +#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ -- cgit v1.2.3-70-g09d2 From eae1aeeed852aae37621b82a9e7f6c05096a18fd Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Sep 2009 16:44:33 -0700 Subject: mmc: add ability to save power by powering off cards Power can be saved by powering off cards that are not in use. This is similar to suspend / resume except it is under the control of the driver, and does not require any power management support. It can only be used when the driver can monitor whether the card is removed, otherwise it is unsafe. This is possible because, unlike suspend, the driver still receives card detect and / or cover switch interrupts. Signed-off-by: Adrian Hunter Acked-by: Matt Fleming Cc: Ian Molton Cc: "Roberto A. Foglietta" Cc: Jarkko Lavinen Cc: Denis Karpov Cc: Pierre Ossman Cc: Philip Langdale Cc: "Madhusudhan" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/core.c | 34 ++++++++++++++++++++++++++++++++++ drivers/mmc/core/core.h | 2 ++ drivers/mmc/core/mmc.c | 11 +++++++++++ drivers/mmc/core/sd.c | 11 +++++++++++ include/linux/mmc/host.h | 3 +++ 5 files changed, 61 insertions(+) (limited to 'include') diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 02f2b1871a3..be1fc013fbe 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1151,6 +1151,40 @@ void mmc_stop_host(struct mmc_host *host) mmc_power_off(host); } +void mmc_power_save_host(struct mmc_host *host) +{ + mmc_bus_get(host); + + if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { + mmc_bus_put(host); + return; + } + + if (host->bus_ops->power_save) + host->bus_ops->power_save(host); + + mmc_bus_put(host); + + mmc_power_off(host); +} +EXPORT_SYMBOL(mmc_power_save_host); + +void mmc_power_restore_host(struct mmc_host *host) +{ + mmc_bus_get(host); + + if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { + mmc_bus_put(host); + return; + } + + mmc_power_up(host); + host->bus_ops->power_restore(host); + + mmc_bus_put(host); +} +EXPORT_SYMBOL(mmc_power_restore_host); + #ifdef CONFIG_PM /** diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index c819effa103..f7eb4c4ca01 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -20,6 +20,8 @@ struct mmc_bus_ops { void (*detect)(struct mmc_host *); void (*suspend)(struct mmc_host *); void (*resume)(struct mmc_host *); + void (*power_save)(struct mmc_host *); + void (*power_restore)(struct mmc_host *); }; void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 995db1853a8..27e842df6a6 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -549,6 +549,14 @@ static void mmc_resume(struct mmc_host *host) } +static void mmc_power_restore(struct mmc_host *host) +{ + host->card->state &= ~MMC_STATE_HIGHSPEED; + mmc_claim_host(host); + mmc_init_card(host, host->ocr, host->card); + mmc_release_host(host); +} + 
#ifdef CONFIG_MMC_UNSAFE_RESUME static const struct mmc_bus_ops mmc_ops = { @@ -556,6 +564,7 @@ static const struct mmc_bus_ops mmc_ops = { .detect = mmc_detect, .suspend = mmc_suspend, .resume = mmc_resume, + .power_restore = mmc_power_restore, }; static void mmc_attach_bus_ops(struct mmc_host *host) @@ -570,6 +579,7 @@ static const struct mmc_bus_ops mmc_ops = { .detect = mmc_detect, .suspend = NULL, .resume = NULL, + .power_restore = mmc_power_restore, }; static const struct mmc_bus_ops mmc_ops_unsafe = { @@ -577,6 +587,7 @@ static const struct mmc_bus_ops mmc_ops_unsafe = { .detect = mmc_detect, .suspend = mmc_suspend, .resume = mmc_resume, + .power_restore = mmc_power_restore, }; static void mmc_attach_bus_ops(struct mmc_host *host) diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 92fa9dceca7..222a60928cd 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -603,6 +603,14 @@ static void mmc_sd_resume(struct mmc_host *host) } +static void mmc_sd_power_restore(struct mmc_host *host) +{ + host->card->state &= ~MMC_STATE_HIGHSPEED; + mmc_claim_host(host); + mmc_sd_init_card(host, host->ocr, host->card); + mmc_release_host(host); +} + #ifdef CONFIG_MMC_UNSAFE_RESUME static const struct mmc_bus_ops mmc_sd_ops = { @@ -610,6 +618,7 @@ static const struct mmc_bus_ops mmc_sd_ops = { .detect = mmc_sd_detect, .suspend = mmc_sd_suspend, .resume = mmc_sd_resume, + .power_restore = mmc_sd_power_restore, }; static void mmc_sd_attach_bus_ops(struct mmc_host *host) @@ -624,6 +633,7 @@ static const struct mmc_bus_ops mmc_sd_ops = { .detect = mmc_sd_detect, .suspend = NULL, .resume = NULL, + .power_restore = mmc_sd_power_restore, }; static const struct mmc_bus_ops mmc_sd_ops_unsafe = { @@ -631,6 +641,7 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = { .detect = mmc_sd_detect, .suspend = mmc_sd_suspend, .resume = mmc_sd_resume, + .power_restore = mmc_sd_power_restore, }; static void mmc_sd_attach_bus_ops(struct mmc_host *host) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index bb867d2c26b..c1cbe598d47 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -223,6 +223,9 @@ static inline void *mmc_priv(struct mmc_host *host) extern int mmc_suspend_host(struct mmc_host *, pm_message_t); extern int mmc_resume_host(struct mmc_host *); +extern void mmc_power_save_host(struct mmc_host *host); +extern void mmc_power_restore_host(struct mmc_host *host); + extern void mmc_detect_change(struct mmc_host *, unsigned long delay); extern void mmc_request_done(struct mmc_host *, struct mmc_request *); -- cgit v1.2.3-70-g09d2 From b1ebe38456f7fe61a88af2844361e763ac6ea5ae Mon Sep 17 00:00:00 2001 From: Jarkko Lavinen Date: Tue, 22 Sep 2009 16:44:34 -0700 Subject: mmc: add mmc card sleep and awake support Add support for the new MMC command SLEEP_AWAKE. Signed-off-by: Jarkko Lavinen Signed-off-by: Adrian Hunter Acked-by: Matt Fleming Cc: Ian Molton Cc: "Roberto A. 
Foglietta" Cc: Jarkko Lavinen Cc: Denis Karpov Cc: Pierre Ossman Cc: Philip Langdale Cc: "Madhusudhan" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/core.c | 40 ++++++++++++++++++++++++++++++++++ drivers/mmc/core/core.h | 2 ++ drivers/mmc/core/mmc.c | 54 +++++++++++++++++++++++++++++++++++++++++----- drivers/mmc/core/mmc_ops.c | 36 +++++++++++++++++++++++++++++++ drivers/mmc/core/mmc_ops.h | 1 + include/linux/mmc/card.h | 2 ++ include/linux/mmc/host.h | 5 +++++ include/linux/mmc/mmc.h | 2 ++ 8 files changed, 137 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index be1fc013fbe..828e60ea528 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1185,6 +1185,46 @@ void mmc_power_restore_host(struct mmc_host *host) } EXPORT_SYMBOL(mmc_power_restore_host); +int mmc_card_awake(struct mmc_host *host) +{ + int err = -ENOSYS; + + mmc_bus_get(host); + + if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) + err = host->bus_ops->awake(host); + + mmc_bus_put(host); + + return err; +} +EXPORT_SYMBOL(mmc_card_awake); + +int mmc_card_sleep(struct mmc_host *host) +{ + int err = -ENOSYS; + + mmc_bus_get(host); + + if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) + err = host->bus_ops->sleep(host); + + mmc_bus_put(host); + + return err; +} +EXPORT_SYMBOL(mmc_card_sleep); + +int mmc_card_can_sleep(struct mmc_host *host) +{ + struct mmc_card *card = host->card; + + if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) + return 1; + return 0; +} +EXPORT_SYMBOL(mmc_card_can_sleep); + #ifdef CONFIG_PM /** diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index f7eb4c4ca01..c386348f5f7 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -16,6 +16,8 @@ #define MMC_CMD_RETRIES 3 struct mmc_bus_ops { + int (*awake)(struct mmc_host *); + int (*sleep)(struct mmc_host *); void (*remove)(struct mmc_host *); void (*detect)(struct mmc_host *); void (*suspend)(struct mmc_host *); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 27e842df6a6..e0bfa9515c8 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -160,7 +160,6 @@ static int mmc_read_ext_csd(struct mmc_card *card) { int err; u8 *ext_csd; - unsigned int ext_csd_struct; BUG_ON(!card); @@ -207,16 +206,16 @@ static int mmc_read_ext_csd(struct mmc_card *card) goto out; } - ext_csd_struct = ext_csd[EXT_CSD_REV]; - if (ext_csd_struct > 3) { + card->ext_csd.rev = ext_csd[EXT_CSD_REV]; + if (card->ext_csd.rev > 3) { printk(KERN_ERR "%s: unrecognised EXT_CSD structure " "version %d\n", mmc_hostname(card->host), - ext_csd_struct); + card->ext_csd.rev); err = -EINVAL; goto out; } - if (ext_csd_struct >= 2) { + if (card->ext_csd.rev >= 2) { card->ext_csd.sectors = ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | @@ -241,6 +240,15 @@ static int mmc_read_ext_csd(struct mmc_card *card) goto out; } + if (card->ext_csd.rev >= 3) { + u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; + + /* Sleep / awake timeout in 100ns units */ + if (sa_shift > 0 && sa_shift <= 0x17) + card->ext_csd.sa_timeout = + 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; + } + out: kfree(ext_csd); @@ -557,9 +565,41 @@ static void mmc_power_restore(struct mmc_host *host) mmc_release_host(host); } +static int mmc_sleep(struct mmc_host *host) +{ + struct mmc_card *card = host->card; + int err = -ENOSYS; + + if (card && card->ext_csd.rev >= 3) { + err = mmc_card_sleepawake(host, 1); + if (err < 0) + 
pr_debug("%s: Error %d while putting card into sleep", + mmc_hostname(host), err); + } + + return err; +} + +static int mmc_awake(struct mmc_host *host) +{ + struct mmc_card *card = host->card; + int err = -ENOSYS; + + if (card && card->ext_csd.rev >= 3) { + err = mmc_card_sleepawake(host, 0); + if (err < 0) + pr_debug("%s: Error %d while awaking sleeping card", + mmc_hostname(host), err); + } + + return err; +} + #ifdef CONFIG_MMC_UNSAFE_RESUME static const struct mmc_bus_ops mmc_ops = { + .awake = mmc_awake, + .sleep = mmc_sleep, .remove = mmc_remove, .detect = mmc_detect, .suspend = mmc_suspend, @@ -575,6 +615,8 @@ static void mmc_attach_bus_ops(struct mmc_host *host) #else static const struct mmc_bus_ops mmc_ops = { + .awake = mmc_awake, + .sleep = mmc_sleep, .remove = mmc_remove, .detect = mmc_detect, .suspend = NULL, @@ -583,6 +625,8 @@ static const struct mmc_bus_ops mmc_ops = { }; static const struct mmc_bus_ops mmc_ops_unsafe = { + .awake = mmc_awake, + .sleep = mmc_sleep, .remove = mmc_remove, .detect = mmc_detect, .suspend = mmc_suspend, diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 34ce2703d29..355c6042cf6 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -57,6 +57,42 @@ int mmc_deselect_cards(struct mmc_host *host) return _mmc_select_card(host, NULL); } +int mmc_card_sleepawake(struct mmc_host *host, int sleep) +{ + struct mmc_command cmd; + struct mmc_card *card = host->card; + int err; + + if (sleep) + mmc_deselect_cards(host); + + memset(&cmd, 0, sizeof(struct mmc_command)); + + cmd.opcode = MMC_SLEEP_AWAKE; + cmd.arg = card->rca << 16; + if (sleep) + cmd.arg |= 1 << 15; + + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err) + return err; + + /* + * If the host does not wait while the card signals busy, then we will + * will have to wait the sleep/awake timeout. Note, we cannot use the + * SEND_STATUS command to poll the status because that command (and most + * others) is invalid while the card sleeps. + */ + if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) + mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000)); + + if (!sleep) + err = mmc_select_card(card); + + return err; +} + int mmc_go_idle(struct mmc_host *host) { int err; diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 17854bf7cf0..653eb8e8417 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -25,6 +25,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status); int mmc_send_cid(struct mmc_host *host, u32 *cid); int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); int mmc_spi_set_crc(struct mmc_host *host, int use_crc); +int mmc_card_sleepawake(struct mmc_host *host, int sleep); #endif diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 403aa505f27..58f59174c64 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -40,6 +40,8 @@ struct mmc_csd { }; struct mmc_ext_csd { + u8 rev; + unsigned int sa_timeout; /* Units: 100ns */ unsigned int hs_max_dtr; unsigned int sectors; }; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c1cbe598d47..81bb4235859 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -149,6 +149,7 @@ struct mmc_host { #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ #define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. 
eMMC */ +#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ @@ -240,6 +241,10 @@ struct regulator; int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); +int mmc_card_awake(struct mmc_host *host); +int mmc_card_sleep(struct mmc_host *host); +int mmc_card_can_sleep(struct mmc_host *host); + int mmc_host_enable(struct mmc_host *host); int mmc_host_disable(struct mmc_host *host); int mmc_host_lazy_disable(struct mmc_host *host); diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 14b81f3e523..b2b40951c16 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -31,6 +31,7 @@ #define MMC_ALL_SEND_CID 2 /* bcr R2 */ #define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ #define MMC_SET_DSR 4 /* bc [31:16] RCA */ +#define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */ #define MMC_SWITCH 6 /* ac [31:0] See below R1b */ #define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ #define MMC_SEND_EXT_CSD 8 /* adtc R1 */ @@ -254,6 +255,7 @@ struct _mmc_csd { #define EXT_CSD_CARD_TYPE 196 /* RO */ #define EXT_CSD_REV 192 /* RO */ #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ +#define EXT_CSD_S_A_TIMEOUT 217 /* * EXT_CSD field definitions -- cgit v1.2.3-70-g09d2 From ef0b27d4ccacac32afc3d1c0e8a95e4091dfbc8c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 22 Sep 2009 16:44:37 -0700 Subject: mmc: check status after MMC SWITCH command According to the standard, the SWITCH command should be followed by a SEND_STATUS command to check for errors. Signed-off-by: Adrian Hunter Acked-by: Matt Fleming Cc: Ian Molton Cc: "Roberto A. Foglietta" Cc: Jarkko Lavinen Cc: Denis Karpov Cc: Pierre Ossman Cc: Philip Langdale Cc: "Madhusudhan" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/mmc.c | 24 ++++++++++++++++++------ drivers/mmc/core/mmc_ops.c | 23 +++++++++++++++++++++++ include/linux/mmc/mmc.h | 1 + 3 files changed, 42 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index e0bfa9515c8..a6b5fe9d396 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -416,12 +416,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, (host->caps & MMC_CAP_MMC_HIGHSPEED)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1); - if (err) + if (err && err != -EBADMSG) goto free_card; - mmc_card_set_highspeed(card); - - mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + if (err) { + printk(KERN_WARNING "%s: switch to highspeed failed\n", + mmc_hostname(card->host)); + err = 0; + } else { + mmc_card_set_highspeed(card); + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + } } /* @@ -456,10 +461,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, ext_csd_bit); - if (err) + if (err && err != -EBADMSG) goto free_card; - mmc_set_bus_width(card->host, bus_width); + if (err) { + printk(KERN_WARNING "%s: switch to bus width %d " + "failed\n", mmc_hostname(card->host), + 1 << bus_width); + err = 0; + } else { + mmc_set_bus_width(card->host, bus_width); + } } if (!oldcard) diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 355c6042cf6..d2cb5c63439 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -390,6 +390,7 @@ int mmc_switch(struct mmc_card *card, u8 set, 
u8 index, u8 value) { int err; struct mmc_command cmd; + u32 status; BUG_ON(!card); BUG_ON(!card->host); @@ -407,6 +408,28 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) if (err) return err; + /* Must check status to be sure of no errors */ + do { + err = mmc_send_status(card, &status); + if (err) + return err; + if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) + break; + if (mmc_host_is_spi(card->host)) + break; + } while (R1_CURRENT_STATE(status) == 7); + + if (mmc_host_is_spi(card->host)) { + if (status & R1_SPI_ILLEGAL_COMMAND) + return -EBADMSG; + } else { + if (status & 0xFDFFA000) + printk(KERN_WARNING "%s: unexpected status %#x after " + "switch", mmc_hostname(card->host), status); + if (status & R1_SWITCH_ERROR) + return -EBADMSG; + } + return 0; } diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index b2b40951c16..c02c8db7370 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -128,6 +128,7 @@ #define R1_STATUS(x) (x & 0xFFFFE000) #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ +#define R1_SWITCH_ERROR (1 << 7) /* sx, c */ #define R1_APP_CMD (1 << 5) /* sr, c */ /* -- cgit v1.2.3-70-g09d2 From 006ebd5de13854d6250eecc76866bbfad1ff7daf Mon Sep 17 00:00:00 2001 From: Ohad Ben-Cohen Date: Tue, 22 Sep 2009 16:45:07 -0700 Subject: sdio: add CD disable support Add support to disconnect the pull-up resistor on CD/DAT[3] (pin 1) of the card. This may be desired on certain setups of boards, controllers and embedded sdio devices which do not need the card's pull-up. As a result, card detection is disabled and power is saved. [akpm@linux-foundation.org: simplify sdio_disable_cd() a bit] Signed-off-by: Ohad Ben-Cohen Acked-by: Matt Fleming Cc: Ian Molton Cc: "Roberto A. Foglietta" Cc: Philip Langdale Cc: Pierre Ossman Cc: David Vrabel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/sdio.c | 30 ++++++++++++++++++++++++++++++ include/linux/mmc/card.h | 3 ++- 2 files changed, 32 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 6f221dc029a..2d24a123f0b 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -164,6 +164,29 @@ static int sdio_enable_wide(struct mmc_card *card) return 0; } +/* + * If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1) + * of the card. This may be required on certain setups of boards, + * controllers and embedded sdio device which do not need the card's + * pull-up. As a result, card detection is disabled and power is saved. + */ +static int sdio_disable_cd(struct mmc_card *card) +{ + int ret; + u8 ctrl; + + if (!card->cccr.disable_cd) + return 0; + + ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl); + if (ret) + return ret; + + ctrl |= SDIO_BUS_CD_DISABLE; + + return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); +} + /* * Test if the card supports high-speed mode and, if so, switch to it. */ @@ -384,6 +407,13 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) if (err) goto remove; + /* + * If needed, disconnect card detection pull-up resistor. + */ + err = sdio_disable_cd(card); + if (err) + goto remove; + /* * Initialize (but don't add) all present functions. 
*/ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 58f59174c64..00db39ccead 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -64,7 +64,8 @@ struct sdio_cccr { low_speed:1, wide_bus:1, high_power:1, - high_speed:1; + high_speed:1, + disable_cd:1; }; struct sdio_cis { -- cgit v1.2.3-70-g09d2 From 7c979ec7135d96bbff34790bf4b85a8508ede7fc Mon Sep 17 00:00:00 2001 From: Ohad Ben-Cohen Date: Tue, 22 Sep 2009 16:45:18 -0700 Subject: sdio: add MMC_QUIRK_LENIENT_FN0 Normally writes to SDIO function 0 outside the vendor specific CCCR registers are prohibited. To support embedded devices that require writes to SDIO function 0 outside this range (e.g. TI WL127x embedded sdio wifi device), MMC_QUIRK_LENIENT_FN0 is introduced. A card quirks field is added to `struct mmc_card' to support non-standard devices (e.g. embedded sdio devices). [akpm@linux-foundation.org: code in C, not cpp!] Signed-off-by: Ohad Ben-Cohen Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/sdio_io.c | 2 +- include/linux/mmc/card.h | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index f61fc2d4cd0..f9aa8a7deff 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -624,7 +624,7 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, BUG_ON(!func); - if (addr < 0xF0 || addr > 0xFF) { + if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) { if (err_ret) *err_ret = -EINVAL; return; diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 00db39ccead..2ee22e8af11 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -97,6 +97,8 @@ struct mmc_card { #define MMC_STATE_READONLY (1<<1) /* card is read-only */ #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ + unsigned int quirks; /* card quirks */ +#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ u32 raw_cid[4]; /* raw card CID */ u32 raw_csd[4]; /* raw card CSD */ @@ -132,6 +134,11 @@ struct mmc_card { #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) +static inline int mmc_card_lenient_fn0(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_LENIENT_FN0; +} + #define mmc_card_name(c) ((c)->cid.prod_name) #define mmc_card_id(c) (dev_name(&(c)->dev)) -- cgit v1.2.3-70-g09d2 From 996ad5686c5f868e67557cc1bfcb2cfdde1a18b4 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 22 Sep 2009 16:45:30 -0700 Subject: mmc: make SDIO device/driver struct accessors public Especially with the PM framework, those are quite handy to have in driver code too. 
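For illustration only, and not part of the patch: a minimal sketch of the driver-side code this enables, assuming a hypothetical SDIO driver whose PM callback only receives a bare struct device pointer.

#include <linux/device.h>
#include <linux/mmc/sdio_func.h>

/* Hypothetical PM callback: recover the SDIO function and its driver
 * from the struct device handed to dev_pm_ops-style hooks. */
static int example_sdio_suspend(struct device *dev)
{
        struct sdio_func *func = dev_to_sdio_func(dev);
        struct sdio_driver *drv = to_sdio_driver(dev->driver);

        dev_info(dev, "suspending SDIO function %u bound to %s\n",
                 func->num, drv->name);

        /* driver-specific quiescing of 'func' would go here */
        return 0;
}

With the macros exported from sdio_func.h, this kind of conversion no longer has to be open-coded with container_of() in each driver.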
Signed-off-by: Nicolas Pitre Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/core/sdio_bus.c | 3 --- include/linux/mmc/sdio_func.h | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 46284b52739..d37464e296a 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -20,9 +20,6 @@ #include "sdio_cis.h" #include "sdio_bus.h" -#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) -#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) - /* show configuration fields */ #define sdio_config_attr(field, format_string) \ static ssize_t \ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 451bdfc8583..ac3ab683fec 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -67,6 +67,7 @@ struct sdio_func { #define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) #define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) +#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) /* * SDIO function device driver @@ -81,6 +82,8 @@ struct sdio_driver { struct device_driver drv; }; +#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) + /** * SDIO_DEVICE - macro used to describe a specific SDIO device * @vend: the 16 bit manufacturer code -- cgit v1.2.3-70-g09d2 From d899bf7b55f503ba7d3d07ed27c3a37e270fa7db Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Tue, 22 Sep 2009 16:45:40 -0700 Subject: procfs: provide stack information for threads A patch to give a better overview of the userland application stack usage, especially for embedded linux. Currently you are only able to dump the main process/thread stack usage which is showed in /proc/pid/status by the "VmStk" Value. But you get no information about the consumed stack memory of the the threads. There is an enhancement in the /proc//{task/*,}/*maps and which marks the vm mapping where the thread stack pointer reside with "[thread stack xxxxxxxx]". xxxxxxxx is the maximum size of stack. This is a value information, because libpthread doesn't set the start of the stack to the top of the mapped area, depending of the pthread usage. A sample output of /proc//task//maps looks like: 08048000-08049000 r-xp 00000000 03:00 8312 /opt/z 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/z 0804a000-0806b000 rw-p 00000000 00:00 0 [heap] a7d12000-a7d13000 ---p 00000000 00:00 0 a7d13000-a7f13000 rw-p 00000000 00:00 0 [thread stack: 001ff4b4] a7f13000-a7f14000 ---p 00000000 00:00 0 a7f14000-a7f36000 rw-p 00000000 00:00 0 a7f36000-a8069000 r-xp 00000000 03:00 4222 /lib/libc.so.6 a8069000-a806b000 r--p 00133000 03:00 4222 /lib/libc.so.6 a806b000-a806c000 rw-p 00135000 03:00 4222 /lib/libc.so.6 a806c000-a806f000 rw-p 00000000 00:00 0 a806f000-a8083000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0 a8083000-a8084000 r--p 00013000 03:00 14462 /lib/libpthread.so.0 a8084000-a8085000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0 a8085000-a8088000 rw-p 00000000 00:00 0 a8088000-a80a4000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2 a80a4000-a80a5000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2 a80a5000-a80a6000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2 afaf5000-afb0a000 rw-p 00000000 00:00 0 [stack] ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso] Also there is a new entry "stack usage" in /proc//{task/*,}/status which will you give the current stack usage in kb. 
A sample output of /proc/self/status looks like: Name: cat State: R (running) Tgid: 507 Pid: 507 . . . CapBnd: fffffffffffffeff voluntary_ctxt_switches: 0 nonvoluntary_ctxt_switches: 0 Stack usage: 12 kB I also fixed stack base address in /proc//{task/*,}/stat to the base address of the associated thread stack and not the one of the main process. This makes more sense. [akpm@linux-foundation.org: fs/proc/array.c now needs walk_page_range()] Signed-off-by: Stefani Seibold Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Alexey Dobriyan Cc: "Eric W. Biederman" Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 5 ++- fs/exec.c | 2 + fs/proc/array.c | 85 +++++++++++++++++++++++++++++++++++++- fs/proc/task_mmu.c | 19 +++++++++ include/linux/sched.h | 1 + kernel/fork.c | 2 + mm/Makefile | 4 +- 7 files changed, 114 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 75988ba26a5..b5aee7838a0 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -176,6 +176,7 @@ read the file /proc/PID/status: CapBnd: ffffffffffffffff voluntary_ctxt_switches: 0 nonvoluntary_ctxt_switches: 1 + Stack usage: 12 kB This shows you nearly the same information you would get if you viewed it with the ps command. In fact, ps uses the proc file system to obtain its @@ -229,6 +230,7 @@ Table 1-2: Contents of the statm files (as of 2.6.30-rc7) Mems_allowed_list Same as previous, but in "list format" voluntary_ctxt_switches number of voluntary context switches nonvoluntary_ctxt_switches number of non voluntary context switches + Stack usage: stack usage high water mark (round up to page size) .............................................................................. Table 1-3: Contents of the statm files (as of 2.6.8-rc3) @@ -307,7 +309,7 @@ address perms offset dev inode pathname 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test 0804a000-0806b000 rw-p 00000000 00:00 0 [heap] a7cb1000-a7cb2000 ---p 00000000 00:00 0 -a7cb2000-a7eb2000 rw-p 00000000 00:00 0 +a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4] a7eb2000-a7eb3000 ---p 00000000 00:00 0 a7eb3000-a7ed5000 rw-p 00000000 00:00 0 a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 @@ -343,6 +345,7 @@ is not associated with a file: [stack] = the stack of the main process [vdso] = the "virtual dynamic shared object", the kernel system call handler + [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size or if empty, the mapping is anonymous. 
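As an illustrative aside rather than part of the patch, a minimal userspace sketch that consumes the new "Stack usage:" line, assuming the /proc/<pid>/status format documented above:

#include <stdio.h>
#include <string.h>

/* Print the "Stack usage:" line that this patch adds to /proc/self/status. */
int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (strncmp(line, "Stack usage:", 12) == 0)
                        fputs(line, stdout);
        fclose(f);
        return 0;
}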
diff --git a/fs/exec.c b/fs/exec.c index 69bb9d89979..5c833c18d0d 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1357,6 +1357,8 @@ int do_execve(char * filename, if (retval < 0) goto out; + current->stack_start = current->mm->start_stack; + /* execve succeeded */ current->fs->in_exec = 0; current->in_execve = 0; diff --git a/fs/proc/array.c b/fs/proc/array.c index 725a650bbbb..0c6bc602e6c 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -82,6 +82,7 @@ #include #include #include +#include #include #include @@ -321,6 +322,87 @@ static inline void task_context_switch_counts(struct seq_file *m, p->nivcsw); } +struct stack_stats { + struct vm_area_struct *vma; + unsigned long startpage; + unsigned long usage; +}; + +static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct stack_stats *ss = walk->private; + struct vm_area_struct *vma = ss->vma; + pte_t *pte, ptent; + spinlock_t *ptl; + int ret = 0; + + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) { + ptent = *pte; + +#ifdef CONFIG_STACK_GROWSUP + if (pte_present(ptent) || is_swap_pte(ptent)) + ss->usage = addr - ss->startpage + PAGE_SIZE; +#else + if (pte_present(ptent) || is_swap_pte(ptent)) { + ss->usage = ss->startpage - addr + PAGE_SIZE; + pte++; + ret = 1; + break; + } +#endif + } + pte_unmap_unlock(pte - 1, ptl); + cond_resched(); + return ret; +} + +static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma, + struct task_struct *task) +{ + struct stack_stats ss; + struct mm_walk stack_walk = { + .pmd_entry = stack_usage_pte_range, + .mm = vma->vm_mm, + .private = &ss, + }; + + if (!vma->vm_mm || is_vm_hugetlb_page(vma)) + return 0; + + ss.vma = vma; + ss.startpage = task->stack_start & PAGE_MASK; + ss.usage = 0; + +#ifdef CONFIG_STACK_GROWSUP + walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end, + &stack_walk); +#else + walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE, + &stack_walk); +#endif + return ss.usage; +} + +static inline void task_show_stack_usage(struct seq_file *m, + struct task_struct *task) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = get_task_mm(task); + + if (mm) { + down_read(&mm->mmap_sem); + vma = find_vma(mm, task->stack_start); + if (vma) + seq_printf(m, "Stack usage:\t%lu kB\n", + get_stack_usage_in_bytes(vma, task) >> 10); + + up_read(&mm->mmap_sem); + mmput(mm); + } +} + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { @@ -340,6 +422,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, task_show_regs(m, task); #endif task_context_switch_counts(m, task); + task_show_stack_usage(m, task); return 0; } @@ -481,7 +564,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, rsslim, mm ? mm->start_code : 0, mm ? mm->end_code : 0, - (permitted && mm) ? mm->start_stack : 0, + (permitted) ? task->stack_start : 0, esp, eip, /* The signal information here is obsolete. 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 366b1017a4f..2a1bef9203c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -243,6 +243,25 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { name = "[stack]"; + } else { + unsigned long stack_start; + struct proc_maps_private *pmp; + + pmp = m->private; + stack_start = pmp->task->stack_start; + + if (vma->vm_start <= stack_start && + vma->vm_end >= stack_start) { + pad_len_spaces(m, len); + seq_printf(m, + "[threadstack:%08lx]", +#ifdef CONFIG_STACK_GROWSUP + vma->vm_end - stack_start +#else + stack_start - vma->vm_start +#endif + ); + } } } else { name = "[vdso]"; diff --git a/include/linux/sched.h b/include/linux/sched.h index 6448bbc6406..3cbc6c0be66 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1529,6 +1529,7 @@ struct task_struct { /* bitmask of trace recursion */ unsigned long trace_recursion; #endif /* CONFIG_TRACING */ + unsigned long stack_start; }; /* Future-safe accessor for struct task_struct's cpus_allowed. */ diff --git a/kernel/fork.c b/kernel/fork.c index 7cf45812ce8..8f45b0ebdda 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1095,6 +1095,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->bts = NULL; + p->stack_start = stack_start; + /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p, clone_flags); diff --git a/mm/Makefile b/mm/Makefile index 728a9fde49d..88193d73cd1 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -11,10 +11,10 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page_alloc.o page-writeback.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ - page_isolation.o mm_init.o mmu_context.o $(mmu-y) + page_isolation.o mm_init.o mmu_context.o \ + pagewalk.o $(mmu-y) obj-y += init-mm.o -obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o obj-$(CONFIG_HAS_DMA) += dmapool.o -- cgit v1.2.3-70-g09d2 From 2ef43ec772551e975a6ea7cf22b59c84955aadf9 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Tue, 22 Sep 2009 16:45:41 -0700 Subject: kcore: use usual list for kclist This patchset is for /proc/kcore. With this, - many per-arch hooks are removed. - /proc/kcore will know really valid physical memory area. - /proc/kcore will be aware of memory hotplug. - /proc/kcore will be architecture independent i.e. if an arch supports CONFIG_MMU, it can use /proc/kcore. (if the arch uses usual memory layout.) This patch: /proc/kcore uses its own list handling codes. It's better to use generic list codes. No changes in logic. just clean up. 
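For reference, a minimal sketch of the generic list pattern the kclist code is converted to; the names here are hypothetical stand-ins, not the kcore symbols themselves.

#include <linux/list.h>
#include <linux/types.h>

struct item {
        struct list_head list;  /* embedded node replaces a hand-rolled ->next */
        unsigned long addr;
        size_t size;
};

static LIST_HEAD(item_head);    /* static head replaces "struct item *items" */

static void item_add(struct item *new, unsigned long addr, size_t size)
{
        new->addr = addr;
        new->size = size;
        list_add_tail(&new->list, &item_head);
}

static size_t item_total_size(void)
{
        struct item *m;
        size_t total = 0;

        list_for_each_entry(m, &item_head, list)
                total += m->size;
        return total;
}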
Signed-off-by: KAMEZAWA Hiroyuki Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/kcore.c | 12 ++++++------ include/linux/proc_fs.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 0cf8a24cf6c..f9327e51ce9 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -20,6 +20,7 @@ #include #include #include +#include #define CORE_STR "CORE" @@ -57,7 +58,7 @@ struct memelfnote void *data; }; -static struct kcore_list *kclist; +static LIST_HEAD(kclist_head); static DEFINE_RWLOCK(kclist_lock); void @@ -67,8 +68,7 @@ kclist_add(struct kcore_list *new, void *addr, size_t size) new->size = size; write_lock(&kclist_lock); - new->next = kclist; - kclist = new; + list_add_tail(&new->list, &kclist_head); write_unlock(&kclist_lock); } @@ -80,7 +80,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen) *nphdr = 1; /* PT_NOTE */ size = 0; - for (m=kclist; m; m=m->next) { + list_for_each_entry(m, &kclist_head, list) { try = kc_vaddr_to_offset((size_t)m->addr + m->size); if (try > size) size = try; @@ -192,7 +192,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) nhdr->p_align = 0; /* setup ELF PT_LOAD program header for every area */ - for (m=kclist; m; m=m->next) { + list_for_each_entry(m, &kclist_head, list) { phdr = (struct elf_phdr *) bufp; bufp += sizeof(struct elf_phdr); offset += sizeof(struct elf_phdr); @@ -317,7 +317,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) struct kcore_list *m; read_lock(&kclist_lock); - for (m=kclist; m; m=m->next) { + list_for_each_entry(m, &kclist_head, list) { if (start >= m->addr && start < (m->addr+m->size)) break; } diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index e6e77d31c41..0aff2a62eba 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -79,7 +79,7 @@ struct proc_dir_entry { }; struct kcore_list { - struct kcore_list *next; + struct list_head list; unsigned long addr; size_t size; }; -- cgit v1.2.3-70-g09d2 From c30bb2a25fcfde6157e6154a32c14686fb0bedbe Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Tue, 22 Sep 2009 16:45:43 -0700 Subject: kcore: add kclist types Presently, kclist_add() only eats start address and size as its arguments. Considering to make kclist dynamically reconfigulable, it's necessary to know which kclists are for System RAM and which are not. This patch add kclist types as KCORE_RAM KCORE_VMALLOC KCORE_TEXT KCORE_OTHER This "type" is used in a patch following this for detecting KCORE_RAM. 
Signed-off-by: KAMEZAWA Hiroyuki Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/init.c | 7 ++++--- arch/mips/mm/init.c | 7 ++++--- arch/powerpc/mm/init_32.c | 4 ++-- arch/powerpc/mm/init_64.c | 5 +++-- arch/sh/mm/init.c | 4 ++-- arch/x86/mm/init_32.c | 4 ++-- arch/x86/mm/init_64.c | 11 ++++++----- fs/proc/kcore.c | 3 ++- include/linux/proc_fs.h | 13 +++++++++++-- 9 files changed, 36 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 1d286244a56..f6a3c21a282 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -639,9 +639,10 @@ mem_init (void) high_memory = __va(max_low_pfn * PAGE_SIZE); - kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE); - kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); - kclist_add(&kcore_kernel, _stext, _end - _stext); + kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE, KCORE_RAM); + kclist_add(&kcore_vmem, (void *)VMALLOC_START, + VMALLOC_END-VMALLOC_START, KCORE_VMALLOC); + kclist_add(&kcore_kernel, _stext, _end - _stext, KCORE_TEXT); for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 1f4ee4797a6..f8661985bff 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -409,11 +409,12 @@ void __init mem_init(void) if ((unsigned long) &_text > (unsigned long) CKSEG0) /* The -4 is a hack so that user tools don't have to handle the overflow. */ - kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4); + kclist_add(&kcore_kseg0, (void *) CKSEG0, + 0x80000000 - 4, KCORE_TEXT); #endif - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); + kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT, KCORE_RAM); kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); + VMALLOC_END-VMALLOC_START, KCORE_VMALLOC); printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 3ef5084b90c..e91add90ec5 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -268,11 +268,11 @@ static int __init setup_kcore(void) size); } - kclist_add(kcore_mem, __va(base), size); + kclist_add(kcore_mem, __va(base), size, KCORE_RAM); } kclist_add(&kcore_vmem, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); + VMALLOC_END-VMALLOC_START, KCORE_VMALLOC); return 0; } diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 31582329cd6..9ee563101b5 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -128,10 +128,11 @@ static int __init setup_kcore(void) if (!kcore_mem) panic("%s: kmalloc failed\n", __func__); - kclist_add(kcore_mem, __va(base), size); + kclist_add(kcore_mem, __va(base), size, KCORE_RAM); } - kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); + kclist_add(&kcore_vmem, (void *)VMALLOC_START, + VMALLOC_END-VMALLOC_START, KCORE_VMALLOC); return 0; } diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index fabb7c6f48d..ef56c9f9d7b 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -226,9 +226,9 @@ void __init mem_init(void) datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); + kclist_add(&kcore_mem, __va(0), max_low_pfn << 
PAGE_SHIFT, KCORE_RAM); kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END - VMALLOC_START); + VMALLOC_END - VMALLOC_START, KCORE_VMALLOC); printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk data, %dk init)\n", diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index b49b4f67453..2cbc4011293 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -886,9 +886,9 @@ void __init mem_init(void) datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); + kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT, KCORE_RAM); kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); + VMALLOC_END-VMALLOC_START, KCORE_VMALLOC); printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init, %ldk highmem)\n", diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 810bd31e7f5..c05810b614f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -677,13 +677,14 @@ void __init mem_init(void) initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); + kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT, KCORE_RAM); kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, - VMALLOC_END-VMALLOC_START); - kclist_add(&kcore_kernel, &_stext, _end - _stext); - kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN); + VMALLOC_END-VMALLOC_START, KCORE_VMALLOC); + kclist_add(&kcore_kernel, &_stext, _end - _stext, KCORE_TEXT); + kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN, + KCORE_OTHER); kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, - VSYSCALL_END - VSYSCALL_START); + VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index f9327e51ce9..659c1635db8 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -62,10 +62,11 @@ static LIST_HEAD(kclist_head); static DEFINE_RWLOCK(kclist_lock); void -kclist_add(struct kcore_list *new, void *addr, size_t size) +kclist_add(struct kcore_list *new, void *addr, size_t size, int type) { new->addr = (unsigned long)addr; new->size = size; + new->type = type; write_lock(&kclist_lock); list_add_tail(&new->list, &kclist_head); diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 0aff2a62eba..bd7b840765a 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -78,10 +78,18 @@ struct proc_dir_entry { struct list_head pde_openers; /* who did ->open, but not ->release */ }; +enum kcore_type { + KCORE_TEXT, + KCORE_VMALLOC, + KCORE_RAM, + KCORE_OTHER, +}; + struct kcore_list { struct list_head list; unsigned long addr; size_t size; + int type; }; struct vmcore { @@ -233,11 +241,12 @@ static inline void dup_mm_exe_file(struct mm_struct *oldmm, #endif /* CONFIG_PROC_FS */ #if !defined(CONFIG_PROC_KCORE) -static inline void kclist_add(struct kcore_list *new, void *addr, size_t size) +static inline void +kclist_add(struct kcore_list *new, void *addr, size_t size, int type) { } #else -extern void kclist_add(struct kcore_list *, void *, size_t); +extern void kclist_add(struct kcore_list *, void *, size_t, int type); #endif union proc_op { -- cgit v1.2.3-70-g09d2 From 
908eedc6168bd92e89f90d89fa389065a36358fa Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Tue, 22 Sep 2009 16:45:46 -0700 Subject: walk system ram range MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Originally, walk_memory_resource() was introduced to traverse all memory of "System RAM" for detecting memory hotplug/unplug ranges. For doing so, the IORESOURCE_MEM|IORESOURCE_BUSY flags were used, and this was enough for memory hotplug. But for other purposes, such as /proc/kcore, this may include some firmware areas marked as IORESOURCE_BUSY | IORESOURCE_MEM. This patch makes the check strict to find out busy "System RAM". Note: PPC64 keeps its own walk_memory_resource(), which walks through ppc64's lmb information. Because the old kclist_add() is called per lmb, this patch ultimately makes no difference in behavior. And this patch removes the CONFIG_MEMORY_HOTPLUG check from this function. Because pfn_valid() only shows "there is a memmap or not" and cannot be used for "there is physical memory or not", this function is generically useful for scanning physical memory ranges. Signed-off-by: KAMEZAWA Hiroyuki Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: WANG Cong Cc: Américo Wang Cc: David Rientjes Cc: Roland Dreier Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/mm/mem.c | 6 +++--- drivers/infiniband/hw/ehca/ehca_mrmw.c | 2 +- drivers/net/ehea/ehea_qmr.c | 2 +- include/linux/ioport.h | 4 ++++ include/linux/memory_hotplug.h | 8 -------- kernel/resource.c | 23 ++++++++++++++++------- mm/memory_hotplug.c | 6 +++--- 7 files changed, 28 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 0e5c59b995e..59736317bf0 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -143,8 +143,8 @@ int arch_add_memory(int nid, u64 start, u64 size) * memory regions, find holes and callback for contiguous regions. 
*/ int -walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, - int (*func)(unsigned long, unsigned long, void *)) +walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)) { struct lmb_property res; unsigned long pfn, len; @@ -166,7 +166,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, } return ret; } -EXPORT_SYMBOL_GPL(walk_memory_resource); +EXPORT_SYMBOL_GPL(walk_system_ram_range); /* * Initialize the bootmem system and give it all the memory we diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 7663a2a9f13..7550a534005 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -2463,7 +2463,7 @@ int ehca_create_busmap(void) int ret; ehca_mr_len = 0; - ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL, + ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL, ehca_create_busmap_callback); return ret; } diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 3747457f5e6..bc7c5b7abb8 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -751,7 +751,7 @@ int ehea_create_busmap(void) mutex_lock(&ehea_busmap_mutex); ehea_mr_len = 0; - ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL, + ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL, ehea_create_busmap_callback); mutex_unlock(&ehea_busmap_mutex); return ret; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 786e7b8cece..83aa81297ea 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -184,5 +184,9 @@ extern void __devm_release_region(struct device *dev, struct resource *parent, extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); extern int iomem_is_exclusive(u64 addr); +extern int +walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)); + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d95f72e79b8..fed969281a4 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -191,14 +191,6 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) #endif /* ! CONFIG_MEMORY_HOTPLUG */ -/* - * Walk through all memory which is registered as resource. - * arg is (start_pfn, nr_pages, private_arg_pointer) - */ -extern int walk_memory_resource(unsigned long start_pfn, - unsigned long nr_pages, void *arg, - int (*func)(unsigned long, unsigned long, void *)); - #ifdef CONFIG_MEMORY_HOTREMOVE extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); diff --git a/kernel/resource.c b/kernel/resource.c index 78b087221c1..fb11a58b959 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -223,13 +223,13 @@ int release_resource(struct resource *old) EXPORT_SYMBOL(release_resource); -#if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY) +#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY) /* * Finds the lowest memory reosurce exists within [res->start.res->end) - * the caller must specify res->start, res->end, res->flags. + * the caller must specify res->start, res->end, res->flags and "name". * If found, returns 0, res is overwritten, if not found, returns -1. 
*/ -static int find_next_system_ram(struct resource *res) +static int find_next_system_ram(struct resource *res, char *name) { resource_size_t start, end; struct resource *p; @@ -245,6 +245,8 @@ static int find_next_system_ram(struct resource *res) /* system ram is just marked as IORESOURCE_MEM */ if (p->flags != res->flags) continue; + if (name && strcmp(p->name, name)) + continue; if (p->start > end) { p = NULL; break; @@ -262,19 +264,26 @@ static int find_next_system_ram(struct resource *res) res->end = p->end; return 0; } -int -walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, - int (*func)(unsigned long, unsigned long, void *)) + +/* + * This function calls callback against all memory range of "System RAM" + * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY. + * Now, this function is only for "System RAM". + */ +int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)) { struct resource res; unsigned long pfn, len; u64 orig_end; int ret = -1; + res.start = (u64) start_pfn << PAGE_SHIFT; res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; orig_end = res.end; - while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { + while ((res.start < res.end) && + (find_next_system_ram(&res, "System RAM") >= 0)) { pfn = (unsigned long)(res.start >> PAGE_SHIFT); len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT); ret = (*func)(pfn, len, arg); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index efe3e0ec2e6..821dee59637 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -413,7 +413,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) if (!populated_zone(zone)) need_zonelists_rebuild = 1; - ret = walk_memory_resource(pfn, nr_pages, &onlined_pages, + ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages, online_pages_range); if (ret) { printk(KERN_DEBUG "online_pages %lx at %lx failed\n", @@ -705,7 +705,7 @@ offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, static void offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) { - walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL, + walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, offline_isolated_pages_cb); } @@ -731,7 +731,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) long offlined = 0; int ret; - ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined, + ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, check_pages_isolated_cb); if (ret < 0) offlined = (long)ret; -- cgit v1.2.3-70-g09d2 From 26562c59fa9111ae3ea7b78045889662aac9e5ac Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Tue, 22 Sep 2009 16:45:49 -0700 Subject: kcore: register vmemmap range Benjamin Herrenschmidt pointed out that vmemmap range is not included in KCORE_RAM, KCORE_VMALLOC .... This adds KCORE_VMEMMAP if SPARSEMEM_VMEMMAP is used. By this, vmemmap can be readable via /proc/kcore Because it's not vmalloc area, vread/vwrite cannot be used. But the range is static against the memory layout, this patch handles vmemmap area by the same scheme with physical memory. This patch assumes SPARSEMEM_VMEMMAP range is not in VMALLOC range. It's correct now. 
[akpm@linux-foundation.org: fix typo] Signed-off-by: KAMEZAWA Hiroyuki Cc: Jiri Slaby Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: WANG Cong Cc: Benjamin Herrenschmidt Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/kcore.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++-- include/linux/proc_fs.h | 1 + 2 files changed, 51 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 802de33d634..78970e6f715 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -103,7 +103,7 @@ static void free_kclist_ents(struct list_head *head) } } /* - * Replace all KCORE_RAM information with passed list. + * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list. */ static void __kcore_update_ram(struct list_head *list) { @@ -113,7 +113,8 @@ static void __kcore_update_ram(struct list_head *list) write_lock(&kclist_lock); if (kcore_need_update) { list_for_each_entry_safe(pos, tmp, &kclist_head, list) { - if (pos->type == KCORE_RAM) + if (pos->type == KCORE_RAM + || pos->type == KCORE_VMEMMAP) list_move(&pos->list, &garbage); } list_splice_tail(list, &kclist_head); @@ -151,6 +152,47 @@ static int kcore_update_ram(void) #else /* !CONFIG_HIGHMEM */ +#ifdef CONFIG_SPARSEMEM_VMEMMAP +/* calculate vmemmap's address from given system ram pfn and register it */ +int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) +{ + unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT; + unsigned long nr_pages = ent->size >> PAGE_SHIFT; + unsigned long start, end; + struct kcore_list *vmm, *tmp; + + + start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK; + end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1; + end = ALIGN(end, PAGE_SIZE); + /* overlap check (because we have to align page */ + list_for_each_entry(tmp, head, list) { + if (tmp->type != KCORE_VMEMMAP) + continue; + if (start < tmp->addr + tmp->size) + if (end > tmp->addr) + end = tmp->addr; + } + if (start < end) { + vmm = kmalloc(sizeof(*vmm), GFP_KERNEL); + if (!vmm) + return 0; + vmm->addr = start; + vmm->size = end - start; + vmm->type = KCORE_VMEMMAP; + list_add_tail(&vmm->list, head); + } + return 1; + +} +#else +int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) +{ + return 1; +} + +#endif + static int kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) { @@ -181,6 +223,12 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) ent->type = KCORE_RAM; list_add_tail(&ent->list, head); + + if (!get_sparsemem_vmemmap_info(ent, head)) { + list_del(&ent->list); + goto free_out; + } + return 0; free_out: kfree(ent); diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index bd7b840765a..379eaed72d4 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -82,6 +82,7 @@ enum kcore_type { KCORE_TEXT, KCORE_VMALLOC, KCORE_RAM, + KCORE_VMEMMAP, KCORE_OTHER, }; -- cgit v1.2.3-70-g09d2 From 81ac3ad9061dd9cd490ee92f0c5316a14d77ce18 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Tue, 22 Sep 2009 16:45:49 -0700 Subject: kcore: register module area in generic way Some archs define MODULED_VADDR/MODULES_END which is not in VMALLOC area. This is handled only in x86-64. This patch make it more generic. And we can use vread/vwrite to access the area. Fix it. 
Signed-off-by: KAMEZAWA Hiroyuki Cc: Jiri Slaby Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/init_64.c | 4 +--- fs/proc/kcore.c | 19 ++++++++++++++++++- include/linux/mm.h | 8 ++++++++ mm/vmalloc.c | 2 +- 4 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index d5d23cc2407..5a4398a6006 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -647,7 +647,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif /* CONFIG_MEMORY_HOTPLUG */ -static struct kcore_list kcore_modules, kcore_vsyscall; +static struct kcore_list kcore_vsyscall; void __init mem_init(void) { @@ -676,8 +676,6 @@ void __init mem_init(void) initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN, - KCORE_OTHER); kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 78970e6f715..c6a5ec73197 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -490,7 +490,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) if (m == NULL) { if (clear_user(buffer, tsz)) return -EFAULT; - } else if (is_vmalloc_addr((void *)start)) { + } else if (is_vmalloc_or_module_addr((void *)start)) { char * elf_buf; elf_buf = kzalloc(tsz, GFP_KERNEL); @@ -586,6 +586,22 @@ static void __init proc_kcore_text_init(void) } #endif +#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) +/* + * MODULES_VADDR has no intersection with VMALLOC_ADDR. + */ +struct kcore_list kcore_modules; +static void __init add_modules_range(void) +{ + kclist_add(&kcore_modules, (void *)MODULES_VADDR, + MODULES_END - MODULES_VADDR, KCORE_VMALLOC); +} +#else +static void __init add_modules_range(void) +{ +} +#endif + static int __init proc_kcore_init(void) { proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, @@ -595,6 +611,7 @@ static int __init proc_kcore_init(void) /* Store vmalloc area */ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START, KCORE_VMALLOC); + add_modules_range(); /* Store direct-map area from physical memory map */ kcore_update_ram(); hotplug_memory_notifier(kcore_callback, 0); diff --git a/include/linux/mm.h b/include/linux/mm.h index 5946e2ff9fe..b6eae5e3144 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -285,6 +285,14 @@ static inline int is_vmalloc_addr(const void *x) return 0; #endif } +#ifdef CONFIG_MMU +extern int is_vmalloc_or_module_addr(const void *x); +#else +static int is_vmalloc_or_module_addr(const void *x) +{ + return 0; +} +#endif static inline struct page *compound_head(struct page *page) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 5535da1d696..69511e66323 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -184,7 +184,7 @@ static int vmap_page_range(unsigned long start, unsigned long end, return ret; } -static inline int is_vmalloc_or_module_addr(const void *x) +int is_vmalloc_or_module_addr(const void *x) { /* * ARM, x86-64 and sparc64 put modules in a special place, -- cgit v1.2.3-70-g09d2 From a7e3108cca54c105f496919040f00df56767ec00 Mon Sep 17 00:00:00 2001 From: maximilian attems Date: Tue, 22 Sep 2009 16:45:53 -0700 Subject: ramfs: move RAMFS_MAGIC to include/linux/magic.h initramfs userspace likes to use this magic number. 
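For illustration, and not part of the patch: the kind of userspace check referred to here, sketched with the usual statfs(2) interface; 0x858458f6 is the RAMFS_MAGIC value being moved into <linux/magic.h>, and the rootfs/tmpfs test is only an assumed example.

#include <stdio.h>
#include <sys/vfs.h>

#define RAMFS_MAGIC     0x858458f6      /* value from include/linux/magic.h */
#define TMPFS_MAGIC     0x01021994

/* Check whether "/" is still the initramfs rootfs (ramfs or tmpfs). */
int main(void)
{
        struct statfs sfs;

        if (statfs("/", &sfs) != 0) {
                perror("statfs");
                return 1;
        }
        if (sfs.f_type == RAMFS_MAGIC || sfs.f_type == TMPFS_MAGIC)
                printf("/ is rootfs (initramfs)\n");
        else
                printf("/ is a real root filesystem (f_type 0x%lx)\n",
                       (unsigned long)sfs.f_type);
        return 0;
}

Moving the constant into <linux/magic.h> lets such tools include the header instead of duplicating the number.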
Cc: Hugh Dickins Signed-off-by: maximilian attems Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ramfs/inode.c | 4 +--- include/linux/magic.h | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index a7f0110fca4..a6090aa1a7c 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -34,12 +34,10 @@ #include #include #include +#include #include #include "internal.h" -/* some random number */ -#define RAMFS_MAGIC 0x858458f6 - #define RAMFS_DEFAULT_MODE 0755 static const struct super_operations ramfs_ops; diff --git a/include/linux/magic.h b/include/linux/magic.h index bce37786a0a..76285e01b39 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -12,6 +12,7 @@ #define SYSFS_MAGIC 0x62656572 #define SECURITYFS_MAGIC 0x73636673 #define SELINUX_MAGIC 0xf97cff8c +#define RAMFS_MAGIC 0x858458f6 /* some random number */ #define TMPFS_MAGIC 0x01021994 #define HUGETLBFS_MAGIC 0x958458f6 /* some random number */ #define SQUASHFS_MAGIC 0x73717368 -- cgit v1.2.3-70-g09d2 From b73b255956119111dc18fa063d1e3a0bb3f06328 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 22 Sep 2009 16:46:00 -0700 Subject: spi.h: add missing kernel-doc for struct spi_master Add missing kernel-doc notation in spi.h for struct spi_master: Warning(include/linux/spi/spi.h:289): No description found for parameter 'mode_bits' Warning(include/linux/spi/spi.h:289): No description found for parameter 'flags' Signed-off-by: Randy Dunlap Acked-by: David Brownell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/spi/spi.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index c47c4b4da97..eb25cedb995 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -207,6 +207,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * each slave has a chipselect signal, but it's common that not * every chipselect is connected to a slave. * @dma_alignment: SPI controller constraint on DMA buffers alignment. + * @mode_bits: flags understood by this controller driver + * @flags: other constraints relevant to this driver * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This * must fail if an unrecognized or unsupported mode is requested. -- cgit v1.2.3-70-g09d2 From 75368bf6c2876d8f33abfe77aa3864869a3893eb Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Tue, 22 Sep 2009 16:46:04 -0700 Subject: spi: add support for device table matching With this patch spi drivers can use standard spi_driver.id_table and MODULE_DEVICE_TABLE() mechanisms to bind against the devices. Just like we do with I2C drivers. This is useful when a single driver supports several variants of devices but it is not possible to detect them in run-time (like non-JEDEC chips probing in drivers/mtd/devices/m25p80.c), and when platform_data usage is overkill. This patch also makes life a lot easier on OpenFirmware platforms, since with OF we extensively use proper device IDs in modaliases. 
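For illustration, not part of the patch: roughly how a protocol driver would use the new table matching; the chip names and variant data here are hypothetical.

#include <linux/module.h>
#include <linux/spi/spi.h>

/* Hypothetical chip variants handled by one driver; driver_data lets
 * probe() tell them apart without run-time detection. */
static const struct spi_device_id example_spi_ids[] = {
        { "examplechip",      0 },
        { "examplechip-rev2", 1 },
        { }     /* sentinel */
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static int example_spi_probe(struct spi_device *spi)
{
        const struct spi_device_id *id = spi_get_device_id(spi);

        dev_info(&spi->dev, "probed as %s (variant %lu)\n",
                 id->name, (unsigned long)id->driver_data);
        return 0;
}

static struct spi_driver example_spi_driver = {
        .driver = {
                .name   = "examplechip",
                .owner  = THIS_MODULE,
        },
        .id_table       = example_spi_ids,      /* new field */
        .probe          = example_spi_probe,
};

spi_register_driver(&example_spi_driver) then binds against a board-declared spi_board_info carrying either modalias, and MODULE_DEVICE_TABLE() gives module autoloading the same information.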
Signed-off-by: Anton Vorontsov Cc: David Brownell Cc: David Woodhouse Cc: Grant Likely Cc: Jean Delvare Cc: Ben Dooks Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/spi/spi.c | 23 +++++++++++++++++++++++ include/linux/mod_devicetable.h | 10 ++++++++++ include/linux/spi/spi.h | 10 ++++++++-- scripts/mod/file2alias.c | 13 +++++++++++++ 4 files changed, 54 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 70845ccd85c..8518a6eb63f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -59,9 +59,32 @@ static struct device_attribute spi_dev_attrs[] = { * and the sysfs version makes coldplug work too. */ +static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, + const struct spi_device *sdev) +{ + while (id->name[0]) { + if (!strcmp(sdev->modalias, id->name)) + return id; + id++; + } + return NULL; +} + +const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) +{ + const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); + + return spi_match_id(sdrv->id_table, sdev); +} +EXPORT_SYMBOL_GPL(spi_get_device_id); + static int spi_match_device(struct device *dev, struct device_driver *drv) { const struct spi_device *spi = to_spi_device(dev); + const struct spi_driver *sdrv = to_spi_driver(drv); + + if (sdrv->id_table) + return !!spi_match_id(sdrv->id_table, spi); return strcmp(spi->modalias, drv->name) == 0; } diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 1bf5900ffe4..b34f1ef2f1f 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -399,6 +399,16 @@ struct i2c_device_id { __attribute__((aligned(sizeof(kernel_ulong_t)))); }; +/* spi */ + +#define SPI_NAME_SIZE 32 + +struct spi_device_id { + char name[SPI_NAME_SIZE]; + kernel_ulong_t driver_data /* Data private to the driver */ + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + /* dmi */ enum dmi_field { DMI_NONE, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index eb25cedb995..e2051f39f6a 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -20,6 +20,7 @@ #define __LINUX_SPI_H #include +#include /* * INTERFACES between SPI master-side drivers and SPI infrastructure. @@ -86,7 +87,7 @@ struct spi_device { int irq; void *controller_state; void *controller_data; - char modalias[32]; + char modalias[SPI_NAME_SIZE]; /* * likely need more hooks for more protocol options affecting how @@ -145,6 +146,7 @@ struct spi_message; /** * struct spi_driver - Host side "protocol" driver + * @id_table: List of SPI devices supported by this driver * @probe: Binds this driver to the spi device. Drivers can verify * that the device is actually present, and may need to configure * characteristics (such as bits_per_word) which weren't needed for @@ -170,6 +172,7 @@ struct spi_message; * MMC, RTC, filesystem character device nodes, and hardware monitoring. 
*/ struct spi_driver { + const struct spi_device_id *id_table; int (*probe)(struct spi_device *spi); int (*remove)(struct spi_device *spi); void (*shutdown)(struct spi_device *spi); @@ -734,7 +737,7 @@ struct spi_board_info { * controller_data goes to spi_device.controller_data, * irq is copied too */ - char modalias[32]; + char modalias[SPI_NAME_SIZE]; const void *platform_data; void *controller_data; int irq; @@ -802,4 +805,7 @@ spi_unregister_device(struct spi_device *spi) device_unregister(&spi->dev); } +extern const struct spi_device_id * +spi_get_device_id(const struct spi_device *sdev); + #endif /* __LINUX_SPI_H */ diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 40e0045876e..9d446e34519 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -657,6 +657,15 @@ static int do_i2c_entry(const char *filename, struct i2c_device_id *id, return 1; } +/* Looks like: S */ +static int do_spi_entry(const char *filename, struct spi_device_id *id, + char *alias) +{ + sprintf(alias, "%s", id->name); + + return 1; +} + static const struct dmifield { const char *prefix; int field; @@ -853,6 +862,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, do_table(symval, sym->st_size, sizeof(struct i2c_device_id), "i2c", do_i2c_entry, mod); + else if (sym_is(symname, "__mod_spi_device_table")) + do_table(symval, sym->st_size, + sizeof(struct spi_device_id), "spi", + do_spi_entry, mod); else if (sym_is(symname, "__mod_dmi_device_table")) do_table(symval, sym->st_size, sizeof(struct dmi_system_id), "dmi", -- cgit v1.2.3-70-g09d2 From e0626e3844e8f430fc1a4417f523a00797df7ca6 Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Tue, 22 Sep 2009 16:46:08 -0700 Subject: spi: prefix modalias with "spi:" This makes it consistent with other buses (platform, i2c, vio, ...). I'm not sure why we use the prefixes, but there must be a reason. This was easy enough to do it, and I did it. Signed-off-by: Anton Vorontsov Cc: David Brownell Cc: David Woodhouse Cc: Grant Likely Cc: Jean Delvare Cc: Ben Dooks Cc: Benjamin Herrenschmidt Cc: Dmitry Torokhov Cc: Samuel Ortiz Cc: "John W. 
Linville" Acked-by: Mike Frysinger Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/gpio/max7301.c | 1 + drivers/gpio/mcp23s08.c | 1 + drivers/hwmon/lis3lv02d_spi.c | 2 +- drivers/hwmon/max1111.c | 1 + drivers/input/touchscreen/ad7877.c | 1 + drivers/input/touchscreen/ad7879.c | 1 + drivers/input/touchscreen/ads7846.c | 1 + drivers/leds/leds-dac124s085.c | 1 + drivers/mfd/ezx-pcap.c | 1 + drivers/misc/eeprom/at25.c | 2 +- drivers/mmc/host/mmc_spi.c | 1 + drivers/mtd/devices/mtd_dataflash.c | 1 + drivers/net/enc28j60.c | 1 + drivers/net/ks8851.c | 1 + drivers/net/wireless/libertas/if_spi.c | 1 + drivers/net/wireless/p54/p54spi.c | 1 + drivers/net/wireless/wl12xx/wl1251_main.c | 1 + drivers/rtc/rtc-ds1305.c | 1 + drivers/rtc/rtc-ds1390.c | 1 + drivers/rtc/rtc-ds3234.c | 1 + drivers/rtc/rtc-m41t94.c | 1 + drivers/rtc/rtc-max6902.c | 1 + drivers/rtc/rtc-r9701.c | 1 + drivers/rtc/rtc-rs5c348.c | 1 + drivers/serial/max3100.c | 1 + drivers/spi/spi.c | 3 ++- drivers/spi/spidev.c | 1 + drivers/spi/tle62x0.c | 1 + drivers/staging/stlc45xx/stlc45xx.c | 1 + drivers/video/backlight/corgi_lcd.c | 1 + drivers/video/backlight/ltv350qv.c | 1 + drivers/video/backlight/tdo24m.c | 1 + drivers/video/backlight/tosa_lcd.c | 2 +- drivers/video/backlight/vgg2432a4.c | 3 +-- include/linux/mod_devicetable.h | 1 + scripts/mod/file2alias.c | 4 ++-- 36 files changed, 38 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c index 7b82eaae262..480956f1ca5 100644 --- a/drivers/gpio/max7301.c +++ b/drivers/gpio/max7301.c @@ -339,3 +339,4 @@ module_exit(max7301_exit); MODULE_AUTHOR("Juergen Beisert"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MAX7301 SPI based GPIO-Expander"); +MODULE_ALIAS("spi:" DRIVER_NAME); diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c index f6fae0e50e6..c6c7aa15f5d 100644 --- a/drivers/gpio/mcp23s08.c +++ b/drivers/gpio/mcp23s08.c @@ -433,3 +433,4 @@ static void __exit mcp23s08_exit(void) module_exit(mcp23s08_exit); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:mcp23s08"); diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c index 82ebca5a699..ecd739534f6 100644 --- a/drivers/hwmon/lis3lv02d_spi.c +++ b/drivers/hwmon/lis3lv02d_spi.c @@ -139,4 +139,4 @@ module_exit(lis302dl_exit); MODULE_AUTHOR("Daniel Mack "); MODULE_DESCRIPTION("lis3lv02d SPI glue layer"); MODULE_LICENSE("GPL"); - +MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c index bfaa665ccf3..9ac497271ad 100644 --- a/drivers/hwmon/max1111.c +++ b/drivers/hwmon/max1111.c @@ -242,3 +242,4 @@ module_exit(max1111_exit); MODULE_AUTHOR("Eric Miao "); MODULE_DESCRIPTION("MAX1111 ADC Driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:max1111"); diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c index ecaeb7e8e75..eb83939c705 100644 --- a/drivers/input/touchscreen/ad7877.c +++ b/drivers/input/touchscreen/ad7877.c @@ -842,3 +842,4 @@ module_exit(ad7877_exit); MODULE_AUTHOR("Michael Hennerich "); MODULE_DESCRIPTION("AD7877 touchscreen Driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:ad7877"); diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c index 5d8a7039880..19b4db7e974 100644 --- a/drivers/input/touchscreen/ad7879.c +++ b/drivers/input/touchscreen/ad7879.c @@ -779,3 +779,4 @@ module_exit(ad7879_exit); MODULE_AUTHOR("Michael Hennerich "); MODULE_DESCRIPTION("AD7879(-1) touchscreen Driver"); MODULE_LICENSE("GPL"); 
+MODULE_ALIAS("spi:ad7879"); diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index ba9d38c3f41..09c810999b9 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1256,3 +1256,4 @@ module_exit(ads7846_exit); MODULE_DESCRIPTION("ADS7846 TouchScreen Driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:ads7846"); diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c index 098d9aae725..2913d76ad3d 100644 --- a/drivers/leds/leds-dac124s085.c +++ b/drivers/leds/leds-dac124s085.c @@ -148,3 +148,4 @@ module_exit(dac124s085_leds_exit); MODULE_AUTHOR("Guennadi Liakhovetski "); MODULE_DESCRIPTION("DAC124S085 LED driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("spi:dac124s085"); diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 016be4938e4..87628891797 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -548,3 +548,4 @@ module_exit(ezx_pcap_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Daniel Ribeiro / Harald Welte"); MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver"); +MODULE_ALIAS("spi:ezx-pcap"); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 2e535a0ccd5..d902d81dde3 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -417,4 +417,4 @@ module_exit(at25_exit); MODULE_DESCRIPTION("Driver for most SPI EEPROMs"); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); - +MODULE_ALIAS("spi:at25"); diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index a461017ce5c..d55fe4fb793 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1562,3 +1562,4 @@ MODULE_AUTHOR("Mike Lavender, David Brownell, " "Hans-Peter Nilsson, Jan Nikitenko"); MODULE_DESCRIPTION("SPI SD/MMC host driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:mmc_spi"); diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 43976aa4dbb..211c27acd01 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -966,3 +966,4 @@ module_exit(dataflash_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andrew Victor, David Brownell"); MODULE_DESCRIPTION("MTD DataFlash driver"); +MODULE_ALIAS("spi:mtd_dataflash"); diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c index 117fc6c12e3..66813c91a72 100644 --- a/drivers/net/enc28j60.c +++ b/drivers/net/enc28j60.c @@ -1666,3 +1666,4 @@ MODULE_AUTHOR("Claudio Lanconelli "); MODULE_LICENSE("GPL"); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)"); +MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index 547ac7c7479..23783586435 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c @@ -1321,3 +1321,4 @@ MODULE_LICENSE("GPL"); module_param_named(message, msg_enable, int, 0); MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); +MODULE_ALIAS("spi:ks8851"); diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 446e327180f..cb8be8d7abc 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -1222,3 +1222,4 @@ MODULE_DESCRIPTION("Libertas SPI WLAN Driver"); MODULE_AUTHOR("Andrey Yurovsky , " "Colin McCabe "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:libertas_spi"); diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 05458d9249c..afd26bf0664 100644 --- 
a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -731,3 +731,4 @@ module_exit(p54spi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter "); +MODULE_ALIAS("spi:cx3110x"); diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c index 5809ef5b18f..1103256ad98 100644 --- a/drivers/net/wireless/wl12xx/wl1251_main.c +++ b/drivers/net/wireless/wl12xx/wl1251_main.c @@ -1426,3 +1426,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw); MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo "); +MODULE_ALIAS("spi:wl12xx"); diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 8f410e59d9f..2736b11a1b1 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c @@ -841,3 +841,4 @@ module_exit(ds1305_exit); MODULE_DESCRIPTION("RTC driver for DS1305 and DS1306 chips"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:rtc-ds1305"); diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c index e01b955db07..cdb70505709 100644 --- a/drivers/rtc/rtc-ds1390.c +++ b/drivers/rtc/rtc-ds1390.c @@ -189,3 +189,4 @@ module_exit(ds1390_exit); MODULE_DESCRIPTION("Dallas/Maxim DS1390/93/94 SPI RTC driver"); MODULE_AUTHOR("Mark Jackson "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:rtc-ds1390"); diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c index c51589ede5b..a774ca35b5f 100644 --- a/drivers/rtc/rtc-ds3234.c +++ b/drivers/rtc/rtc-ds3234.c @@ -188,3 +188,4 @@ module_exit(ds3234_exit); MODULE_DESCRIPTION("DS3234 SPI RTC driver"); MODULE_AUTHOR("Dennis Aberilla "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:ds3234"); diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c index c3a18c58daf..c8c97a4169d 100644 --- a/drivers/rtc/rtc-m41t94.c +++ b/drivers/rtc/rtc-m41t94.c @@ -171,3 +171,4 @@ module_exit(m41t94_exit); MODULE_AUTHOR("Kim B. 
Heino "); MODULE_DESCRIPTION("Driver for ST M41T94 SPI RTC"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:rtc-m41t94"); diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c index 36a8ea9ed8b..657403ebd54 100644 --- a/drivers/rtc/rtc-max6902.c +++ b/drivers/rtc/rtc-max6902.c @@ -175,3 +175,4 @@ module_exit(max6902_exit); MODULE_DESCRIPTION ("max6902 spi RTC driver"); MODULE_AUTHOR ("Raphael Assenat"); MODULE_LICENSE ("GPL"); +MODULE_ALIAS("spi:rtc-max6902"); diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c index 42028f233be..9beba49c3c5 100644 --- a/drivers/rtc/rtc-r9701.c +++ b/drivers/rtc/rtc-r9701.c @@ -174,3 +174,4 @@ module_exit(r9701_exit); MODULE_DESCRIPTION("r9701 spi RTC driver"); MODULE_AUTHOR("Magnus Damm "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:rtc-r9701"); diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c index dd1e2bc7a47..2099037cb3e 100644 --- a/drivers/rtc/rtc-rs5c348.c +++ b/drivers/rtc/rtc-rs5c348.c @@ -251,3 +251,4 @@ MODULE_AUTHOR("Atsushi Nemoto "); MODULE_DESCRIPTION("Ricoh RS5C348 RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("spi:rtc-rs5c348"); diff --git a/drivers/serial/max3100.c b/drivers/serial/max3100.c index 75ab00631c4..3c30c56aa2e 100644 --- a/drivers/serial/max3100.c +++ b/drivers/serial/max3100.c @@ -925,3 +925,4 @@ module_exit(max3100_exit); MODULE_DESCRIPTION("MAX3100 driver"); MODULE_AUTHOR("Christian Pellegrin "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:max3100"); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8518a6eb63f..49e84860c8d 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -93,7 +94,7 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) { const struct spi_device *spi = to_spi_device(dev); - add_uevent_var(env, "MODALIAS=%s", spi->modalias); + add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); return 0; } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 606e7a40a8d..f921bd1109e 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -688,3 +688,4 @@ module_exit(spidev_exit); MODULE_AUTHOR("Andrea Paterniani, "); MODULE_DESCRIPTION("User mode SPI device interface"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:spidev"); diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c index 455991fbe28..bf9540f5fb9 100644 --- a/drivers/spi/tle62x0.c +++ b/drivers/spi/tle62x0.c @@ -329,3 +329,4 @@ module_exit(tle62x0_exit); MODULE_AUTHOR("Ben Dooks "); MODULE_DESCRIPTION("TLE62x0 SPI driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("spi:tle62x0"); diff --git a/drivers/staging/stlc45xx/stlc45xx.c b/drivers/staging/stlc45xx/stlc45xx.c index 12d414deaad..be99eb33d81 100644 --- a/drivers/staging/stlc45xx/stlc45xx.c +++ b/drivers/staging/stlc45xx/stlc45xx.c @@ -2591,3 +2591,4 @@ module_exit(stlc45xx_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo "); +MODULE_ALIAS("spi:cx3110x"); diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index f8a4bb20f41..2211a852af9 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -639,3 +639,4 @@ module_exit(corgi_lcd_exit); MODULE_DESCRIPTION("LCD and backlight driver for SHARP C7x0/Cxx00"); MODULE_AUTHOR("Eric Miao "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:corgi-lcd"); diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c index 2eb206bf73e..4631ca8fa4a 100644 --- 
a/drivers/video/backlight/ltv350qv.c +++ b/drivers/video/backlight/ltv350qv.c @@ -328,3 +328,4 @@ module_exit(ltv350qv_exit); MODULE_AUTHOR("Haavard Skinnemoen "); MODULE_DESCRIPTION("Samsung LTV350QV LCD Driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:ltv350qv"); diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index 51422fc4f60..bbfb502add6 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c @@ -472,3 +472,4 @@ module_exit(tdo24m_exit); MODULE_AUTHOR("Eric Miao "); MODULE_DESCRIPTION("Driver for Toppoly TDO24M LCD Panel"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:tdo24m"); diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index b7fbc75a62f..50ec17dfc51 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c @@ -300,4 +300,4 @@ module_exit(tosa_lcd_exit); MODULE_AUTHOR("Dmitry Baryshkov"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA"); - +MODULE_ALIAS("spi:tosa-lcd"); diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c index 8e653b8a6f1..b49063c831e 100644 --- a/drivers/video/backlight/vgg2432a4.c +++ b/drivers/video/backlight/vgg2432a4.c @@ -280,5 +280,4 @@ module_exit(vgg2432a4_exit); MODULE_AUTHOR("Ben Dooks "); MODULE_DESCRIPTION("VGG2432A4 LCD Driver"); MODULE_LICENSE("GPL v2"); - - +MODULE_ALIAS("spi:VGG2432A4"); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b34f1ef2f1f..f58e9d836f3 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -402,6 +402,7 @@ struct i2c_device_id { /* spi */ #define SPI_NAME_SIZE 32 +#define SPI_MODULE_PREFIX "spi:" struct spi_device_id { char name[SPI_NAME_SIZE]; diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 9d446e34519..62a9025cdcc 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -657,11 +657,11 @@ static int do_i2c_entry(const char *filename, struct i2c_device_id *id, return 1; } -/* Looks like: S */ +/* Looks like: spi:S */ static int do_spi_entry(const char *filename, struct spi_device_id *id, char *alias) { - sprintf(alias, "%s", id->name); + sprintf(alias, SPI_MODULE_PREFIX "%s", id->name); return 1; } -- cgit v1.2.3-70-g09d2 From 568d0697f42771425ae9f1e9a3db769fef7e10b6 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 22 Sep 2009 16:46:18 -0700 Subject: spi: handle TX-only/RX-only Support two new half-duplex SPI implementation restrictions, for links that talk to TX-only or RX-only devices. (Existing half-duplex flavors support both transfer directions, just not at the same time.) Move spi_async() into the spi.c core, and stop inlining it. Then make that function perform error checks and reject messages that demand more than the underlying controller can support. Based on a patch from Marek Szyprowski which did this only for the bitbanged GPIO driver. 
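As a rough illustration of the new rules (not taken from this patch; the helper and context-struct names below are invented), a protocol driver on such a limited link keeps every spi_transfer unidirectional, and spi_async() now verifies that before handing the message to the controller:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/spi/spi.h>

/* Caller-owned (kmalloc'd) context so the buffers stay valid until completion. */
struct example_async_ctx {
	struct spi_message msg;
	struct spi_transfer xfer[2];
	u8 cmd;
	u8 resp;
};

static int example_submit(struct spi_device *spi, struct example_async_ctx *ctx,
			  void (*complete)(void *context))
{
	memset(ctx->xfer, 0, sizeof(ctx->xfer));
	spi_message_init(&ctx->msg);

	ctx->xfer[0].tx_buf = &ctx->cmd;	/* TX-only: rx_buf stays NULL */
	ctx->xfer[0].len = 1;
	spi_message_add_tail(&ctx->xfer[0], &ctx->msg);

	ctx->xfer[1].rx_buf = &ctx->resp;	/* RX-only: tx_buf stays NULL */
	ctx->xfer[1].len = 1;
	spi_message_add_tail(&ctx->xfer[1], &ctx->msg);

	ctx->msg.complete = complete;
	ctx->msg.context = ctx;

	/*
	 * With this patch the core validates the message: on a master that
	 * sets SPI_MASTER_HALF_DUPLEX this unidirectional pair is accepted,
	 * while, on such a half-duplex master, a transfer with both tx_buf
	 * and rx_buf set -- or a tx_buf when the controller is also flagged
	 * SPI_MASTER_NO_TX -- is rejected with -EINVAL before it ever
	 * reaches master->transfer().
	 */
	return spi_async(spi, &ctx->msg);
}

Because spi_async() returns before the transfer finishes, the context (and its buffers) must outlive the call, which is why it is allocated per request rather than placed on the stack.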
Cc: Marek Szyprowski Signed-off-by: David Brownell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/spi/spi.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/spi/spi.h | 39 +++----------------------------- 2 files changed, 62 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 49e84860c8d..b76f2468a84 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -663,6 +663,65 @@ int spi_setup(struct spi_device *spi) } EXPORT_SYMBOL_GPL(spi_setup); +/** + * spi_async - asynchronous SPI transfer + * @spi: device with which data will be exchanged + * @message: describes the data transfers, including completion callback + * Context: any (irqs may be blocked, etc) + * + * This call may be used in_irq and other contexts which can't sleep, + * as well as from task contexts which can sleep. + * + * The completion callback is invoked in a context which can't sleep. + * Before that invocation, the value of message->status is undefined. + * When the callback is issued, message->status holds either zero (to + * indicate complete success) or a negative error code. After that + * callback returns, the driver which issued the transfer request may + * deallocate the associated memory; it's no longer in use by any SPI + * core or controller driver code. + * + * Note that although all messages to a spi_device are handled in + * FIFO order, messages may go to different devices in other orders. + * Some device might be higher priority, or have various "hard" access + * time requirements, for example. + * + * On detection of any fault during the transfer, processing of + * the entire message is aborted, and the device is deselected. + * Until returning from the associated message completion callback, + * no other spi_message queued to that device will be processed. + * (This rule applies equally to all the synchronous transfer calls, + * which are wrappers around this core asynchronous primitive.) + */ +int spi_async(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + + /* Half-duplex links include original MicroWire, and ones with + * only one data pin like SPI_3WIRE (switches direction) or where + * either MOSI or MISO is missing. They can also be caused by + * software limitations. + */ + if ((master->flags & SPI_MASTER_HALF_DUPLEX) + || (spi->mode & SPI_3WIRE)) { + struct spi_transfer *xfer; + unsigned flags = master->flags; + + list_for_each_entry(xfer, &message->transfers, transfer_list) { + if (xfer->rx_buf && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) + return -EINVAL; + } + } + + message->spi = spi; + message->status = -EINPROGRESS; + return master->transfer(spi, message); +} +EXPORT_SYMBOL_GPL(spi_async); + /*-------------------------------------------------------------------------*/ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index e2051f39f6a..97b60b37f44 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -258,6 +258,8 @@ struct spi_master { /* other constraints relevant to this driver */ u16 flags; #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ +#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ /* Setup mode and clock, etc (spi driver may call many times). 
* @@ -538,42 +540,7 @@ static inline void spi_message_free(struct spi_message *m) } extern int spi_setup(struct spi_device *spi); - -/** - * spi_async - asynchronous SPI transfer - * @spi: device with which data will be exchanged - * @message: describes the data transfers, including completion callback - * Context: any (irqs may be blocked, etc) - * - * This call may be used in_irq and other contexts which can't sleep, - * as well as from task contexts which can sleep. - * - * The completion callback is invoked in a context which can't sleep. - * Before that invocation, the value of message->status is undefined. - * When the callback is issued, message->status holds either zero (to - * indicate complete success) or a negative error code. After that - * callback returns, the driver which issued the transfer request may - * deallocate the associated memory; it's no longer in use by any SPI - * core or controller driver code. - * - * Note that although all messages to a spi_device are handled in - * FIFO order, messages may go to different devices in other orders. - * Some device might be higher priority, or have various "hard" access - * time requirements, for example. - * - * On detection of any fault during the transfer, processing of - * the entire message is aborted, and the device is deselected. - * Until returning from the associated message completion callback, - * no other spi_message queued to that device will be processed. - * (This rule applies equally to all the synchronous transfer calls, - * which are wrappers around this core asynchronous primitive.) - */ -static inline int -spi_async(struct spi_device *spi, struct spi_message *message) -{ - message->spi = spi; - return spi->master->transfer(spi, message); -} +extern int spi_async(struct spi_device *spi, struct spi_message *message); /*---------------------------------------------------------------------------*/ -- cgit v1.2.3-70-g09d2 From a4177ee7f1a83eecb1d75e85d32664b023ef65e9 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Sep 2009 16:46:33 -0700 Subject: gpiolib: allow exported GPIO nodes to be named using sysfs links Commit 926b663ce8215ba448960e1ff6e58b67a2c3b99b (gpiolib: allow GPIOs to be named) already provides naming on the chip level. This patch provides more flexibility by allowing multiple names where ever in sysfs on a per GPIO basis. Adapted from David Brownell's comments on a similar concept: http://lkml.org/lkml/2009/4/20/203. [randy.dunlap@oracle.com: fix build for CONFIG_GENERIC_GPIO=n] Signed-off-by: Jani Nikula Acked-by: David Brownell Cc: Daniel Silverstone Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/gpio.txt | 10 ++++++++++ drivers/gpio/gpiolib.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ include/asm-generic/gpio.h | 8 ++++++++ include/linux/gpio.h | 11 +++++++++++ 4 files changed, 74 insertions(+) (limited to 'include') diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt index e4b6985044a..566edaa56a5 100644 --- a/Documentation/gpio.txt +++ b/Documentation/gpio.txt @@ -555,6 +555,11 @@ requested using gpio_request(): /* reverse gpio_export() */ void gpio_unexport(); + /* create a sysfs link to an exported GPIO node */ + int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) + + After a kernel driver requests a GPIO, it may only be made available in the sysfs interface by gpio_export(). The driver can control whether the signal direction may change. 
This helps drivers prevent userspace code @@ -563,3 +568,8 @@ from accidentally clobbering important system state. This explicit exporting can help with debugging (by making some kinds of experiments easier), or can provide an always-there interface that's suitable for documenting as part of a board support package. + +After the GPIO has been exported, gpio_export_link() allows creating +symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can +use this to provide the interface under their own device in sysfs with +a descriptive name. diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 51a8d4103be..aef6b3d8e2c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -504,6 +504,51 @@ static int match_export(struct device *dev, void *data) return dev_get_drvdata(dev) == data; } +/** + * gpio_export_link - create a sysfs link to an exported GPIO node + * @dev: device under which to create symlink + * @name: name of the symlink + * @gpio: gpio to create symlink to, already exported + * + * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN + * node. Caller is responsible for unlinking. + * + * Returns zero on success, else an error. + */ +int gpio_export_link(struct device *dev, const char *name, unsigned gpio) +{ + struct gpio_desc *desc; + int status = -EINVAL; + + if (!gpio_is_valid(gpio)) + goto done; + + mutex_lock(&sysfs_lock); + + desc = &gpio_desc[gpio]; + + if (test_bit(FLAG_EXPORT, &desc->flags)) { + struct device *tdev; + + tdev = class_find_device(&gpio_class, NULL, desc, match_export); + if (tdev != NULL) { + status = sysfs_create_link(&dev->kobj, &tdev->kobj, + name); + } else { + status = -ENODEV; + } + } + + mutex_unlock(&sysfs_lock); + +done: + if (status) + pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); + + return status; +} +EXPORT_SYMBOL_GPL(gpio_export_link); + /** * gpio_unexport - reverse effect of gpio_export() * @gpio: gpio to make unavailable diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index d6c379dc64f..9cca3785cab 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -141,6 +141,8 @@ extern int __gpio_to_irq(unsigned gpio); * but more typically is configured entirely from userspace. */ extern int gpio_export(unsigned gpio, bool direction_may_change); +extern int gpio_export_link(struct device *dev, const char *name, + unsigned gpio); extern void gpio_unexport(unsigned gpio); #endif /* CONFIG_GPIO_SYSFS */ @@ -185,6 +187,12 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change) return -ENOSYS; } +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + return -ENOSYS; +} + static inline void gpio_unexport(unsigned gpio) { } diff --git a/include/linux/gpio.h b/include/linux/gpio.h index e10c49a5b96..059bd189d35 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -12,6 +12,8 @@ #include #include +struct device; + /* * Some platforms don't support the GPIO programming interface. 
* @@ -89,6 +91,15 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change) return -EINVAL; } +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + /* GPIO can never have been exported */ + WARN_ON(1); + return -EINVAL; +} + + static inline void gpio_unexport(unsigned gpio) { /* GPIO can never have been exported */ -- cgit v1.2.3-70-g09d2 From 1e5db00687c1ebd93a902caf1d3694209013cb3e Mon Sep 17 00:00:00 2001 From: Richard Röjfors Date: Tue, 22 Sep 2009 16:46:34 -0700 Subject: gpio: add MC33880 driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A GPIO driver for the Freescale MC33880 High/Low side switch Signed-off-by: Richard Röjfors Cc: David Brownell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/gpio/Kconfig | 7 ++ drivers/gpio/Makefile | 1 + drivers/gpio/mc33880.c | 196 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/spi/mc33880.h | 10 +++ 4 files changed, 214 insertions(+) create mode 100644 drivers/gpio/mc33880.c create mode 100644 include/linux/spi/mc33880.h (limited to 'include') diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 6b4c484a699..223e7c92fd5 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -195,4 +195,11 @@ config GPIO_MCP23S08 SPI driver for Microchip MCP23S08 I/O expander. This provides a GPIO interface supporting inputs and outputs. +config GPIO_MC33880 + tristate "Freescale MC33880 high-side/low-side switch" + depends on SPI_MASTER + help + SPI driver for Freescale MC33880 high-side/low-side switch. + This provides GPIO interface supporting inputs and outputs. + endif diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index ea7c745f26a..f35de16c7c4 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o obj-$(CONFIG_GPIO_MAX7301) += max7301.o obj-$(CONFIG_GPIO_MAX732X) += max732x.o +obj-$(CONFIG_GPIO_MC33880) += mc33880.o obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o obj-$(CONFIG_GPIO_PCA953X) += pca953x.o obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o diff --git a/drivers/gpio/mc33880.c b/drivers/gpio/mc33880.c new file mode 100644 index 00000000000..e7d01bd8fdb --- /dev/null +++ b/drivers/gpio/mc33880.c @@ -0,0 +1,196 @@ +/* + * mc33880.c MC33880 high-side/low-side switch GPIO driver + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * Freescale MC33880 high-side/low-side switch + */ + +#include +#include +#include +#include +#include + +#define DRIVER_NAME "mc33880" + +/* + * Pin configurations, see MAX7301 datasheet page 6 + */ +#define PIN_CONFIG_MASK 0x03 +#define PIN_CONFIG_IN_PULLUP 0x03 +#define PIN_CONFIG_IN_WO_PULLUP 0x02 +#define PIN_CONFIG_OUT 0x01 + +#define PIN_NUMBER 8 + + +/* + * Some registers must be read back to modify. 
+ * To save time we cache them here in memory + */ +struct mc33880 { + struct mutex lock; /* protect from simultanous accesses */ + u8 port_config; + struct gpio_chip chip; + struct spi_device *spi; +}; + +static int mc33880_write_config(struct mc33880 *mc) +{ + return spi_write(mc->spi, &mc->port_config, sizeof(mc->port_config)); +} + + +static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value) +{ + if (value) + mc->port_config |= 1 << offset; + else + mc->port_config &= ~(1 << offset); + + return mc33880_write_config(mc); +} + + +static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct mc33880 *mc = container_of(chip, struct mc33880, chip); + + mutex_lock(&mc->lock); + + __mc33880_set(mc, offset, value); + + mutex_unlock(&mc->lock); +} + +static int __devinit mc33880_probe(struct spi_device *spi) +{ + struct mc33880 *mc; + struct mc33880_platform_data *pdata; + int ret; + + pdata = spi->dev.platform_data; + if (!pdata || !pdata->base) { + dev_dbg(&spi->dev, "incorrect or missing platform data\n"); + return -EINVAL; + } + + /* + * bits_per_word cannot be configured in platform data + */ + spi->bits_per_word = 8; + + ret = spi_setup(spi); + if (ret < 0) + return ret; + + mc = kzalloc(sizeof(struct mc33880), GFP_KERNEL); + if (!mc) + return -ENOMEM; + + mutex_init(&mc->lock); + + dev_set_drvdata(&spi->dev, mc); + + mc->spi = spi; + + mc->chip.label = DRIVER_NAME, + mc->chip.set = mc33880_set; + mc->chip.base = pdata->base; + mc->chip.ngpio = PIN_NUMBER; + mc->chip.can_sleep = 1; + mc->chip.dev = &spi->dev; + mc->chip.owner = THIS_MODULE; + + mc->port_config = 0x00; + /* write twice, because during initialisation the first setting + * is just for testing SPI communication, and the second is the + * "real" configuration + */ + ret = mc33880_write_config(mc); + mc->port_config = 0x00; + if (!ret) + ret = mc33880_write_config(mc); + + if (ret) { + printk(KERN_ERR "Failed writing to " DRIVER_NAME ": %d\n", ret); + goto exit_destroy; + } + + ret = gpiochip_add(&mc->chip); + if (ret) + goto exit_destroy; + + return ret; + +exit_destroy: + dev_set_drvdata(&spi->dev, NULL); + mutex_destroy(&mc->lock); + kfree(mc); + return ret; +} + +static int mc33880_remove(struct spi_device *spi) +{ + struct mc33880 *mc; + int ret; + + mc = dev_get_drvdata(&spi->dev); + if (mc == NULL) + return -ENODEV; + + dev_set_drvdata(&spi->dev, NULL); + + ret = gpiochip_remove(&mc->chip); + if (!ret) { + mutex_destroy(&mc->lock); + kfree(mc); + } else + dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n", + ret); + + return ret; +} + +static struct spi_driver mc33880_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, + .probe = mc33880_probe, + .remove = __devexit_p(mc33880_remove), +}; + +static int __init mc33880_init(void) +{ + return spi_register_driver(&mc33880_driver); +} +/* register after spi postcore initcall and before + * subsys initcalls that may rely on these GPIOs + */ +subsys_initcall(mc33880_init); + +static void __exit mc33880_exit(void) +{ + spi_unregister_driver(&mc33880_driver); +} +module_exit(mc33880_exit); + +MODULE_AUTHOR("Mocean Laboratories "); +MODULE_LICENSE("GPL v2"); + diff --git a/include/linux/spi/mc33880.h b/include/linux/spi/mc33880.h new file mode 100644 index 00000000000..82ffccd6fbe --- /dev/null +++ b/include/linux/spi/mc33880.h @@ -0,0 +1,10 @@ +#ifndef LINUX_SPI_MC33880_H +#define LINUX_SPI_MC33880_H + +struct mc33880_platform_data { + /* number assigned to the first GPIO */ + unsigned base; +}; + 
+#endif + -- cgit v1.2.3-70-g09d2 From 4cf8e53b3b55fa2f9b2a6b9c3e557b649adf7c6a Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 22 Sep 2009 16:46:35 -0700 Subject: mfd/gpio: add a GPIO interface to the UCB1400 MFD chip driver via gpiolib Cc: Eric Miao Cc: Russell King Cc: David Brownell Cc: Samuel Ortiz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/gpio/Kconfig | 12 +++++ drivers/gpio/Makefile | 1 + drivers/gpio/ucb1400_gpio.c | 125 ++++++++++++++++++++++++++++++++++++++++++++ drivers/mfd/ucb1400_core.c | 31 +++++++++-- include/linux/ucb1400.h | 19 +++++++ 5 files changed, 184 insertions(+), 4 deletions(-) create mode 100644 drivers/gpio/ucb1400_gpio.c (limited to 'include') diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 223e7c92fd5..ccca08e0b59 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -202,4 +202,16 @@ config GPIO_MC33880 SPI driver for Freescale MC33880 high-side/low-side switch. This provides GPIO interface supporting inputs and outputs. +comment "AC97 GPIO expanders:" + +config GPIO_UCB1400 + bool "Philips UCB1400 GPIO" + depends on UCB1400_CORE + help + This enables support for the Philips UCB1400 GPIO pins. + The UCB1400 is an AC97 audio codec. + + To compile this driver as a module, choose M here: the + module will be called ucb1400_gpio. + endif diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index f35de16c7c4..c1ac034698c 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_PCA953X) += pca953x.o obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o obj-$(CONFIG_GPIO_PL061) += pl061.o obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o +obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o diff --git a/drivers/gpio/ucb1400_gpio.c b/drivers/gpio/ucb1400_gpio.c new file mode 100644 index 00000000000..50e6bd1392c --- /dev/null +++ b/drivers/gpio/ucb1400_gpio.c @@ -0,0 +1,125 @@ +/* + * Philips UCB1400 GPIO driver + * + * Author: Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include + +struct ucb1400_gpio_data *ucbdata; + +static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off) +{ + struct ucb1400_gpio *gpio; + gpio = container_of(gc, struct ucb1400_gpio, gc); + ucb1400_gpio_set_direction(gpio->ac97, off, 0); + return 0; +} + +static int ucb1400_gpio_dir_out(struct gpio_chip *gc, unsigned off, int val) +{ + struct ucb1400_gpio *gpio; + gpio = container_of(gc, struct ucb1400_gpio, gc); + ucb1400_gpio_set_direction(gpio->ac97, off, 1); + ucb1400_gpio_set_value(gpio->ac97, off, val); + return 0; +} + +static int ucb1400_gpio_get(struct gpio_chip *gc, unsigned off) +{ + struct ucb1400_gpio *gpio; + gpio = container_of(gc, struct ucb1400_gpio, gc); + return ucb1400_gpio_get_value(gpio->ac97, off); +} + +static void ucb1400_gpio_set(struct gpio_chip *gc, unsigned off, int val) +{ + struct ucb1400_gpio *gpio; + gpio = container_of(gc, struct ucb1400_gpio, gc); + ucb1400_gpio_set_value(gpio->ac97, off, val); +} + +static int ucb1400_gpio_probe(struct platform_device *dev) +{ + struct ucb1400_gpio *ucb = dev->dev.platform_data; + int err = 0; + + if (!(ucbdata && ucbdata->gpio_offset)) { + err = -EINVAL; + goto err; + } + + platform_set_drvdata(dev, ucb); + + ucb->gc.label = "ucb1400_gpio"; + ucb->gc.base = ucbdata->gpio_offset; + ucb->gc.ngpio = 10; + ucb->gc.owner = THIS_MODULE; + + ucb->gc.direction_input = ucb1400_gpio_dir_in; + ucb->gc.direction_output = ucb1400_gpio_dir_out; + ucb->gc.get = ucb1400_gpio_get; + ucb->gc.set = ucb1400_gpio_set; + ucb->gc.can_sleep = 1; + + err = gpiochip_add(&ucb->gc); + if (err) + goto err; + + if (ucbdata && ucbdata->gpio_setup) + err = ucbdata->gpio_setup(&dev->dev, ucb->gc.ngpio); + +err: + return err; + +} + +static int ucb1400_gpio_remove(struct platform_device *dev) +{ + int err = 0; + struct ucb1400_gpio *ucb = platform_get_drvdata(dev); + + if (ucbdata && ucbdata->gpio_teardown) { + err = ucbdata->gpio_teardown(&dev->dev, ucb->gc.ngpio); + if (err) + return err; + } + + err = gpiochip_remove(&ucb->gc); + return err; +} + +static struct platform_driver ucb1400_gpio_driver = { + .probe = ucb1400_gpio_probe, + .remove = ucb1400_gpio_remove, + .driver = { + .name = "ucb1400_gpio" + }, +}; + +static int __init ucb1400_gpio_init(void) +{ + return platform_driver_register(&ucb1400_gpio_driver); +} + +static void __exit ucb1400_gpio_exit(void) +{ + platform_driver_unregister(&ucb1400_gpio_driver); +} + +void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data) +{ + ucbdata = data; +} + +module_init(ucb1400_gpio_init); +module_exit(ucb1400_gpio_exit); + +MODULE_DESCRIPTION("Philips UCB1400 GPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c index 78c2135c5de..2afc08006e6 100644 --- a/drivers/mfd/ucb1400_core.c +++ b/drivers/mfd/ucb1400_core.c @@ -48,9 +48,11 @@ static int ucb1400_core_probe(struct device *dev) int err; struct ucb1400 *ucb; struct ucb1400_ts ucb_ts; + struct ucb1400_gpio ucb_gpio; struct snd_ac97 *ac97; memset(&ucb_ts, 0, sizeof(ucb_ts)); + memset(&ucb_gpio, 0, sizeof(ucb_gpio)); ucb = kzalloc(sizeof(struct ucb1400), GFP_KERNEL); if (!ucb) { @@ -68,25 +70,44 @@ static int ucb1400_core_probe(struct device *dev) goto err0; } + /* GPIO */ + ucb_gpio.ac97 = ac97; + ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1); + if (!ucb->ucb1400_gpio) { + err = -ENOMEM; + goto err0; + } + err = platform_device_add_data(ucb->ucb1400_gpio, &ucb_gpio, + sizeof(ucb_gpio)); + if (err) + goto err1; + err = 
platform_device_add(ucb->ucb1400_gpio); + if (err) + goto err1; + /* TOUCHSCREEN */ ucb_ts.ac97 = ac97; ucb->ucb1400_ts = platform_device_alloc("ucb1400_ts", -1); if (!ucb->ucb1400_ts) { err = -ENOMEM; - goto err0; + goto err2; } err = platform_device_add_data(ucb->ucb1400_ts, &ucb_ts, sizeof(ucb_ts)); if (err) - goto err1; + goto err3; err = platform_device_add(ucb->ucb1400_ts); if (err) - goto err1; + goto err3; return 0; -err1: +err3: platform_device_put(ucb->ucb1400_ts); +err2: + platform_device_unregister(ucb->ucb1400_gpio); +err1: + platform_device_put(ucb->ucb1400_gpio); err0: kfree(ucb); err: @@ -98,6 +119,8 @@ static int ucb1400_core_remove(struct device *dev) struct ucb1400 *ucb = dev_get_drvdata(dev); platform_device_unregister(ucb->ucb1400_ts); + platform_device_unregister(ucb->ucb1400_gpio); + kfree(ucb); return 0; } diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h index ae779bb8cc0..adb44066680 100644 --- a/include/linux/ucb1400.h +++ b/include/linux/ucb1400.h @@ -26,6 +26,7 @@ #include #include #include +#include /* * UCB1400 AC-link registers @@ -82,6 +83,17 @@ #define UCB_ID 0x7e #define UCB_ID_1400 0x4304 +struct ucb1400_gpio_data { + int gpio_offset; + int (*gpio_setup)(struct device *dev, int ngpio); + int (*gpio_teardown)(struct device *dev, int ngpio); +}; + +struct ucb1400_gpio { + struct gpio_chip gc; + struct snd_ac97 *ac97; +}; + struct ucb1400_ts { struct input_dev *ts_idev; struct task_struct *ts_task; @@ -95,6 +107,7 @@ struct ucb1400_ts { struct ucb1400 { struct platform_device *ucb1400_ts; + struct platform_device *ucb1400_gpio; }; static inline u16 ucb1400_reg_read(struct snd_ac97 *ac97, u16 reg) @@ -147,4 +160,10 @@ static inline void ucb1400_adc_disable(struct snd_ac97 *ac97) unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, int adcsync); +#ifdef CONFIG_GPIO_UCB1400 +void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data); +#else +static inline void ucb1400_gpio_set_data(struct ucb1400_gpio_data *data) {} +#endif + #endif -- cgit v1.2.3-70-g09d2 From 4ed824d9aead77a6a4eb1e89c3b3d270ba386fad Mon Sep 17 00:00:00 2001 From: Sudhakar Rajashekhara Date: Tue, 22 Sep 2009 16:47:06 -0700 Subject: davinci: fb: Frame Buffer driver for TI DA8xx/OMAP-L1xx Add LCD controller (LCDC) driver for TI's DA8xx/OMAP-L1xx architecture. LCDC specifications can be found at http://www.ti.com/litv/pdf/sprufm0a. LCDC on DA8xx consists of two independent controllers, the Raster Controller and the LCD Interface Display Driver (LIDD) controller. LIDD further supports character and graphic displays. This patch adds support for the graphic display (Sharp LQ035Q3DG01) found on the DA830 based EVM. The EVM details can be found at: http://support.spectrumdigital.com/boards/dskda830/revc/. 
Signed-off-by: Sudhakar Rajashekhara Signed-off-by: Pavel Kiryukhin Signed-off-by: Steve Chen Acked-by: Krzysztof Helt DESC davinci-fb-frame-buffer-driver-for-ti-da8xx-omap-l1xx-fix EDESC From: Andrew Morton fix kconfig indenting Cc: Krzysztof Helt Cc: Pavel Kiryukhin Cc: Steve Chen Cc: Sudhakar Rajashekhara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/Kconfig | 11 + drivers/video/Makefile | 1 + drivers/video/da8xx-fb.c | 909 +++++++++++++++++++++++++++++++++++++++++++++++ include/video/da8xx-fb.h | 106 ++++++ 4 files changed, 1027 insertions(+) create mode 100644 drivers/video/da8xx-fb.c create mode 100644 include/video/da8xx-fb.h (limited to 'include') diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 05184a1c3e9..a7944ef48a2 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -2041,6 +2041,17 @@ config FB_SH7760 and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for panels <= 320 pixel horizontal resolution. +config FB_DA8XX + tristate "DA8xx/OMAP-L1xx Framebuffer support" + depends on FB && ARCH_DAVINCI_DA8XX + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + ---help--- + This is the frame buffer device driver for the TI LCD controller + found on DA8xx/OMAP-L1xx SoCs. + If unsure, say N. + config FB_VIRTUAL tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" depends on FB diff --git a/drivers/video/Makefile b/drivers/video/Makefile index f4b6c150bd0..85b2d2322dc 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -137,6 +137,7 @@ obj-$(CONFIG_FB_OF) += offb.o obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o obj-$(CONFIG_FB_MX3) += mx3fb.o +obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o # the test framebuffer is last obj-$(CONFIG_FB_VIRTUAL) += vfb.o diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c new file mode 100644 index 00000000000..e0bbc66499c --- /dev/null +++ b/drivers/video/da8xx-fb.c @@ -0,0 +1,909 @@ +/* + * Copyright (C) 2008-2009 MontaVista Software Inc. + * Copyright (C) 2008-2009 Texas Instruments Inc + * + * Based on the LCD driver for TI Avalanche processors written by + * Ajay Singh and Shalom Hai. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option)any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include