1212 files changed, 36264 insertions, 14231 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp index 5c53d28f775..b9688de8455 100644 --- a/Documentation/ABI/stable/sysfs-driver-ib_srp +++ b/Documentation/ABI/stable/sysfs-driver-ib_srp @@ -61,6 +61,12 @@ Description: Interface for making ib_srp connect to a new target. interrupt is handled by a different CPU then the comp_vector parameter can be used to spread the SRP completion workload over multiple CPU's. + * tl_retry_count, a number in the range 2..7 specifying the + IB RC retry count. + * queue_size, the maximum number of commands that the + initiator is allowed to queue per SCSI host. The default + value for this parameter is 62. The lowest supported value + is 2. What: /sys/class/infiniband_srp/srp-<hca>-<port_number>/ibdev Date: January 2, 2006 @@ -153,6 +159,13 @@ Contact: linux-rdma@vger.kernel.org Description: InfiniBand service ID used for establishing communication with the SRP target. +What: /sys/class/scsi_host/host<n>/sgid +Date: February 1, 2014 +KernelVersion: 3.13 +Contact: linux-rdma@vger.kernel.org +Description: InfiniBand GID of the source port used for communication with + the SRP target. + What: /sys/class/scsi_host/host<n>/zero_req_lim Date: September 20, 2006 KernelVersion: 2.6.18 diff --git a/Documentation/ABI/stable/sysfs-transport-srp b/Documentation/ABI/stable/sysfs-transport-srp index b36fb0dc13c..ec7af69fea0 100644 --- a/Documentation/ABI/stable/sysfs-transport-srp +++ b/Documentation/ABI/stable/sysfs-transport-srp @@ -5,6 +5,24 @@ Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org Description: Instructs an SRP initiator to disconnect from a target and to remove all LUNs imported from that target. +What: /sys/class/srp_remote_ports/port-<h>:<n>/dev_loss_tmo +Date: February 1, 2014 +KernelVersion: 3.13 +Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org +Description: Number of seconds the SCSI layer will wait after a transport + layer error has been observed before removing a target port. + Zero means immediate removal. Setting this attribute to "off" + will disable the dev_loss timer. + +What: /sys/class/srp_remote_ports/port-<h>:<n>/fast_io_fail_tmo +Date: February 1, 2014 +KernelVersion: 3.13 +Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org +Description: Number of seconds the SCSI layer will wait after a transport + layer error has been observed before failing I/O. Zero means + failing I/O immediately. Setting this attribute to "off" will + disable the fast_io_fail timer. + What: /sys/class/srp_remote_ports/port-<h>:<n>/port_id Date: June 27, 2007 KernelVersion: 2.6.24 @@ -12,8 +30,29 @@ Contact: linux-scsi@vger.kernel.org Description: 16-byte local SRP port identifier in hexadecimal format. An example: 4c:49:4e:55:58:20:56:49:4f:00:00:00:00:00:00:00. +What: /sys/class/srp_remote_ports/port-<h>:<n>/reconnect_delay +Date: February 1, 2014 +KernelVersion: 3.13 +Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org +Description: Number of seconds the SCSI layer will wait after a reconnect + attempt failed before retrying. Setting this attribute to + "off" will disable time-based reconnecting. + What: /sys/class/srp_remote_ports/port-<h>:<n>/roles Date: June 27, 2007 KernelVersion: 2.6.24 Contact: linux-scsi@vger.kernel.org Description: Role of the remote port. Either "SRP Initiator" or "SRP Target". 
+ +What: /sys/class/srp_remote_ports/port-<h>:<n>/state +Date: February 1, 2014 +KernelVersion: 3.13 +Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org +Description: State of the transport layer used for communication with the + remote port. "running" if the transport layer is operational; + "blocked" if a transport layer error has been encountered but + the fast_io_fail_tmo timer has not yet fired; "fail-fast" + after the fast_io_fail_tmo timer has fired and before the + "dev_loss_tmo" timer has fired; "lost" after the + "dev_loss_tmo" timer has fired and before the port is finally + removed. diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt new file mode 100644 index 00000000000..f4faec0f66e --- /dev/null +++ b/Documentation/assoc_array.txt @@ -0,0 +1,574 @@ + ======================================== + GENERIC ASSOCIATIVE ARRAY IMPLEMENTATION + ======================================== + +Contents: + + - Overview. + + - The public API. + - Edit script. + - Operations table. + - Manipulation functions. + - Access functions. + - Index key form. + + - Internal workings. + - Basic internal tree layout. + - Shortcuts. + - Splitting and collapsing nodes. + - Non-recursive iteration. + - Simultaneous alteration and iteration. + + +======== +OVERVIEW +======== + +This associative array implementation is an object container with the following +properties: + + (1) Objects are opaque pointers. The implementation does not care where they + point (if anywhere) or what they point to (if anything). + + [!] NOTE: Pointers to objects _must_ be zero in the least significant bit. + + (2) Objects do not need to contain linkage blocks for use by the array. This + permits an object to be located in multiple arrays simultaneously. + Rather, the array is made up of metadata blocks that point to objects. + + (3) Objects require index keys to locate them within the array. + + (4) Index keys must be unique. Inserting an object with the same key as one + already in the array will replace the old object. + + (5) Index keys can be of any length and can be of different lengths. + + (6) Index keys should encode the length early on, before any variation due to + length is seen. + + (7) Index keys can include a hash to scatter objects throughout the array. + + (8) The array can be iterated over. The objects will not necessarily come out in + key order. + + (9) The array can be iterated over whilst it is being modified, provided the + RCU readlock is being held by the iterator. Note, however, under these + circumstances, some objects may be seen more than once. If this is a + problem, the iterator should lock against modification. Objects will not + be missed, however, unless deleted. + +(10) Objects in the array can be looked up by means of their index key. + +(11) Objects can be looked up whilst the array is being modified, provided the + RCU readlock is being held by the thread doing the look up. + +The implementation uses a tree of 16-pointer nodes internally that are indexed +on each level by nibbles from the index key in the same manner as in a radix +tree. To improve memory efficiency, shortcuts can be emplaced to skip over +what would otherwise be a series of single-occupancy nodes. Further, nodes +pack leaf object pointers into spare space in the node rather than making an +extra branch until such time as an object needs to be added to a full node. + + +============== +THE PUBLIC API +============== + +The public API can be found in <linux/assoc_array.h>.
The associative array is +rooted on the following structure: + + struct assoc_array { + ... + }; + +The code is selected by enabling CONFIG_ASSOCIATIVE_ARRAY. + + +EDIT SCRIPT +----------- + +The insertion and deletion functions produce an 'edit script' that can later be +applied to effect the changes without risking ENOMEM. This retains the +preallocated metadata blocks that will be installed in the internal tree and +keeps track of the metadata blocks that will be removed from the tree when the +script is applied. + +This is also used to keep track of dead blocks and dead objects after the +script has been applied so that they can be freed later. The freeing is done +after an RCU grace period has passed - thus allowing access functions to +proceed under the RCU read lock. + +The script appears as outside of the API as a pointer of the type: + + struct assoc_array_edit; + +There are two functions for dealing with the script: + + (1) Apply an edit script. + + void assoc_array_apply_edit(struct assoc_array_edit *edit); + + This will perform the edit functions, interpolating various write barriers + to permit accesses under the RCU read lock to continue. The edit script + will then be passed to call_rcu() to free it and any dead stuff it points + to. + + (2) Cancel an edit script. + + void assoc_array_cancel_edit(struct assoc_array_edit *edit); + + This frees the edit script and all preallocated memory immediately. If + this was for insertion, the new object is _not_ released by this function, + but must rather be released by the caller. + +These functions are guaranteed not to fail. + + +OPERATIONS TABLE +---------------- + +Various functions take a table of operations: + + struct assoc_array_ops { + ... + }; + +This points to a number of methods, all of which need to be provided: + + (1) Get a chunk of index key from caller data: + + unsigned long (*get_key_chunk)(const void *index_key, int level); + + This should return a chunk of caller-supplied index key starting at the + *bit* position given by the level argument. The level argument will be a + multiple of ASSOC_ARRAY_KEY_CHUNK_SIZE and the function should return + ASSOC_ARRAY_KEY_CHUNK_SIZE bits. No error is possible. + + + (2) Get a chunk of an object's index key. + + unsigned long (*get_object_key_chunk)(const void *object, int level); + + As the previous function, but gets its data from an object in the array + rather than from a caller-supplied index key. + + + (3) See if this is the object we're looking for. + + bool (*compare_object)(const void *object, const void *index_key); + + Compare the object against an index key and return true if it matches and + false if it doesn't. + + + (4) Diff the index keys of two objects. + + int (*diff_objects)(const void *a, const void *b); + + Return the bit position at which the index keys of two objects differ or + -1 if they are the same. + + + (5) Free an object. + + void (*free_object)(void *object); + + Free the specified object. Note that this may be called an RCU grace + period after assoc_array_apply_edit() was called, so synchronize_rcu() may + be necessary on module unloading. + + +MANIPULATION FUNCTIONS +---------------------- + +There are a number of functions for manipulating an associative array: + + (1) Initialise an associative array. + + void assoc_array_init(struct assoc_array *array); + + This initialises the base structure for an associative array. It can't + fail. + + + (2) Insert/replace an object in an associative array. 
+ + struct assoc_array_edit * + assoc_array_insert(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + void *object); + + This inserts the given object into the array. Note that the least + significant bit of the pointer must be zero as it's used to type-mark + pointers internally. + + If an object already exists for that key then it will be replaced with the + new object and the old one will be freed automatically. + + The index_key argument should hold index key information and is + passed to the methods in the ops table when they are called. + + This function makes no alteration to the array itself, but rather returns + an edit script that must be applied. -ENOMEM is returned in the case of + an out-of-memory error. + + The caller should lock exclusively against other modifiers of the array. + + + (3) Delete an object from an associative array. + + struct assoc_array_edit * + assoc_array_delete(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); + + This deletes an object that matches the specified data from the array. + + The index_key argument should hold index key information and is + passed to the methods in the ops table when they are called. + + This function makes no alteration to the array itself, but rather returns + an edit script that must be applied. -ENOMEM is returned in the case of + an out-of-memory error. NULL will be returned if the specified object is + not found within the array. + + The caller should lock exclusively against other modifiers of the array. + + + (4) Delete all objects from an associative array. + + struct assoc_array_edit * + assoc_array_clear(struct assoc_array *array, + const struct assoc_array_ops *ops); + + This deletes all the objects from an associative array and leaves it + completely empty. + + This function makes no alteration to the array itself, but rather returns + an edit script that must be applied. -ENOMEM is returned in the case of + an out-of-memory error. + + The caller should lock exclusively against other modifiers of the array. + + + (5) Destroy an associative array, deleting all objects. + + void assoc_array_destroy(struct assoc_array *array, + const struct assoc_array_ops *ops); + + This destroys the contents of the associative array and leaves it + completely empty. It is not permitted for another thread to be traversing + the array under the RCU read lock at the same time as this function is + destroying it as no RCU deferral is performed on memory release - + something that would require memory to be allocated. + + The caller should lock exclusively against other modifiers and accessors + of the array. + + + (6) Garbage collect an associative array. + + int assoc_array_gc(struct assoc_array *array, + const struct assoc_array_ops *ops, + bool (*iterator)(void *object, void *iterator_data), + void *iterator_data); + + This iterates over the objects in an associative array and passes each one + to iterator(). If iterator() returns true, the object is kept. If it + returns false, the object will be freed. If the iterator() function + returns true, it must perform any appropriate refcount incrementing on the + object before returning. + + The internal tree will be packed down if possible as part of the iteration + to reduce the number of nodes in it. + + The iterator_data is passed directly to iterator() and is otherwise + ignored by the function. + + The function will return 0 if successful and -ENOMEM if there wasn't + enough memory. 
+ + It is possible for other threads to iterate over or search the array under + the RCU read lock whilst this function is in progress. The caller should + lock exclusively against other modifiers of the array. + + +ACCESS FUNCTIONS +---------------- + +There are two functions for accessing an associative array: + + (1) Iterate over all the objects in an associative array. + + int assoc_array_iterate(const struct assoc_array *array, + int (*iterator)(const void *object, + void *iterator_data), + void *iterator_data); + + This passes each object in the array to the iterator callback function. + iterator_data is private data for that function. + + This may be used on an array at the same time as the array is being + modified, provided the RCU read lock is held. Under such circumstances, + it is possible for the iteration function to see some objects twice. If + this is a problem, then modification should be locked against. The + iteration algorithm should not, however, miss any objects. + + The function will return 0 if no objects were in the array or else it will + return the result of the last iterator function called. Iteration stops + immediately if any call to the iteration function results in a non-zero + return. + + + (2) Find an object in an associative array. + + void *assoc_array_find(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); + + This walks through the array's internal tree directly to the object + specified by the index key.. + + This may be used on an array at the same time as the array is being + modified, provided the RCU read lock is held. + + The function will return the object if found (and set *_type to the object + type) or will return NULL if the object was not found. + + +INDEX KEY FORM +-------------- + +The index key can be of any form, but since the algorithms aren't told how long +the key is, it is strongly recommended that the index key includes its length +very early on before any variation due to the length would have an effect on +comparisons. + +This will cause leaves with different length keys to scatter away from each +other - and those with the same length keys to cluster together. + +It is also recommended that the index key begin with a hash of the rest of the +key to maximise scattering throughout keyspace. + +The better the scattering, the wider and lower the internal tree will be. + +Poor scattering isn't too much of a problem as there are shortcuts and nodes +can contain mixtures of leaves and metadata pointers. + +The index key is read in chunks of machine word. Each chunk is subdivided into +one nibble (4 bits) per level, so on a 32-bit CPU this is good for 8 levels and +on a 64-bit CPU, 16 levels. Unless the scattering is really poor, it is +unlikely that more than one word of any particular index key will have to be +used. + + +================= +INTERNAL WORKINGS +================= + +The associative array data structure has an internal tree. This tree is +constructed of two types of metadata blocks: nodes and shortcuts. + +A node is an array of slots. Each slot can contain one of four things: + + (*) A NULL pointer, indicating that the slot is empty. + + (*) A pointer to an object (a leaf). + + (*) A pointer to a node at the next level. + + (*) A pointer to a shortcut. + + +BASIC INTERNAL TREE LAYOUT +-------------------------- + +Ignoring shortcuts for the moment, the nodes form a multilevel tree. 
The index +key space is strictly subdivided by the nodes in the tree and nodes occur on +fixed levels. For example: + + Level: 0 1 2 3 + =============== =============== =============== =============== + NODE D + NODE B NODE C +------>+---+ + +------>+---+ +------>+---+ | | 0 | + NODE A | | 0 | | | 0 | | +---+ + +---+ | +---+ | +---+ | : : + | 0 | | : : | : : | +---+ + +---+ | +---+ | +---+ | | f | + | 1 |---+ | 3 |---+ | 7 |---+ +---+ + +---+ +---+ +---+ + : : : : | 8 |---+ + +---+ +---+ +---+ | NODE E + | e |---+ | f | : : +------>+---+ + +---+ | +---+ +---+ | 0 | + | f | | | f | +---+ + +---+ | +---+ : : + | NODE F +---+ + +------>+---+ | f | + | 0 | NODE G +---+ + +---+ +------>+---+ + : : | | 0 | + +---+ | +---+ + | 6 |---+ : : + +---+ +---+ + : : | f | + +---+ +---+ + | f | + +---+ + +In the above example, there are 7 nodes (A-G), each with 16 slots (0-f). +Assuming no other meta data nodes in the tree, the key space is divided thusly: + + KEY PREFIX NODE + ========== ==== + 137* D + 138* E + 13[0-69-f]* C + 1[0-24-f]* B + e6* G + e[0-57-f]* F + [02-df]* A + +So, for instance, keys with the following example index keys will be found in +the appropriate nodes: + + INDEX KEY PREFIX NODE + =============== ======= ==== + 13694892892489 13 C + 13795289025897 137 D + 13889dde88793 138 E + 138bbb89003093 138 E + 1394879524789 12 C + 1458952489 1 B + 9431809de993ba - A + b4542910809cd - A + e5284310def98 e F + e68428974237 e6 G + e7fffcbd443 e F + f3842239082 - A + +To save memory, if a node can hold all the leaves in its portion of keyspace, +then the node will have all those leaves in it and will not have any metadata +pointers - even if some of those leaves would like to be in the same slot. + +A node can contain a heterogeneous mix of leaves and metadata pointers. +Metadata pointers must be in the slots that match their subdivisions of key +space. The leaves can be in any slot not occupied by a metadata pointer. It +is guaranteed that none of the leaves in a node will match a slot occupied by a +metadata pointer. If the metadata pointer is there, any leaf whose key matches +the metadata key prefix must be in the subtree that the metadata pointer points +to. + +In the above example list of index keys, node A will contain: + + SLOT CONTENT INDEX KEY (PREFIX) + ==== =============== ================== + 1 PTR TO NODE B 1* + any LEAF 9431809de993ba + any LEAF b4542910809cd + e PTR TO NODE F e* + any LEAF f3842239082 + +and node B: + + 3 PTR TO NODE C 13* + any LEAF 1458952489 + + +SHORTCUTS +--------- + +Shortcuts are metadata records that jump over a piece of keyspace. A shortcut +is a replacement for a series of single-occupancy nodes ascending through the +levels. Shortcuts exist to save memory and to speed up traversal. + +It is possible for the root of the tree to be a shortcut - say, for example, +the tree contains at least 17 nodes all with key prefix '1111'. The insertion +algorithm will insert a shortcut to skip over the '1111' keyspace in a single +bound and get to the fourth level where these actually become different. + + +SPLITTING AND COLLAPSING NODES +------------------------------ + +Each node has a maximum capacity of 16 leaves and metadata pointers. If the +insertion algorithm finds that it is trying to insert a 17th object into a +node, that node will be split such that at least two leaves that have a common +key segment at that level end up in a separate node rooted on that slot for +that common key segment. 
+ +If the leaves in a full node and the leaf that is being inserted are +sufficiently similar, then a shortcut will be inserted into the tree. + +When the number of objects in the subtree rooted at a node falls to 16 or +fewer, then the subtree will be collapsed down to a single node - and this will +ripple towards the root if possible. + + +NON-RECURSIVE ITERATION +----------------------- + +Each node and shortcut contains a back pointer to its parent and the number of the +slot in that parent that points to it. Non-recursive iteration uses these to +proceed rootwards through the tree, going to the parent node, slot N + 1 to +make sure progress is made without the need for a stack. + +The backpointers, however, make simultaneous alteration and iteration tricky. + + +SIMULTANEOUS ALTERATION AND ITERATION +------------------------------------- + +There are a number of cases to consider: + + (1) Simple insert/replace. This involves simply replacing a NULL or old + matching leaf pointer with the pointer to the new leaf after a barrier. + The metadata blocks don't change otherwise. An old leaf won't be freed + until after the RCU grace period. + + (2) Simple delete. This involves just clearing an old matching leaf. The + metadata blocks don't change otherwise. The old leaf won't be freed until + after the RCU grace period. + + (3) Insertion replacing part of a subtree that we haven't yet entered. This + may involve replacement of part of that subtree - but that won't affect + the iteration as we won't have reached the pointer to it yet and the + ancestry blocks are not replaced (the layout of those does not change). + + (4) Insertion replacing nodes that we're actively processing. This isn't a + problem as we've passed the anchoring pointer and won't switch onto the + new layout until we follow the back pointers - at which point we've + already examined the leaves in the replaced node (we iterate over all the + leaves in a node before following any of its metadata pointers). + + We might, however, re-see some leaves that have been split out into a new + branch that's in a slot further along than we were at. + + (5) Insertion replacing nodes that we're processing a dependent branch of. + This won't affect us until we follow the back pointers. Similar to (4). + + (6) Deletion collapsing a branch under us. This doesn't affect us because the + back pointers will get us back to the parent of the new node before we + could see the new node. The entire collapsed subtree is thrown away + unchanged - and will still be rooted on the same slot, so we shouldn't + process it a second time as we'll go back to slot + 1. + +Note: + + (*) Under some circumstances, we need to simultaneously change the parent + pointer and the parent slot pointer on a node (say, for example, we + inserted another node before it and moved it up a level). We cannot do + this without locking against a read - so we have to replace that node too. + + However, when we're changing a shortcut into a node this isn't a problem + as shortcuts only have one slot and so the parent slot number isn't used + when traversing backwards over one. This means that it's okay to change + the slot number first - provided suitable barriers are used to make sure + the parent slot number is read after the back pointer. + +Obsolete blocks and leaves are freed up after an RCU grace period has passed, +so as long as anyone doing walking or iteration holds the RCU read lock, the +old superstructure should not go away on them.
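As a concrete illustration of the API documented above, here is a minimal, hypothetical usage sketch (not part of the patch) that indexes objects by a single unsigned long key. The struct my_object type, the my_* helpers and my_array are invented names, and the use of IS_ERR()/PTR_ERR() for the -ENOMEM case is an assumption about how the failure is encoded rather than something the text above states explicitly.

    #include <linux/assoc_array.h>
    #include <linux/bitops.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct my_object {
            unsigned long key;      /* fits in one key chunk for this sketch */
            /* ... payload ... */
    };

    /* Return the chunk of the caller-supplied index key at the given bit level. */
    static unsigned long my_get_key_chunk(const void *index_key, int level)
    {
            return level == 0 ? *(const unsigned long *)index_key : 0;
    }

    /* Return the chunk of an object's own key at the given bit level. */
    static unsigned long my_get_object_key_chunk(const void *object, int level)
    {
            const struct my_object *obj = object;

            return level == 0 ? obj->key : 0;
    }

    static bool my_compare_object(const void *object, const void *index_key)
    {
            const struct my_object *obj = object;

            return obj->key == *(const unsigned long *)index_key;
    }

    /* Bit position at which two objects' keys differ, or -1 if identical. */
    static int my_diff_objects(const void *a, const void *b)
    {
            unsigned long diff = ((const struct my_object *)a)->key ^
                                 ((const struct my_object *)b)->key;

            return diff ? __ffs(diff) : -1;
    }

    static void my_free_object(void *object)
    {
            kfree(object);  /* may run an RCU grace period after apply_edit() */
    }

    static const struct assoc_array_ops my_ops = {
            .get_key_chunk          = my_get_key_chunk,
            .get_object_key_chunk   = my_get_object_key_chunk,
            .compare_object         = my_compare_object,
            .diff_objects           = my_diff_objects,
            .free_object            = my_free_object,
    };

    static struct assoc_array my_array;  /* set up once with assoc_array_init(&my_array) */

    static int my_store(struct my_object *obj)
    {
            struct assoc_array_edit *edit;

            /* obj comes from kmalloc(), so its least significant bit is zero. */
            /* Preallocate the edit script; the array is not modified yet. */
            edit = assoc_array_insert(&my_array, &my_ops, &obj->key, obj);
            if (IS_ERR(edit))
                    return PTR_ERR(edit);

            /* Commit the change; dead metadata is freed after an RCU grace period. */
            assoc_array_apply_edit(edit);
            return 0;
    }

    static struct my_object *my_lookup(unsigned long key)
    {
            return assoc_array_find(&my_array, &my_ops, &key);
    }

Lookups and iteration may then run under rcu_read_lock() while my_store() is called by a single writer holding an exclusive lock, matching the locking rules spelled out for the manipulation and access functions above.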
diff --git a/Documentation/devicetree/bindings/arc/pmu.txt b/Documentation/devicetree/bindings/arc/pmu.txt new file mode 100644 index 00000000000..49d517340de --- /dev/null +++ b/Documentation/devicetree/bindings/arc/pmu.txt @@ -0,0 +1,24 @@ +* ARC Performance Monitor Unit + +The ARC 700 can be configured with a pipeline performance monitor for counting +CPU and cache events like cache misses and hits. + +Note that: + * ARC 700 refers to a family of ARC processor cores; + - There is only one type of PMU available for the whole family; + - The PMU may support different sets of events; supported events are probed + at boot time, as required by the reference manual. + + * The ARC 700 PMU does not support interrupts; although HW events may be + counted, the HW events themselves cannot serve as a trigger for a sample. + +Required properties: + +- compatible : should contain + "snps,arc700-pmu" + +Example: + +pmu { + compatible = "snps,arc700-pmu"; +}; diff --git a/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt index f770ac0893d..049675944b7 100644 --- a/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt +++ b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt @@ -1,7 +1,9 @@ Calxeda DDR memory controller Properties: -- compatible : Should be "calxeda,hb-ddr-ctrl" +- compatible : Should be: + - "calxeda,hb-ddr-ctrl" for ECX-1000 + - "calxeda,ecx-2000-ddr-ctrl" for ECX-2000 - reg : Address and size for DDR controller registers. - interrupts : Interrupt for DDR controller. diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt index e1f343c7a34..f69bcf5a634 100644 --- a/Documentation/devicetree/bindings/dma/atmel-dma.txt +++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt @@ -28,7 +28,7 @@ The three cells in order are: dependent: - bit 7-0: peripheral identifier for the hardware handshaking interface. The identifier can be different for tx and rx. - - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP. + - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP. Example: diff --git a/Documentation/devicetree/bindings/i2c/i2c-bcm-kona.txt b/Documentation/devicetree/bindings/i2c/i2c-bcm-kona.txt new file mode 100644 index 00000000000..1b87b741fa8 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-bcm-kona.txt @@ -0,0 +1,35 @@ +Broadcom Kona Family I2C +========================= + +This I2C controller is used in the following Broadcom SoCs: + + BCM11130 + BCM11140 + BCM11351 + BCM28145 + BCM28155 + +Required Properties +------------------- +- compatible: "brcm,bcm11351-i2c", "brcm,kona-i2c" +- reg: Physical base address and length of controller registers +- interrupts: The interrupt number used by the controller +- clocks: clock specifier for the kona i2c external clock +- clock-frequency: The I2C bus frequency in Hz +- #address-cells: Should be <1> +- #size-cells: Should be <0> + +Refer to clocks/clock-bindings.txt for generic clock consumer +properties. 
+ +Example: + +i2c@3e016000 { + compatible = "brcm,bcm11351-i2c","brcm,kona-i2c"; + reg = <0x3e016000 0x80>; + interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&bsc1_clk>; + clock-frequency = <400000>; + #address-cells = <1>; + #size-cells = <0>; +}; diff --git a/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt b/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt new file mode 100644 index 00000000000..056732cfdce --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt @@ -0,0 +1,44 @@ +* Samsung's High Speed I2C controller + +The Samsung's High Speed I2C controller is used to interface with I2C devices +at various speeds ranging from 100 kHz to 3.4 MHz. + +Required properties: + - compatible: value should be: + -> "samsung,exynos5-hsi2c", for i2c compatible with exynos5 hsi2c. + - reg: physical base address of the controller and length of memory mapped + region. + - interrupts: interrupt number to the cpu. + - #address-cells: always 1 (for i2c addresses) + - #size-cells: always 0 + + - Pinctrl: + - pinctrl-0: Pin control group to be used for this controller. + - pinctrl-names: Should contain only one value - "default". + +Optional properties: + - clock-frequency: Desired operating frequency in Hz of the bus. + -> If not specified, the bus operates in fast-speed mode at + 100 kHz. + -> If specified, the bus operates in high-speed mode only if the + clock-frequency is >= 1 MHz. + +Example: + +hsi2c@12ca0000 { + compatible = "samsung,exynos5-hsi2c"; + reg = <0x12ca0000 0x100>; + interrupts = <56>; + clock-frequency = <100000>; + + pinctrl-0 = <&i2c4_bus>; + pinctrl-names = "default"; + + #address-cells = <1>; + #size-cells = <0>; + + s2mps11_pmic@66 { + compatible = "samsung,s2mps11-pmic"; + reg = <0x66>; + }; +}; diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt new file mode 100644 index 00000000000..897cfcd5ce9 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt @@ -0,0 +1,23 @@ +I2C for R-Car platforms + +Required properties: +- compatible: Must be one of + "renesas,i2c-rcar" + "renesas,i2c-r8a7778" + "renesas,i2c-r8a7779" + "renesas,i2c-r8a7790" +- reg: physical base address of the controller and length of memory mapped + region. +- interrupts: interrupt specifier. + +Optional properties: +- clock-frequency: desired I2C bus clock frequency in Hz. The absence of this + property indicates the default frequency 100 kHz. + +Examples : + +i2c0: i2c@e6500000 { + compatible = "renesas,i2c-rcar-h2"; + reg = <0 0xe6500000 0 0x428>; + interrupts = <0 174 0x4>; +}; diff --git a/Documentation/devicetree/bindings/i2c/i2c-st.txt b/Documentation/devicetree/bindings/i2c/i2c-st.txt new file mode 100644 index 00000000000..437e0db3823 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-st.txt @@ -0,0 +1,41 @@ +ST SSC binding, for I2C mode operation + +Required properties : +- compatible : Must be "st,comms-ssc-i2c" or "st,comms-ssc4-i2c" +- reg : Offset and length of the register set for the device +- interrupts : the interrupt specifier +- clock-names: Must contain "ssc". +- clocks: Must contain an entry for each name in clock-names. See the common + clock bindings. +- A pinctrl state named "default" must be defined to set pins in mode of + operation for I2C transfer. + +Optional properties : +- clock-frequency : Desired I2C bus clock frequency in Hz. If not specified, + the default 100 kHz frequency will be used.
As only Normal and Fast modes + are supported, possible values are 100000 and 400000. +- st,i2c-min-scl-pulse-width-us : The minimum valid SCL pulse width that is + allowed through the deglitch circuit. In units of us. +- st,i2c-min-sda-pulse-width-us : The minimum valid SDA pulse width that is + allowed through the deglitch circuit. In units of us. +- A pinctrl state named "idle" could be defined to set pins in idle state + when I2C instance is not performing a transfer. +- A pinctrl state named "sleep" could be defined to set pins in sleep state + when driver enters in suspend. + + + +Example : + +i2c0: i2c@fed40000 { + compatible = "st,comms-ssc4-i2c"; + reg = <0xfed40000 0x110>; + interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&CLK_S_ICN_REG_0>; + clock-names = "ssc"; + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c0_default>; + st,i2c-min-scl-pulse-width-us = <0>; + st,i2c-min-sda-pulse-width-us = <5>; +}; diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt index ad6a73852f0..f1fb26eed0e 100644 --- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt +++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt @@ -15,6 +15,7 @@ adi,adt7461 +/-1C TDM Extended Temp Range I.C adt7461 +/-1C TDM Extended Temp Range I.C at,24c08 i2c serial eeprom (24cxx) atmel,24c02 i2c serial eeprom (24cxx) +atmel,at97sc3204t i2c trusted platform module (TPM) catalyst,24c32 i2c serial eeprom dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock dallas,ds1338 I2C RTC with 56-Byte NV RAM @@ -44,6 +45,7 @@ mc,rv3029c2 Real Time Clock Module with I2C-Bus national,lm75 I2C TEMP SENSOR national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor national,lm92 ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface +nuvoton,npct501 i2c trusted platform module (TPM) nxp,pca9556 Octal SMBus and I2C registered interface nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset nxp,pcf8563 Real-time clock/calendar @@ -61,3 +63,4 @@ taos,tsl2550 Ambient Light Sensor with SMBUS/Two Wire Serial Interface ti,tsc2003 I2C Touch-Screen Controller ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface ti,tmp275 Digital Temperature Sensor +winbond,wpct301 i2c trusted platform module (TPM) diff --git a/Documentation/devicetree/bindings/media/st-rc.txt b/Documentation/devicetree/bindings/media/st-rc.txt new file mode 100644 index 00000000000..05c432d08bc --- /dev/null +++ b/Documentation/devicetree/bindings/media/st-rc.txt @@ -0,0 +1,29 @@ +Device-Tree bindings for ST IRB IP + +Required properties: + - compatible: Should contain "st,comms-irb". + - reg: Base physical address of the controller and length of memory + mapped region. + - interrupts: interrupt-specifier for the sole interrupt generated by + the device. The interrupt specifier format depends on the interrupt + controller parent. + - rx-mode: can be "infrared" or "uhf". This property specifies the L1 + protocol used for receiving remote control signals. rx-mode should + be present iff the rx pins are wired up. + - tx-mode: should be "infrared". This property specifies the L1 + protocol used for transmitting remote control signals. tx-mode should + be present iff the tx pins are wired up. + +Optional properties: + - pinctrl-names, pinctrl-0: the pincontrol settings to configure muxing + properly for IRB pins. 
+ - clocks : phandle with clock-specifier pair for IRB. + +Example node: + + rc: rc@fe518000 { + compatible = "st,comms-irb"; + reg = <0xfe518000 0x234>; + interrupts = <0 203 0>; + rx-mode = "infrared"; + }; diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 1dd622546d0..9046ba06c47 100644 --- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -12,6 +12,11 @@ Required properties: Optional properties: - fsl,cd-controller : Indicate to use controller internal card detection - fsl,wp-controller : Indicate to use controller internal write protection +- fsl,delay-line : Specify the number of delay cells for override mode. + This is used to set the clock delay for DLL (Delay Line) on override mode + to select a proper data sampling window in case the clock quality is not good + because the signal path is too long on the board. Please refer to eSDHC/uSDHC + chapter, DLL (Delay Line) section in RM for details. Examples: diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt index 066a78b034c..8f3f1331535 100644 --- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt @@ -52,6 +52,9 @@ Optional properties: is specified and the ciu clock is specified then we'll try to set the ciu clock to this at probe time. +* clock-freq-min-max: Minimum and Maximum clock frequency for card output + clock (cclk_out). If it's not specified, max is 200 MHz and min is 400 kHz by default. + * num-slots: specifies the number of slots supported by the controller. The number of physical slots actually used could be equal or less than the value specified by num-slots. If this property is not specified, the value @@ -66,6 +69,10 @@ Optional properties: * supports-highspeed: Enables support for high speed cards (up to 50MHz) +* caps2-mmc-hs200-1_8v: Supports mmc HS200 SDR 1.8V mode + +* caps2-mmc-hs200-1_2v: Supports mmc HS200 SDR 1.2V mode + * broken-cd: as documented in mmc core bindings. * vmmc-supply: The phandle to the regulator to use for vmmc. If this is @@ -93,8 +100,10 @@ board specific portions as listed below. dwmmc0@12200000 { clock-frequency = <400000000>; + clock-freq-min-max = <400000 200000000>; num-slots = <1>; supports-highspeed; + caps2-mmc-hs200-1_8v; broken-cd; fifo-depth = <0x80>; card-detect-delay = <200>; diff --git a/Documentation/devicetree/bindings/power/twl-charger.txt b/Documentation/devicetree/bindings/power/twl-charger.txt new file mode 100644 index 00000000000..d5c706216df --- /dev/null +++ b/Documentation/devicetree/bindings/power/twl-charger.txt @@ -0,0 +1,20 @@ +TWL BCI (Battery Charger Interface) + +Required properties: +- compatible: + - "ti,twl4030-bci" +- interrupts: two interrupt lines from the TWL SIH (secondary + interrupt handler) - interrupts 9 and 2. + +Optional properties: +- ti,bb-uvolt: microvolts for charging the backup battery. +- ti,bb-uamp: microamps for charging the backup battery.
+ +Examples: + +bci { + compatible = "ti,twl4030-bci"; + interrupts = <9>, <2>; + ti,bb-uvolt = <3200000>; + ti,bb-uamp = <150>; +}; diff --git a/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt b/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt new file mode 100644 index 00000000000..4f6a550184d --- /dev/null +++ b/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt @@ -0,0 +1,32 @@ +TI BQ24735 Charge Controller +~~~~~~~~~~ + +Required properties : + - compatible : "ti,bq24735" + +Optional properties : + - interrupts : Specify the interrupt to be used to trigger when the AC + adapter is either plugged in or removed. + - ti,ac-detect-gpios : This GPIO is optionally used to read the AC adapter + presence. This is a Host GPIO that is configured as an input and + connected to the bq24735. + - ti,charge-current : Used to control and set the charging current. This value + must be between 128mA and 8.128A with a 64mA step resolution. The POR value + is 0x0000h. This number is in mA (e.g. 8192), see spec for more information + about the ChargeCurrent (0x14h) register. + - ti,charge-voltage : Used to control and set the charging voltage. This value + must be between 1.024V and 19.2V with a 16mV step resolution. The POR value + is 0x0000h. This number is in mV (e.g. 19200), see spec for more information + about the ChargeVoltage (0x15h) register. + - ti,input-current : Used to control and set the charger input current. This + value must be between 128mA and 8.064A with a 128mA step resolution. The + POR value is 0x1000h. This number is in mA (e.g. 8064), see the spec for + more information about the InputCurrent (0x3fh) register. + +Example: + + bq24735@9 { + compatible = "ti,bq24735"; + reg = <0x9>; + ti,ac-detect-gpios = <&gpio 72 0x1>; + } diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt index 2a4b4bce611..7fc1b010fa7 100644 --- a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt @@ -1,33 +1,30 @@ -* Freescale 83xx DMA Controller +* Freescale DMA Controllers -Freescale PowerPC 83xx have on chip general purpose DMA controllers. +** Freescale Elo DMA Controller + This is a little-endian 4-channel DMA controller, used in Freescale mpc83xx + series chips such as mpc8315, mpc8349, mpc8379 etc. Required properties: -- compatible : compatible list, contains 2 entries, first is - "fsl,CHIP-dma", where CHIP is the processor - (mpc8349, mpc8360, etc.) and the second is - "fsl,elo-dma" -- reg : <registers mapping for DMA general status reg> -- ranges : Should be defined as specified in 1) to describe the - DMA controller channels. +- compatible : must include "fsl,elo-dma" +- reg : DMA General Status Register, i.e. DGSR which contains + status for all the 4 DMA channels +- ranges : describes the mapping between the address space of the + DMA channels and the address space of the DMA controller - cell-index : controller index. 0 for controller @ 0x8100 -- interrupts : <interrupt mapping for DMA IRQ> +- interrupts : interrupt specifier for DMA IRQ - interrupt-parent : optional, if needed for interrupt mapping - - DMA channel nodes: - - compatible : compatible list, contains 2 entries, first is - "fsl,CHIP-dma-channel", where CHIP is the processor - (mpc8349, mpc8350, etc.) and the second is - "fsl,elo-dma-channel". However, see note below. - - reg : <registers mapping for channel> - - cell-index : dma channel index starts at 0. 
+ - compatible : must include "fsl,elo-dma-channel" + However, see note below. + - reg : DMA channel specific registers + - cell-index : DMA channel index starts at 0. Optional properties: - - interrupts : <interrupt mapping for DMA channel IRQ> - (on 83xx this is expected to be identical to - the interrupts property of the parent node) + - interrupts : interrupt specifier for DMA channel IRQ + (on 83xx this is expected to be identical to + the interrupts property of the parent node) - interrupt-parent : optional, if needed for interrupt mapping Example: @@ -70,30 +67,27 @@ Example: }; }; -* Freescale 85xx/86xx DMA Controller - -Freescale PowerPC 85xx/86xx have on chip general purpose DMA controllers. +** Freescale EloPlus DMA Controller + This is a 4-channel DMA controller with extended addresses and chaining, + mainly used in Freescale mpc85xx/86xx, Pxxx and BSC series chips, such as + mpc8540, mpc8641 p4080, bsc9131 etc. Required properties: -- compatible : compatible list, contains 2 entries, first is - "fsl,CHIP-dma", where CHIP is the processor - (mpc8540, mpc8540, etc.) and the second is - "fsl,eloplus-dma" -- reg : <registers mapping for DMA general status reg> +- compatible : must include "fsl,eloplus-dma" +- reg : DMA General Status Register, i.e. DGSR which contains + status for all the 4 DMA channels - cell-index : controller index. 0 for controller @ 0x21000, 1 for controller @ 0xc000 -- ranges : Should be defined as specified in 1) to describe the - DMA controller channels. +- ranges : describes the mapping between the address space of the + DMA channels and the address space of the DMA controller - DMA channel nodes: - - compatible : compatible list, contains 2 entries, first is - "fsl,CHIP-dma-channel", where CHIP is the processor - (mpc8540, mpc8560, etc.) and the second is - "fsl,eloplus-dma-channel". However, see note below. - - cell-index : dma channel index starts at 0. - - reg : <registers mapping for channel> - - interrupts : <interrupt mapping for DMA channel IRQ> + - compatible : must include "fsl,eloplus-dma-channel" + However, see note below. + - cell-index : DMA channel index starts at 0. + - reg : DMA channel specific registers + - interrupts : interrupt specifier for DMA channel IRQ - interrupt-parent : optional, if needed for interrupt mapping Example: @@ -134,6 +128,76 @@ Example: }; }; +** Freescale Elo3 DMA Controller + DMA controller which has same function as EloPlus except that Elo3 has 8 + channels while EloPlus has only 4, it is used in Freescale Txxx and Bxxx + series chips, such as t1040, t4240, b4860. + +Required properties: + +- compatible : must include "fsl,elo3-dma" +- reg : contains two entries for DMA General Status Registers, + i.e. 
DGSR0 which includes status for channel 1~4, and + DGSR1 for channel 5~8 +- ranges : describes the mapping between the address space of the + DMA channels and the address space of the DMA controller + +- DMA channel nodes: + - compatible : must include "fsl,eloplus-dma-channel" + - reg : DMA channel specific registers + - interrupts : interrupt specifier for DMA channel IRQ + - interrupt-parent : optional, if needed for interrupt mapping + +Example: +dma@100300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,elo3-dma"; + reg = <0x100300 0x4>, + <0x100600 0x4>; + ranges = <0x0 0x100100 0x500>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + interrupts = <28 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + interrupts = <29 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + interrupts = <30 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + interrupts = <31 2 0 0>; + }; + dma-channel@300 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x300 0x80>; + interrupts = <76 2 0 0>; + }; + dma-channel@380 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x380 0x80>; + interrupts = <77 2 0 0>; + }; + dma-channel@400 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x400 0x80>; + interrupts = <78 2 0 0>; + }; + dma-channel@480 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x480 0x80>; + interrupts = <79 2 0 0>; + }; +}; + Note on DMA channel compatible properties: The compatible property must say "fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA driver (fsldma). Any DMA channel used by fsldma cannot be used by another diff --git a/Documentation/devicetree/bindings/watchdog/dw_wdt.txt b/Documentation/devicetree/bindings/watchdog/dw_wdt.txt new file mode 100644 index 00000000000..08e16f684f2 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/dw_wdt.txt @@ -0,0 +1,21 @@ +Synopsys Designware Watchdog Timer + +Required Properties: + +- compatible : Should contain "snps,dw-wdt" +- reg : Base address and size of the watchdog timer registers. +- clocks : phandle + clock-specifier for the clock that drives the + watchdog timer. + +Optional Properties: + +- interrupts : The interrupt used for the watchdog timeout warning. 
+ +Example: + + watchdog0: wd@ffd02000 { + compatible = "snps,dw-wdt"; + reg = <0xffd02000 0x1000>; + interrupts = <0 171 4>; + clocks = <&per_base_clk>; + }; diff --git a/Documentation/devicetree/bindings/gpio/men-a021-wdt.txt b/Documentation/devicetree/bindings/watchdog/men-a021-wdt.txt index 370dee3226d..370dee3226d 100644 --- a/Documentation/devicetree/bindings/gpio/men-a021-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/men-a021-wdt.txt diff --git a/Documentation/devicetree/bindings/watchdog/moxa,moxart-watchdog.txt b/Documentation/devicetree/bindings/watchdog/moxa,moxart-watchdog.txt new file mode 100644 index 00000000000..1169857d1d1 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/moxa,moxart-watchdog.txt @@ -0,0 +1,15 @@ +MOXA ART Watchdog timer + +Required properties: + +- compatible : Must be "moxa,moxart-watchdog" +- reg : Should contain registers location and length +- clocks : Should contain phandle for the clock that drives the counter + +Example: + + watchdog: watchdog@98500000 { + compatible = "moxa,moxart-watchdog"; + reg = <0x98500000 0x10>; + clocks = <&coreclk>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt new file mode 100644 index 00000000000..d7bab3db9d1 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt @@ -0,0 +1,19 @@ +Ralink Watchdog Timers + +Required properties: +- compatible: must be "ralink,rt2880-wdt" +- reg: physical base address of the controller and length of the register range + +Optional properties: +- interrupt-parent: phandle to the INTC device node +- interrupts: Specify the INTC interrupt number + +Example: + + watchdog@120 { + compatible = "ralink,rt2880-wdt"; + reg = <0x120 0x10>; + + interrupt-parent = <&intc>; + interrupts = <1>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt b/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt new file mode 100644 index 00000000000..9cbc76c89b2 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt @@ -0,0 +1,14 @@ +SiRFSoC Timer and Watchdog Timer(WDT) Controller + +Required properties: +- compatible: "sirf,prima2-tick" +- reg: Address range of tick timer/WDT register set +- interrupts: interrupt number to the cpu + +Example: + +timer@b0020000 { + compatible = "sirf,prima2-tick"; + reg = <0xb0020000 0x1000>; + interrupts = <0>; +}; diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt index a2b5663eae2..dd77a81bdb8 100644 --- a/Documentation/dmatest.txt +++ b/Documentation/dmatest.txt @@ -15,39 +15,48 @@ be built as module or inside kernel. Let's consider those cases. Part 2 - When dmatest is built as a module... -After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest -folder with nodes will be created. There are two important files located. First -is the 'run' node that controls run and stop phases of the test, and the second -one, 'results', is used to get the test case results. - -Note that in this case test will not run on load automatically. 
- Example of usage: + % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1 + +...or: + % modprobe dmatest % echo dma0chan0 > /sys/module/dmatest/parameters/channel % echo 2000 > /sys/module/dmatest/parameters/timeout % echo 1 > /sys/module/dmatest/parameters/iterations - % echo 1 > /sys/kernel/debug/dmatest/run + % echo 1 > /sys/module/dmatest/parameters/run + +...or on the kernel command line: + + dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1 Hint: the available channel list can be extracted by running the following command: % ls -1 /sys/class/dma/ -After a while you will start to get messages about current status or error like -in the original code. +Once started a message like "dmatest: Started 1 threads using dma0chan0" is +emitted. After that only test failure messages are reported until the test +stops. Note that running a new test will not stop any in-progress test. -The following command should return actual state of the test. - % cat /sys/kernel/debug/dmatest/run - -To wait for test done the user may perform a busy loop that checks the state. - - % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] - > do - > echo -n "." - > sleep 1 - > done - > echo +The following command returns the state of the test. + % cat /sys/module/dmatest/parameters/run + +To wait for test completion, userspace can poll 'run' until it is false, or use +the wait parameter. Specifying 'wait=1' when loading the module causes module +initialization to pause until a test run has completed, while reading +/sys/module/dmatest/parameters/wait waits for any running test to complete +before returning. For example, the following scripts wait for 42 tests +to complete before exiting. Note that if 'iterations' is set to 'infinite' then +waiting is disabled. + +Example: + % modprobe dmatest run=1 iterations=42 wait=1 + % modprobe -r dmatest +...or: + % modprobe dmatest run=1 iterations=42 + % cat /sys/module/dmatest/parameters/wait + % modprobe -r dmatest Part 3 - When built-in in the kernel... @@ -62,21 +71,22 @@ case. You always could check them at run-time by running Part 4 - Gathering the test results -The module provides a storage for the test results in the memory. The gathered -data could be used after test is done. +Test results are printed to the kernel log buffer with the format: -The special file 'results' in the debugfs represents gathered data of the in -progress test. The messages collected are printed to the kernel log as well. +"dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)" Example of output: - % cat /sys/kernel/debug/dmatest/results - dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) + % dmesg | tail -n 1 + dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) The message format is unified across the different types of errors. A number in the parens represents additional information, e.g. error code, error counter, -or status. +or status. A test thread also emits a summary line at completion listing the +number of tests executed, number that failed, and a result code. -Comparison between buffers is stored to the dedicated structure. +Example: + % dmesg | tail -n 1 + dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0) -Note that the verify result is now accessible only via file 'results' in the -debugfs. +The details of a data miscompare error are also emitted, but do not follow the +above format.
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt index 9dae5940743..5dd282dda55 100644 --- a/Documentation/filesystems/btrfs.txt +++ b/Documentation/filesystems/btrfs.txt @@ -70,6 +70,12 @@ Unless otherwise specified, all options default to off. See comments at the top of fs/btrfs/check-integrity.c for more info. + commit=<seconds> + Set the interval of periodic commit, 30 seconds by default. Higher + values defer data being synced to permanent storage with obvious + consequences when the system crashes. The upper bound is not forced, + but a warning is printed if it's more than 300 seconds (5 minutes). + compress compress=<type> compress-force @@ -154,7 +160,11 @@ Unless otherwise specified, all options default to off. Currently this scans a list of several previous tree roots and tries to use the first readable. - skip_balance + rescan_uuid_tree + Force check and rebuild procedure of the UUID tree. This should not + normally be needed. + + skip_balance Skip automatic resume of interrupted balance operation after mount. May be resumed with "btrfs balance resume." @@ -234,24 +244,14 @@ available from the git repository at the following location: These include the following tools: -mkfs.btrfs: create a filesystem - -btrfsctl: control program to create snapshots and subvolumes: +* mkfs.btrfs: create a filesystem - mount /dev/sda2 /mnt - btrfsctl -s new_subvol_name /mnt - btrfsctl -s snapshot_of_default /mnt/default - btrfsctl -s snapshot_of_new_subvol /mnt/new_subvol_name - btrfsctl -s snapshot_of_a_snapshot /mnt/snapshot_of_new_subvol - ls /mnt - default snapshot_of_a_snapshot snapshot_of_new_subvol - new_subvol_name snapshot_of_default +* btrfs: a single tool to manage the filesystems, refer to the manpage for more details - Snapshots and subvolumes cannot be deleted right now, but you can - rm -rf all the files and directories inside them. +* 'btrfsck' or 'btrfs check': do a consistency check of the filesystem -btrfsck: do a limited check of the FS extent trees. +Other tools for specific tasks: -btrfs-debug-tree: print all of the FS metadata in text form. Example: +* btrfs-convert: in-place conversion from ext2/3/4 filesystems - btrfs-debug-tree /dev/sda2 >& big_output_file +* btrfs-image: dump filesystem metadata for debugging diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index d29dea0f323..7b0dcdb5717 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 @@ -25,6 +25,7 @@ Supported adapters: * Intel Avoton (SOC) * Intel Wellsburg (PCH) * Intel Coleto Creek (PCH) + * Intel Wildcat Point-LP (PCH) Datasheets: Publicly available at the Intel website On Intel Patsburg and later chipsets, both the normal host SMBus controller diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 9ca3e74a10e..50680a59a2f 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1190,15 +1190,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted. owned by uid=0. ima_hash= [IMA] - Format: { "sha1" | "md5" } + Format: { md5 | sha1 | rmd160 | sha256 | sha384 + | sha512 | ... } default: "sha1" + The list of supported hash algorithms is defined + in crypto/hash_info.h. + ima_tcb [IMA] Load a policy which meets the needs of the Trusted Computing Base. This means IMA will measure all programs exec'd, files mmap'd for exec, and all files opened for read by uid=0. 
+ ima_template= [IMA] + Select one of the defined IMA measurement template formats. + Formats: { "ima" | "ima-ng" } + Default: "ima-ng" + init= [KNL] Format: <full_path> Run specified binary instead of /sbin/init as init diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 8b8a0578764..3c12d9a7ed0 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -577,9 +577,6 @@ tcp_limit_output_bytes - INTEGER typical pfifo_fast qdiscs. tcp_limit_output_bytes limits the number of bytes on qdisc or device to reduce artificial RTT/cwnd and reduce bufferbloat. - Note: For GSO/TSO enabled flows, we try to have at least two - packets in flight. Reducing tcp_limit_output_bytes might also - reduce the size of individual GSO packet (64KB being the max) Default: 131072 tcp_challenge_ack_limit - INTEGER diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt index 3f10b39b034..89a8816990f 100644 --- a/Documentation/power/power_supply_class.txt +++ b/Documentation/power/power_supply_class.txt @@ -135,11 +135,11 @@ CAPACITY_LEVEL - capacity level. This corresponds to POWER_SUPPLY_CAPACITY_LEVEL_*. TEMP - temperature of the power supply. -TEMP_ALERT_MIN - minimum battery temperature alert value in milli centigrade. -TEMP_ALERT_MAX - maximum battery temperature alert value in milli centigrade. +TEMP_ALERT_MIN - minimum battery temperature alert. +TEMP_ALERT_MAX - maximum battery temperature alert. TEMP_AMBIENT - ambient temperature. -TEMP_AMBIENT_ALERT_MIN - minimum ambient temperature alert value in milli centigrade. -TEMP_AMBIENT_ALERT_MAX - maximum ambient temperature alert value in milli centigrade. +TEMP_AMBIENT_ALERT_MIN - minimum ambient temperature alert. +TEMP_AMBIENT_ALERT_MAX - maximum ambient temperature alert. TIME_TO_EMPTY - seconds left for battery to be considered empty (i.e. while battery powers a load) diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 0f54333b0ff..b6ce00b2be9 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -547,13 +547,11 @@ helper functions described in Section 4. In that case, pm_runtime_resume() should be used. Of course, for this purpose the device's runtime PM has to be enabled earlier by calling pm_runtime_enable(). -If the device bus type's or driver's ->probe() callback runs -pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts, -they will fail returning -EAGAIN, because the device's usage counter is -incremented by the driver core before executing ->probe(). Still, it may be -desirable to suspend the device as soon as ->probe() has finished, so the driver -core uses pm_runtime_put_sync() to invoke the subsystem-level idle callback for -the device at that time. +It may be desirable to suspend the device once ->probe() has finished. +Therefore the driver core uses the asynchronous pm_request_idle() to submit a +request to execute the subsystem-level idle callback for the device at that +time. A driver that makes use of the runtime autosuspend feature may want to +update the last busy mark before returning from ->probe().
Moreover, the driver core prevents runtime PM callbacks from racing with the bus notifier callback in __device_release_driver(), which is necessary, because the @@ -656,7 +654,7 @@ out the following operations: __pm_runtime_disable() with 'false' as the second argument for every device right before executing the subsystem-level .suspend_late() callback for it. - * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync() + * During system resume it calls pm_runtime_enable() and pm_runtime_put() for every device right after executing the subsystem-level .resume_early() callback and right after executing the subsystem-level .resume() callback for it, respectively. diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX index 414235c1fcf..45c82fd3e9d 100644 --- a/Documentation/security/00-INDEX +++ b/Documentation/security/00-INDEX @@ -22,3 +22,5 @@ keys.txt - description of the kernel key retention service. tomoyo.txt - documentation on the TOMOYO Linux Security Module. +IMA-templates.txt + - documentation on the template management mechanism for IMA. diff --git a/Documentation/security/IMA-templates.txt b/Documentation/security/IMA-templates.txt new file mode 100644 index 00000000000..a777e5f1df5 --- /dev/null +++ b/Documentation/security/IMA-templates.txt @@ -0,0 +1,87 @@ + IMA Template Management Mechanism + + +==== INTRODUCTION ==== + +The original 'ima' template is fixed length, containing the filedata hash +and pathname. The filedata hash is limited to 20 bytes (md5/sha1). +The pathname is a null terminated string, limited to 255 characters. +To overcome these limitations and to add additional file metadata, it is +necessary to extend the current version of IMA by defining additional +templates. For example, information that could be possibly reported are +the inode UID/GID or the LSM labels either of the inode and of the process +that is accessing it. + +However, the main problem to introduce this feature is that, each time +a new template is defined, the functions that generate and display +the measurements list would include the code for handling a new format +and, thus, would significantly grow over the time. + +The proposed solution solves this problem by separating the template +management from the remaining IMA code. The core of this solution is the +definition of two new data structures: a template descriptor, to determine +which information should be included in the measurement list; a template +field, to generate and display data of a given type. + +Managing templates with these structures is very simple. To support +a new data type, developers define the field identifier and implement +two functions, init() and show(), respectively to generate and display +measurement entries. Defining a new template descriptor requires +specifying the template format, a string of field identifiers separated +by the '|' character. While in the current implementation it is possible +to define new template descriptors only by adding their definition in the +template specific code (ima_template.c), in a future version it will be +possible to register a new template on a running kernel by supplying to IMA +the desired format string. In this version, IMA initializes at boot time +all defined template descriptors by translating the format into an array +of template fields structures taken from the set of the supported ones. 
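As a rough sketch of the descriptor/field split just described: a template field pairs an identifier with an init() and a show() hook, and a descriptor is little more than a name, a format string and the array of fields resolved from that format at boot. The structures below are simplified stand-ins, not the actual definitions in ima_template.c; the member names and types are assumptions made for illustration.

    #include <linux/seq_file.h>

    /* Illustrative only; the real structures live in the IMA template code. */
    struct example_template_field {
            const char *field_id;                  /* e.g. "d-ng" or "n-ng" */
            int (*init)(void *event, void *out);   /* generate the field data */
            void (*show)(struct seq_file *m, void *data); /* display the data */
    };

    struct example_template_desc {
            const char *name;       /* e.g. "ima-ng" */
            const char *fmt;        /* e.g. "d-ng|n-ng" */
            int num_fields;
            struct example_template_field **fields; /* resolved from fmt at boot */
    };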
+ +After the initialization step, IMA will call ima_alloc_init_template() +(new function defined within the patches for the new template management +mechanism) to generate a new measurement entry by using the template +descriptor chosen through the kernel configuration or through the newly +introduced 'ima_template=' kernel command line parameter. It is during this +phase that the advantages of the new architecture are clearly shown: +the latter function will not contain specific code to handle a given template +but, instead, it simply calls the init() method of the template fields +associated to the chosen template descriptor and store the result (pointer +to allocated data and data length) in the measurement entry structure. + +The same mechanism is employed to display measurements entries. +The functions ima[_ascii]_measurements_show() retrieve, for each entry, +the template descriptor used to produce that entry and call the show() +method for each item of the array of template fields structures. + + + +==== SUPPORTED TEMPLATE FIELDS AND DESCRIPTORS ==== + +In the following, there is the list of supported template fields +('<identifier>': description), that can be used to define new template +descriptors by adding their identifier to the format string +(support for more data types will be added later): + + - 'd': the digest of the event (i.e. the digest of a measured file), + calculated with the SHA1 or MD5 hash algorithm; + - 'n': the name of the event (i.e. the file name), with size up to 255 bytes; + - 'd-ng': the digest of the event, calculated with an arbitrary hash + algorithm (field format: [<hash algo>:]digest, where the digest + prefix is shown only if the hash algorithm is not SHA1 or MD5); + - 'n-ng': the name of the event, without size limitations. + + +Below, there is the list of defined template descriptors: + - "ima": its format is 'd|n'; + - "ima-ng" (default): its format is 'd-ng|n-ng'. + + + +==== USE ==== + +To specify the template descriptor to be used to generate measurement entries, +currently the following methods are supported: + + - select a template descriptor among those supported in the kernel + configuration ('ima-ng' is the default choice); + - specify a template descriptor name from the kernel command line through + the 'ima_template=' parameter. diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt index 7b4145d0045..a4c33f1a7c6 100644 --- a/Documentation/security/keys.txt +++ b/Documentation/security/keys.txt @@ -865,15 +865,14 @@ encountered: calling processes has a searchable link to the key from one of its keyrings. There are three functions for dealing with these: - key_ref_t make_key_ref(const struct key *key, - unsigned long possession); + key_ref_t make_key_ref(const struct key *key, bool possession); struct key *key_ref_to_ptr(const key_ref_t key_ref); - unsigned long is_key_possessed(const key_ref_t key_ref); + bool is_key_possessed(const key_ref_t key_ref); The first function constructs a key reference from a key pointer and - possession information (which must be 0 or 1 and not any other value). + possession information (which must be true or false). The second function retrieves the key pointer from a reference and the third retrieves the possession flag. @@ -961,14 +960,17 @@ payload contents" for more information. the argument will not be parsed. 
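A short illustration of the bool-based helpers shown in the hunk above; 'some_key' is a hypothetical struct key pointer obtained elsewhere, and only the three key_ref_t helpers are taken from the documented API.

    #include <linux/key.h>

    /* Wrap a key pointer together with the possession flag, then unpack both. */
    static void example_key_ref_usage(struct key *some_key)
    {
            key_ref_t ref = make_key_ref(some_key, true);   /* possessed */
            struct key *key = key_ref_to_ptr(ref);          /* raw pointer back */
            bool possessed = is_key_possessed(ref);         /* true */

            (void)key;
            (void)possessed;
    }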
-(*) Extra references can be made to a key by calling the following function: +(*) Extra references can be made to a key by calling one of the following + functions: + struct key *__key_get(struct key *key); struct key *key_get(struct key *key); - These need to be disposed of by calling key_put() when they've been - finished with. The key pointer passed in will be returned. If the pointer - is NULL or CONFIG_KEYS is not set then the key will not be dereferenced and - no increment will take place. + Keys so references will need to be disposed of by calling key_put() when + they've been finished with. The key pointer passed in will be returned. + + In the case of key_get(), if the pointer is NULL or CONFIG_KEYS is not set + then the key will not be dereferenced and no increment will take place. (*) A key's serial number can be obtained by calling: diff --git a/Documentation/vm/split_page_table_lock b/Documentation/vm/split_page_table_lock index 7521d367f21..6dea4fd5c96 100644 --- a/Documentation/vm/split_page_table_lock +++ b/Documentation/vm/split_page_table_lock @@ -63,9 +63,9 @@ levels. PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table allocation and pgtable_pmd_page_dtor() on freeing. -Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but -make sure you cover all PMD table allocation / freeing paths: i.e X86_PAE -preallocate few PMDs on pgd_alloc(). +Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and +pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing +paths: i.e X86_PAE preallocate few PMDs on pgd_alloc(). With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK. diff --git a/MAINTAINERS b/MAINTAINERS index 678f0743039..8285ed4676b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1425,7 +1425,7 @@ M: Wolfram Sang <wsa@the-dreams.de> L: linux-i2c@vger.kernel.org S: Maintained F: drivers/misc/eeprom/at24.c -F: include/linux/i2c/at24.h +F: include/linux/platform_data/at24.h ATA OVER ETHERNET (AOE) DRIVER M: "Ed L. 
Cashin" <ecashin@coraid.com> @@ -2468,7 +2468,7 @@ S: Maintained F: drivers/media/dvb-frontends/cxd2820r* CXGB3 ETHERNET DRIVER (CXGB3) -M: Divy Le Ray <divy@chelsio.com> +M: Santosh Raspatur <santosh@chelsio.com> L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported @@ -3063,6 +3063,14 @@ W: bluesmoke.sourceforge.net S: Maintained F: drivers/edac/amd64_edac* +EDAC-CALXEDA +M: Doug Thompson <dougthompson@xmission.com> +M: Robert Richter <rric@kernel.org> +L: linux-edac@vger.kernel.org +W: bluesmoke.sourceforge.net +S: Maintained +F: drivers/edac/highbank* + EDAC-CAVIUM M: Ralf Baechle <ralf@linux-mips.org> M: David Daney <david.daney@cavium.com> @@ -3144,6 +3152,13 @@ W: bluesmoke.sourceforge.net S: Maintained F: drivers/edac/i82975x_edac.c +EDAC-MPC85XX +M: Johannes Thumshirn <johannes.thumshirn@men.de> +L: linux-edac@vger.kernel.org +W: bluesmoke.sourceforge.net +S: Maintained +F: drivers/edac/mpc85xx_edac.[ch] + EDAC-PASEMI M: Egor Martovetsky <egor@pasemi.com> L: linux-edac@vger.kernel.org @@ -4050,6 +4065,7 @@ F: arch/x86/include/uapi/asm/hyperv.h F: arch/x86/kernel/cpu/mshyperv.c F: drivers/hid/hid-hyperv.c F: drivers/hv/ +F: drivers/input/serio/hyperv-keyboard.c F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/video/hyperv_fb.c @@ -7500,9 +7516,10 @@ SELINUX SECURITY MODULE M: Stephen Smalley <sds@tycho.nsa.gov> M: James Morris <james.l.morris@oracle.com> M: Eric Paris <eparis@parisplace.org> +M: Paul Moore <paul@paul-moore.com> L: selinux@tycho.nsa.gov (subscribers-only, general discussion) W: http://selinuxproject.org -T: git git://git.infradead.org/users/eparis/selinux.git +T: git git://git.infradead.org/users/pcmoore/selinux S: Supported F: include/linux/selinux* F: security/selinux/ @@ -8649,6 +8666,7 @@ F: drivers/media/usb/tm6000/ TPM DEVICE DRIVER M: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com> M: Ashley Lai <ashley@ashleylai.com> +M: Peter Huewe <peterhuewe@gmx.de> M: Rajiv Andrade <mail@srajiv.net> W: http://tpmdd.sourceforge.net M: Marcel Selhorst <tpmdd@selhorst.net> @@ -8945,8 +8963,8 @@ USB PEGASUS DRIVER M: Petko Manolov <petkan@nucleusys.com> L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org -T: git git://git.code.sf.net/p/pegasus2/git -W: http://pegasus2.sourceforge.net/ +T: git git://github.com/petkan/pegasus.git +W: https://github.com/petkan/pegasus S: Maintained F: drivers/net/usb/pegasus.* @@ -8967,8 +8985,8 @@ USB RTL8150 DRIVER M: Petko Manolov <petkan@nucleusys.com> L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org -T: git git://git.code.sf.net/p/pegasus2/git -W: http://pegasus2.sourceforge.net/ +T: git git://github.com/petkan/rtl8150.git +W: https://github.com/petkan/rtl8150 S: Maintained F: drivers/net/usb/rtl8150.c @@ -9507,8 +9525,8 @@ F: drivers/xen/*swiotlb* XFS FILESYSTEM P: Silicon Graphics Inc +M: Dave Chinner <dchinner@fromorbit.com> M: Ben Myers <bpm@sgi.com> -M: Alex Elder <elder@kernel.org> M: xfs@oss.sgi.com L: xfs@oss.sgi.com W: http://oss.sgi.com/projects/xfs diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 135c674eaf9..d39dc9b95a2 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -16,8 +16,8 @@ config ALPHA select ARCH_WANT_IPC_PARSE_VERSION select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select GENERIC_CLOCKEVENTS select GENERIC_SMP_IDLE_THREAD - select GENERIC_CMOS_UPDATE select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HAVE_MOD_ARCH_SPECIFIC @@ -488,6 +488,20 @@ config VGA_HOSE which always have multiple hoses, and whose 
consoles support it. +config ALPHA_QEMU + bool "Run under QEMU emulation" + depends on !ALPHA_GENERIC + ---help--- + Assume the presence of special features supported by QEMU PALcode + that reduce the overhead of system emulation. + + Generic kernels will auto-detect QEMU. But when building a + system-specific kernel, the assumption is that we want to + elimiate as many runtime tests as possible. + + If unsure, say N. + + config ALPHA_SRM bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME depends on TTY @@ -572,6 +586,30 @@ config NUMA Access). This option is for configuring high-end multiprocessor server machines. If in doubt, say N. +config ALPHA_WTINT + bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC + default y if ALPHA_QEMU + default n if ALPHA_EV5 || ALPHA_EV56 || (ALPHA_EV4 && !ALPHA_LCA) + default n if !ALPHA_SRM && !ALPHA_GENERIC + default y if SMP + ---help--- + The Wait for Interrupt (WTINT) PALcall attempts to place the CPU + to sleep until the next interrupt. This may reduce the power + consumed, and the heat produced by the computer. However, it has + the side effect of making the cycle counter unreliable as a timing + device across the sleep. + + For emulation under QEMU, definitely say Y here, as we have other + mechanisms for measuring time than the cycle counter. + + For EV4 (but not LCA), EV5 and EV56 systems, or for systems running + MILO, sleep mode is not supported so you might as well say N here. + + For SMP systems we cannot use the cycle counter for timing anyway, + so you might as well say Y here. + + If unsure, say N. + config NODES_SHIFT int default "7" @@ -613,9 +651,41 @@ config VERBOSE_MCHECK_ON Take the default (1) unless you want more control or more info. +choice + prompt "Timer interrupt frequency (HZ)?" + default HZ_128 if ALPHA_QEMU + default HZ_1200 if ALPHA_RAWHIDE + default HZ_1024 + ---help--- + The frequency at which timer interrupts occur. A high frequency + minimizes latency, whereas a low frequency minimizes overhead of + process accounting. The later effect is especially significant + when being run under QEMU. + + Note that some Alpha hardware cannot change the interrupt frequency + of the timer. If unsure, say 1024 (or 1200 for Rawhide). 
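Purely as an illustration of the arithmetic behind the power-of-two rates offered by the choice entries that follow (the hypothetical helper below is not part of the patch): the MC146818 periodic interrupt runs at 32768 >> (n - 1) Hz, so the rate-select value programmed later by common_init_rtc() can be derived directly from HZ. The 1200 Hz Rawhide rate is not a power of two and is handled separately.

    #include <linux/mc146818rtc.h>

    /* Illustration only: map a power-of-two HZ value (32..1024) onto the
     * RTC_FREQ_SELECT rate bits; periodic rate = 32768 >> (n - 1). */
    static inline unsigned char example_rtc_freq_select(int hz)
    {
            return RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / hz);
    }
    /* example_rtc_freq_select(1024) == RTC_REF_CLCK_32KHZ + 6 */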
+ + config HZ_32 + bool "32 Hz" + config HZ_64 + bool "64 Hz" + config HZ_128 + bool "128 Hz" + config HZ_256 + bool "256 Hz" + config HZ_1024 + bool "1024 Hz" + config HZ_1200 + bool "1200 Hz" +endchoice + config HZ - int - default 1200 if ALPHA_RAWHIDE + int + default 32 if HZ_32 + default 64 if HZ_64 + default 128 if HZ_128 + default 256 if HZ_256 + default 1200 if HZ_1200 default 1024 source "drivers/pci/Kconfig" diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h index 72dbf235927..75cb3641ed2 100644 --- a/arch/alpha/include/asm/machvec.h +++ b/arch/alpha/include/asm/machvec.h @@ -33,6 +33,7 @@ struct alpha_machine_vector int nr_irqs; int rtc_port; + int rtc_boot_cpu_only; unsigned int max_asn; unsigned long max_isa_dma_address; unsigned long irq_probe_mask; @@ -95,9 +96,6 @@ struct alpha_machine_vector struct _alpha_agp_info *(*agp_info)(void); - unsigned int (*rtc_get_time)(struct rtc_time *); - int (*rtc_set_time)(struct rtc_time *); - const char *vector_name; /* NUMA information */ @@ -126,13 +124,19 @@ extern struct alpha_machine_vector alpha_mv; #ifdef CONFIG_ALPHA_GENERIC extern int alpha_using_srm; +extern int alpha_using_qemu; #else -#ifdef CONFIG_ALPHA_SRM -#define alpha_using_srm 1 -#else -#define alpha_using_srm 0 -#endif +# ifdef CONFIG_ALPHA_SRM +# define alpha_using_srm 1 +# else +# define alpha_using_srm 0 +# endif +# ifdef CONFIG_ALPHA_QEMU +# define alpha_using_qemu 1 +# else +# define alpha_using_qemu 0 +# endif #endif /* GENERIC */ -#endif +#endif /* __KERNEL__ */ #endif /* __ALPHA_MACHVEC_H */ diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h index 6fcd2b5b08f..5422a47646f 100644 --- a/arch/alpha/include/asm/pal.h +++ b/arch/alpha/include/asm/pal.h @@ -89,6 +89,7 @@ __CALL_PAL_W1(wrmces, unsigned long); __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); __CALL_PAL_W1(wrusp, unsigned long); __CALL_PAL_W1(wrvptptr, unsigned long); +__CALL_PAL_RW1(wtint, unsigned long, unsigned long); /* * TB routines.. @@ -111,5 +112,75 @@ __CALL_PAL_W1(wrvptptr, unsigned long); #define tbiap() __tbi(-1, /* no second argument */) #define tbia() __tbi(-2, /* no second argument */) +/* + * QEMU Cserv routines.. 
+ */ + +static inline unsigned long +qemu_get_walltime(void) +{ + register unsigned long v0 __asm__("$0"); + register unsigned long a0 __asm__("$16") = 3; + + asm("call_pal %2 # cserve get_time" + : "=r"(v0), "+r"(a0) + : "i"(PAL_cserve) + : "$17", "$18", "$19", "$20", "$21"); + + return v0; +} + +static inline unsigned long +qemu_get_alarm(void) +{ + register unsigned long v0 __asm__("$0"); + register unsigned long a0 __asm__("$16") = 4; + + asm("call_pal %2 # cserve get_alarm" + : "=r"(v0), "+r"(a0) + : "i"(PAL_cserve) + : "$17", "$18", "$19", "$20", "$21"); + + return v0; +} + +static inline void +qemu_set_alarm_rel(unsigned long expire) +{ + register unsigned long a0 __asm__("$16") = 5; + register unsigned long a1 __asm__("$17") = expire; + + asm volatile("call_pal %2 # cserve set_alarm_rel" + : "+r"(a0), "+r"(a1) + : "i"(PAL_cserve) + : "$0", "$18", "$19", "$20", "$21"); +} + +static inline void +qemu_set_alarm_abs(unsigned long expire) +{ + register unsigned long a0 __asm__("$16") = 6; + register unsigned long a1 __asm__("$17") = expire; + + asm volatile("call_pal %2 # cserve set_alarm_abs" + : "+r"(a0), "+r"(a1) + : "i"(PAL_cserve) + : "$0", "$18", "$19", "$20", "$21"); +} + +static inline unsigned long +qemu_get_vmtime(void) +{ + register unsigned long v0 __asm__("$0"); + register unsigned long a0 __asm__("$16") = 7; + + asm("call_pal %2 # cserve get_time" + : "=r"(v0), "+r"(a0) + : "i"(PAL_cserve) + : "$17", "$18", "$19", "$20", "$21"); + + return v0; +} + #endif /* !__ASSEMBLY__ */ #endif /* __ALPHA_PAL_H */ diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h index d70408d3667..f71c3b0ed36 100644 --- a/arch/alpha/include/asm/rtc.h +++ b/arch/alpha/include/asm/rtc.h @@ -1,12 +1 @@ -#ifndef _ALPHA_RTC_H -#define _ALPHA_RTC_H - -#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \ - || defined(CONFIG_ALPHA_GENERIC) -# define get_rtc_time alpha_mv.rtc_get_time -# define set_rtc_time alpha_mv.rtc_set_time -#endif - #include <asm-generic/rtc.h> - -#endif diff --git a/arch/alpha/include/asm/string.h b/arch/alpha/include/asm/string.h index b02b8a28294..c2911f59170 100644 --- a/arch/alpha/include/asm/string.h +++ b/arch/alpha/include/asm/string.h @@ -22,15 +22,27 @@ extern void * __memcpy(void *, const void *, size_t); #define __HAVE_ARCH_MEMSET extern void * __constant_c_memset(void *, unsigned long, size_t); +extern void * ___memset(void *, int, size_t); extern void * __memset(void *, int, size_t); extern void * memset(void *, int, size_t); -#define memset(s, c, n) \ -(__builtin_constant_p(c) \ - ? (__builtin_constant_p(n) && (c) == 0 \ - ? __builtin_memset((s),0,(n)) \ - : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \ - : __memset((s),(c),(n))) +/* For gcc 3.x, we cannot have the inline function named "memset" because + the __builtin_memset will attempt to resolve to the inline as well, + leading to a "sorry" about unimplemented recursive inlining. 
*/ +extern inline void *__memset(void *s, int c, size_t n) +{ + if (__builtin_constant_p(c)) { + if (__builtin_constant_p(n)) { + return __builtin_memset(s, c, n); + } else { + unsigned long c8 = (c & 0xff) * 0x0101010101010101UL; + return __constant_c_memset(s, c8, n); + } + } + return ___memset(s, c, n); +} + +#define memset __memset #define __HAVE_ARCH_STRCPY extern char * strcpy(char *,const char *); diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 52cd2a4a3ff..453597b91f3 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -58,8 +58,6 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define THREAD_SIZE_ORDER 1 #define THREAD_SIZE (2*PAGE_SIZE) -#define PREEMPT_ACTIVE 0x40000000 - /* * Thread information flags: * - these are process state flags and used from assembly diff --git a/arch/alpha/include/uapi/asm/pal.h b/arch/alpha/include/uapi/asm/pal.h index 3c0ce08e5f5..dfc8140b908 100644 --- a/arch/alpha/include/uapi/asm/pal.h +++ b/arch/alpha/include/uapi/asm/pal.h @@ -46,6 +46,7 @@ #define PAL_rdusp 58 #define PAL_whami 60 #define PAL_retsys 61 +#define PAL_wtint 62 #define PAL_rti 63 diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 84ec46b38f7..0d54650e78f 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o obj-$(CONFIG_SRM_ENV) += srm_env.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_RTC_DRV_ALPHA) += rtc.o ifdef CONFIG_ALPHA_GENERIC diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index 89566b346c0..f4c7ab6f43b 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -40,6 +40,7 @@ EXPORT_SYMBOL(strrchr); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memset); +EXPORT_SYMBOL(___memset); EXPORT_SYMBOL(__memsetw); EXPORT_SYMBOL(__constant_c_memset); EXPORT_SYMBOL(copy_page); diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 28e4429596f..1c8625cb0e2 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -66,21 +66,7 @@ do_entInt(unsigned long type, unsigned long vector, break; case 1: old_regs = set_irq_regs(regs); -#ifdef CONFIG_SMP - { - long cpu; - - smp_percpu_timer_interrupt(regs); - cpu = smp_processor_id(); - if (cpu != boot_cpuid) { - kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ)); - } else { - handle_irq(RTC_IRQ); - } - } -#else handle_irq(RTC_IRQ); -#endif set_irq_regs(old_regs); return; case 2: @@ -228,7 +214,7 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, */ struct irqaction timer_irqaction = { - .handler = timer_interrupt, + .handler = rtc_timer_interrupt, .name = "timer", }; diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h index 7fa62488bd1..f54bdf658cd 100644 --- a/arch/alpha/kernel/machvec_impl.h +++ b/arch/alpha/kernel/machvec_impl.h @@ -43,10 +43,7 @@ #define CAT1(x,y) x##y #define CAT(x,y) CAT1(x,y) -#define DO_DEFAULT_RTC \ - .rtc_port = 0x70, \ - .rtc_get_time = common_get_rtc_time, \ - .rtc_set_time = common_set_rtc_time +#define DO_DEFAULT_RTC .rtc_port = 0x70 #define DO_EV4_MMU \ .max_asn = EV4_MAX_ASN, \ diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index d821b17047e..c52e7f0ee5f 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -83,6 
+83,8 @@ struct alpha_pmu_t { long pmc_left[3]; /* Subroutine for allocation of PMCs. Enforces constraints. */ int (*check_constraints)(struct perf_event **, unsigned long *, int); + /* Subroutine for checking validity of a raw event for this PMU. */ + int (*raw_event_valid)(u64 config); }; /* @@ -203,6 +205,12 @@ success: } +static int ev67_raw_event_valid(u64 config) +{ + return config >= EV67_CYCLES && config < EV67_LAST_ET; +}; + + static const struct alpha_pmu_t ev67_pmu = { .event_map = ev67_perfmon_event_map, .max_events = ARRAY_SIZE(ev67_perfmon_event_map), @@ -211,7 +219,8 @@ static const struct alpha_pmu_t ev67_pmu = { .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, .pmc_left = {16, 4, 0}, - .check_constraints = ev67_check_constraints + .check_constraints = ev67_check_constraints, + .raw_event_valid = ev67_raw_event_valid, }; @@ -609,7 +618,9 @@ static int __hw_perf_event_init(struct perf_event *event) } else if (attr->type == PERF_TYPE_HW_CACHE) { return -EOPNOTSUPP; } else if (attr->type == PERF_TYPE_RAW) { - ev = attr->config & 0xff; + if (!alpha_pmu->raw_event_valid(attr->config)) + return -EINVAL; + ev = attr->config; } else { return -EOPNOTSUPP; } diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index f2360a74e5d..1941a07b581 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -46,6 +46,23 @@ void (*pm_power_off)(void) = machine_power_off; EXPORT_SYMBOL(pm_power_off); +#ifdef CONFIG_ALPHA_WTINT +/* + * Sleep the CPU. + * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts. + */ +void arch_cpu_idle(void) +{ + wtint(0); + local_irq_enable(); +} + +void arch_cpu_idle_dead(void) +{ + wtint(INT_MAX); +} +#endif /* ALPHA_WTINT */ + struct halt_info { int mode; char *restart_cmd; diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h index d3e52d3fd59..da2d6ec9c37 100644 --- a/arch/alpha/kernel/proto.h +++ b/arch/alpha/kernel/proto.h @@ -135,17 +135,15 @@ extern void unregister_srm_console(void); /* smp.c */ extern void setup_smp(void); extern void handle_ipi(struct pt_regs *); -extern void smp_percpu_timer_interrupt(struct pt_regs *); /* bios32.c */ /* extern void reset_for_srm(void); */ /* time.c */ -extern irqreturn_t timer_interrupt(int irq, void *dev); +extern irqreturn_t rtc_timer_interrupt(int irq, void *dev); +extern void init_clockevent(void); extern void common_init_rtc(void); extern unsigned long est_cycle_freq; -extern unsigned int common_get_rtc_time(struct rtc_time *time); -extern int common_set_rtc_time(struct rtc_time *time); /* smc37c93x.c */ extern void SMC93x_Init(void); diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c new file mode 100644 index 00000000000..c8d284d8521 --- /dev/null +++ b/arch/alpha/kernel/rtc.c @@ -0,0 +1,323 @@ +/* + * linux/arch/alpha/kernel/rtc.c + * + * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds + * + * This file contains date handling. + */ +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/string.h> +#include <linux/mc146818rtc.h> +#include <linux/bcd.h> +#include <linux/rtc.h> +#include <linux/platform_device.h> + +#include <asm/rtc.h> + +#include "proto.h" + + +/* + * Support for the RTC device. + * + * We don't want to use the rtc-cmos driver, because we don't want to support + * alarms, as that would be indistinguishable from timer interrupts. 
+ * + * Further, generic code is really, really tied to a 1900 epoch. This is + * true in __get_rtc_time as well as the users of struct rtc_time e.g. + * rtc_tm_to_time. Thankfully all of the other epochs in use are later + * than 1900, and so it's easy to adjust. + */ + +static unsigned long rtc_epoch; + +static int __init +specifiy_epoch(char *str) +{ + unsigned long epoch = simple_strtoul(str, NULL, 0); + if (epoch < 1900) + printk("Ignoring invalid user specified epoch %lu\n", epoch); + else + rtc_epoch = epoch; + return 1; +} +__setup("epoch=", specifiy_epoch); + +static void __init +init_rtc_epoch(void) +{ + int epoch, year, ctrl; + + if (rtc_epoch != 0) { + /* The epoch was specified on the command-line. */ + return; + } + + /* Detect the epoch in use on this computer. */ + ctrl = CMOS_READ(RTC_CONTROL); + year = CMOS_READ(RTC_YEAR); + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + year = bcd2bin(year); + + /* PC-like is standard; used for year >= 70 */ + epoch = 1900; + if (year < 20) { + epoch = 2000; + } else if (year >= 20 && year < 48) { + /* NT epoch */ + epoch = 1980; + } else if (year >= 48 && year < 70) { + /* Digital UNIX epoch */ + epoch = 1952; + } + rtc_epoch = epoch; + + printk(KERN_INFO "Using epoch %d for rtc year %d\n", epoch, year); +} + +static int +alpha_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + __get_rtc_time(tm); + + /* Adjust for non-default epochs. It's easier to depend on the + generic __get_rtc_time and adjust the epoch here than create + a copy of __get_rtc_time with the edits we need. */ + if (rtc_epoch != 1900) { + int year = tm->tm_year; + /* Undo the century adjustment made in __get_rtc_time. */ + if (year >= 100) + year -= 100; + year += rtc_epoch - 1900; + /* Redo the century adjustment with the epoch in place. */ + if (year <= 69) + year += 100; + tm->tm_year = year; + } + + return rtc_valid_tm(tm); +} + +static int +alpha_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct rtc_time xtm; + + if (rtc_epoch != 1900) { + xtm = *tm; + xtm.tm_year -= rtc_epoch - 1900; + tm = &xtm; + } + + return __set_rtc_time(tm); +} + +static int +alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime) +{ + int retval = 0; + int real_seconds, real_minutes, cmos_minutes; + unsigned char save_control, save_freq_select; + + /* Note: This code only updates minutes and seconds. Comments + indicate this was to avoid messing with unknown time zones, + and with the epoch nonsense described above. In order for + this to work, the existing clock cannot be off by more than + 15 minutes. + + ??? This choice is may be out of date. The x86 port does + not have problems with timezones, and the epoch processing has + now been fixed in alpha_set_rtc_time. + + In either case, one can always force a full rtc update with + the userland hwclock program, so surely 15 minute accuracy + is no real burden. */ + + /* In order to set the CMOS clock precisely, we have to be called + 500 ms after the second nowtime has started, because when + nowtime is written into the registers of the CMOS clock, it will + jump to the next second precisely 500 ms later. Check the Motorola + MC146818A or Dallas DS12887 data sheet for details. 
*/ + + /* irq are locally disabled here */ + spin_lock(&rtc_lock); + /* Tell the clock it's being set */ + save_control = CMOS_READ(RTC_CONTROL); + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + + /* Stop and reset prescaler */ + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + + cmos_minutes = CMOS_READ(RTC_MINUTES); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + cmos_minutes = bcd2bin(cmos_minutes); + + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) { + /* correct for half hour time zone */ + real_minutes += 30; + } + real_minutes %= 60; + + if (abs(real_minutes - cmos_minutes) < 30) { + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + real_seconds = bin2bcd(real_seconds); + real_minutes = bin2bcd(real_minutes); + } + CMOS_WRITE(real_seconds,RTC_SECONDS); + CMOS_WRITE(real_minutes,RTC_MINUTES); + } else { + printk_once(KERN_NOTICE + "set_rtc_mmss: can't update from %d to %d\n", + cmos_minutes, real_minutes); + retval = -1; + } + + /* The following flags have to be released exactly in this order, + * otherwise the DS12887 (popular MC146818A clone with integrated + * battery and quartz) will not reset the oscillator and will not + * update precisely 500 ms later. You won't find this mentioned in + * the Dallas Semiconductor data sheets, but who believes data + * sheets anyway ... -- Markus Kuhn + */ + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + spin_unlock(&rtc_lock); + + return retval; +} + +static int +alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case RTC_EPOCH_READ: + return put_user(rtc_epoch, (unsigned long __user *)arg); + case RTC_EPOCH_SET: + if (arg < 1900) + return -EINVAL; + rtc_epoch = arg; + return 0; + default: + return -ENOIOCTLCMD; + } +} + +static const struct rtc_class_ops alpha_rtc_ops = { + .read_time = alpha_rtc_read_time, + .set_time = alpha_rtc_set_time, + .set_mmss = alpha_rtc_set_mmss, + .ioctl = alpha_rtc_ioctl, +}; + +/* + * Similarly, except do the actual CMOS access on the boot cpu only. + * This requires marshalling the data across an interprocessor call. 
+ */ + +#if defined(CONFIG_SMP) && \ + (defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_MARVEL)) +# define HAVE_REMOTE_RTC 1 + +union remote_data { + struct rtc_time *tm; + unsigned long now; + long retval; +}; + +static void +do_remote_read(void *data) +{ + union remote_data *x = data; + x->retval = alpha_rtc_read_time(NULL, x->tm); +} + +static int +remote_read_time(struct device *dev, struct rtc_time *tm) +{ + union remote_data x; + if (smp_processor_id() != boot_cpuid) { + x.tm = tm; + smp_call_function_single(boot_cpuid, do_remote_read, &x, 1); + return x.retval; + } + return alpha_rtc_read_time(NULL, tm); +} + +static void +do_remote_set(void *data) +{ + union remote_data *x = data; + x->retval = alpha_rtc_set_time(NULL, x->tm); +} + +static int +remote_set_time(struct device *dev, struct rtc_time *tm) +{ + union remote_data x; + if (smp_processor_id() != boot_cpuid) { + x.tm = tm; + smp_call_function_single(boot_cpuid, do_remote_set, &x, 1); + return x.retval; + } + return alpha_rtc_set_time(NULL, tm); +} + +static void +do_remote_mmss(void *data) +{ + union remote_data *x = data; + x->retval = alpha_rtc_set_mmss(NULL, x->now); +} + +static int +remote_set_mmss(struct device *dev, unsigned long now) +{ + union remote_data x; + if (smp_processor_id() != boot_cpuid) { + x.now = now; + smp_call_function_single(boot_cpuid, do_remote_mmss, &x, 1); + return x.retval; + } + return alpha_rtc_set_mmss(NULL, now); +} + +static const struct rtc_class_ops remote_rtc_ops = { + .read_time = remote_read_time, + .set_time = remote_set_time, + .set_mmss = remote_set_mmss, + .ioctl = alpha_rtc_ioctl, +}; +#endif + +static int __init +alpha_rtc_init(void) +{ + const struct rtc_class_ops *ops; + struct platform_device *pdev; + struct rtc_device *rtc; + const char *name; + + init_rtc_epoch(); + name = "rtc-alpha"; + ops = &alpha_rtc_ops; + +#ifdef HAVE_REMOTE_RTC + if (alpha_mv.rtc_boot_cpu_only) + ops = &remote_rtc_ops; +#endif + + pdev = platform_device_register_simple(name, -1, NULL, 0); + rtc = devm_rtc_device_register(&pdev->dev, name, ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} +device_initcall(alpha_rtc_init); diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 9e3107cc5eb..b20af76f12c 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -115,10 +115,17 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE; #ifdef CONFIG_ALPHA_GENERIC struct alpha_machine_vector alpha_mv; +#endif + +#ifndef alpha_using_srm int alpha_using_srm; EXPORT_SYMBOL(alpha_using_srm); #endif +#ifndef alpha_using_qemu +int alpha_using_qemu; +#endif + static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, unsigned long); static struct alpha_machine_vector *get_sysvec_byname(const char *); @@ -529,11 +536,15 @@ setup_arch(char **cmdline_p) atomic_notifier_chain_register(&panic_notifier_list, &alpha_panic_block); -#ifdef CONFIG_ALPHA_GENERIC +#ifndef alpha_using_srm /* Assume that we've booted from SRM if we haven't booted from MILO. Detect the later by looking for "MILO" in the system serial nr. */ alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0; #endif +#ifndef alpha_using_qemu + /* Similarly, look for QEMU. 
*/ + alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0; +#endif /* If we are using SRM, we want to allow callbacks as early as possible, so do this NOW, and then @@ -1207,6 +1218,7 @@ show_cpuinfo(struct seq_file *f, void *slot) char *systype_name; char *sysvariation_name; int nr_processors; + unsigned long timer_freq; cpu_index = (unsigned) (cpu->type - 1); cpu_name = "Unknown"; @@ -1218,6 +1230,12 @@ show_cpuinfo(struct seq_file *f, void *slot) nr_processors = get_nr_processors(cpu, hwrpb->nr_processors); +#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200 + timer_freq = (100UL * hwrpb->intr_freq) / 4096; +#else + timer_freq = 100UL * CONFIG_HZ; +#endif + seq_printf(f, "cpu\t\t\t: Alpha\n" "cpu model\t\t: %s\n" "cpu variation\t\t: %ld\n" @@ -1243,8 +1261,7 @@ show_cpuinfo(struct seq_file *f, void *slot) (char*)hwrpb->ssn, est_cycle_freq ? : hwrpb->cycle_freq, est_cycle_freq ? "est." : "", - hwrpb->intr_freq / 4096, - (100 * hwrpb->intr_freq / 4096) % 100, + timer_freq / 100, timer_freq % 100, hwrpb->pagesize, hwrpb->pa_bits, hwrpb->max_asn, diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 9dbbcb3b914..99ac36d5de4 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -138,9 +138,11 @@ smp_callin(void) /* Get our local ticker going. */ smp_setup_percpu_timer(cpuid); + init_clockevent(); /* Call platform-specific callin, if specified */ - if (alpha_mv.smp_callin) alpha_mv.smp_callin(); + if (alpha_mv.smp_callin) + alpha_mv.smp_callin(); /* All kernel threads share the same mm context. */ atomic_inc(&init_mm.mm_count); @@ -498,35 +500,6 @@ smp_cpus_done(unsigned int max_cpus) ((bogosum + 2500) / (5000/HZ)) % 100); } - -void -smp_percpu_timer_interrupt(struct pt_regs *regs) -{ - struct pt_regs *old_regs; - int cpu = smp_processor_id(); - unsigned long user = user_mode(regs); - struct cpuinfo_alpha *data = &cpu_data[cpu]; - - old_regs = set_irq_regs(regs); - - /* Record kernel PC. */ - profile_tick(CPU_PROFILING); - - if (!--data->prof_counter) { - /* We need to make like a normal interrupt -- otherwise - timer interrupts ignore the global interrupt lock, - which would be a Bad Thing. 
*/ - irq_enter(); - - update_process_times(user); - - data->prof_counter = data->prof_multiplier; - - irq_exit(); - } - set_irq_regs(old_regs); -} - int setup_profiling_timer(unsigned int multiplier) { diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index 5a0af11b3a6..608f2a7fa0a 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -224,8 +224,6 @@ struct alpha_machine_vector jensen_mv __initmv = { .machine_check = jensen_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .rtc_port = 0x170, - .rtc_get_time = common_get_rtc_time, - .rtc_set_time = common_set_rtc_time, .nr_irqs = 16, .device_interrupt = jensen_device_interrupt, diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index c92e389ff21..f21d61fab67 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -22,7 +22,6 @@ #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include <asm/vga.h> -#include <asm/rtc.h> #include "proto.h" #include "err_impl.h" @@ -400,57 +399,6 @@ marvel_init_rtc(void) init_rtc_irq(); } -struct marvel_rtc_time { - struct rtc_time *time; - int retval; -}; - -#ifdef CONFIG_SMP -static void -smp_get_rtc_time(void *data) -{ - struct marvel_rtc_time *mrt = data; - mrt->retval = __get_rtc_time(mrt->time); -} - -static void -smp_set_rtc_time(void *data) -{ - struct marvel_rtc_time *mrt = data; - mrt->retval = __set_rtc_time(mrt->time); -} -#endif - -static unsigned int -marvel_get_rtc_time(struct rtc_time *time) -{ -#ifdef CONFIG_SMP - struct marvel_rtc_time mrt; - - if (smp_processor_id() != boot_cpuid) { - mrt.time = time; - smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1); - return mrt.retval; - } -#endif - return __get_rtc_time(time); -} - -static int -marvel_set_rtc_time(struct rtc_time *time) -{ -#ifdef CONFIG_SMP - struct marvel_rtc_time mrt; - - if (smp_processor_id() != boot_cpuid) { - mrt.time = time; - smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1); - return mrt.retval; - } -#endif - return __set_rtc_time(time); -} - static void marvel_smp_callin(void) { @@ -492,8 +440,7 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = { .vector_name = "MARVEL/EV7", DO_EV7_MMU, .rtc_port = 0x70, - .rtc_get_time = marvel_get_rtc_time, - .rtc_set_time = marvel_set_rtc_time, + .rtc_boot_cpu_only = 1, DO_MARVEL_IO, .machine_check = marvel_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index ea339503655..ee39cee8064 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -3,13 +3,7 @@ * * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds * - * This file contains the PC-specific time handling details: - * reading the RTC at bootup, etc.. - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1995-03-26 Markus Kuhn - * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 - * precision CMOS clock update + * This file contains the clocksource time handling. * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 * "A Kernel Model for Precision Timekeeping" by Dave Mills * 1997-01-09 Adrian Sun @@ -21,9 +15,6 @@ * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net) * fixed algorithm in do_gettimeofday() for calculating the precise time * from processor cycle counter (now taking lost_ticks into account) - * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de> - * Fixed time_init to be aware of epoches != 1900. 
This prevents - * booting up in 2048 for me;) Code is stolen from rtc.c. * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com> * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM */ @@ -46,40 +37,19 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/hwrpb.h> -#include <asm/rtc.h> #include <linux/mc146818rtc.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/clocksource.h> +#include <linux/clockchips.h> #include "proto.h" #include "irq_impl.h" -static int set_rtc_mmss(unsigned long); - DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); -#define TICK_SIZE (tick_nsec / 1000) - -/* - * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting - * by 48 gives us 16 bits for HZ while keeping the accuracy good even - * for large CPU clock rates. - */ -#define FIX_SHIFT 48 - -/* lump static variables together for more efficient access: */ -static struct { - /* cycle counter last time it got invoked */ - __u32 last_time; - /* ticks/cycle * 2^48 */ - unsigned long scaled_ticks_per_cycle; - /* partial unused tick */ - unsigned long partial_tick; -} state; - unsigned long est_cycle_freq; #ifdef CONFIG_IRQ_WORK @@ -108,109 +78,156 @@ static inline __u32 rpcc(void) return __builtin_alpha_rpcc(); } -int update_persistent_clock(struct timespec now) -{ - return set_rtc_mmss(now.tv_sec); -} -void read_persistent_clock(struct timespec *ts) + +/* + * The RTC as a clock_event_device primitive. + */ + +static DEFINE_PER_CPU(struct clock_event_device, cpu_ce); + +irqreturn_t +rtc_timer_interrupt(int irq, void *dev) { - unsigned int year, mon, day, hour, min, sec, epoch; - - sec = CMOS_READ(RTC_SECONDS); - min = CMOS_READ(RTC_MINUTES); - hour = CMOS_READ(RTC_HOURS); - day = CMOS_READ(RTC_DAY_OF_MONTH); - mon = CMOS_READ(RTC_MONTH); - year = CMOS_READ(RTC_YEAR); - - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - sec = bcd2bin(sec); - min = bcd2bin(min); - hour = bcd2bin(hour); - day = bcd2bin(day); - mon = bcd2bin(mon); - year = bcd2bin(year); - } + int cpu = smp_processor_id(); + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); - /* PC-like is standard; used for year >= 70 */ - epoch = 1900; - if (year < 20) - epoch = 2000; - else if (year >= 20 && year < 48) - /* NT epoch */ - epoch = 1980; - else if (year >= 48 && year < 70) - /* Digital UNIX epoch */ - epoch = 1952; + /* Don't run the hook for UNUSED or SHUTDOWN. */ + if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC)) + ce->event_handler(ce); - printk(KERN_INFO "Using epoch = %d\n", epoch); + if (test_irq_work_pending()) { + clear_irq_work_pending(); + irq_work_run(); + } - if ((year += epoch) < 1970) - year += 100; + return IRQ_HANDLED; +} - ts->tv_sec = mktime(year, mon, day, hour, min, sec); - ts->tv_nsec = 0; +static void +rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce) +{ + /* The mode member of CE is updated in generic code. + Since we only support periodic events, nothing to do. */ +} + +static int +rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce) +{ + /* This hook is for oneshot mode, which we don't support. 
*/ + return -EINVAL; } +static void __init +init_rtc_clockevent(void) +{ + int cpu = smp_processor_id(); + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); + + *ce = (struct clock_event_device){ + .name = "rtc", + .features = CLOCK_EVT_FEAT_PERIODIC, + .rating = 100, + .cpumask = cpumask_of(cpu), + .set_mode = rtc_ce_set_mode, + .set_next_event = rtc_ce_set_next_event, + }; + clockevents_config_and_register(ce, CONFIG_HZ, 0, 0); +} + /* - * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "xtime_update()" routine every clocktick + * The QEMU clock as a clocksource primitive. */ -irqreturn_t timer_interrupt(int irq, void *dev) + +static cycle_t +qemu_cs_read(struct clocksource *cs) { - unsigned long delta; - __u32 now; - long nticks; + return qemu_get_vmtime(); +} -#ifndef CONFIG_SMP - /* Not SMP, do kernel PC profiling here. */ - profile_tick(CPU_PROFILING); -#endif +static struct clocksource qemu_cs = { + .name = "qemu", + .rating = 400, + .read = qemu_cs_read, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .max_idle_ns = LONG_MAX +}; - /* - * Calculate how many ticks have passed since the last update, - * including any previous partial leftover. Save any resulting - * fraction for the next pass. - */ - now = rpcc(); - delta = now - state.last_time; - state.last_time = now; - delta = delta * state.scaled_ticks_per_cycle + state.partial_tick; - state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1); - nticks = delta >> FIX_SHIFT; - if (nticks) - xtime_update(nticks); +/* + * The QEMU alarm as a clock_event_device primitive. + */ - if (test_irq_work_pending()) { - clear_irq_work_pending(); - irq_work_run(); - } +static void +qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce) +{ + /* The mode member of CE is updated for us in generic code. + Just make sure that the event is disabled. */ + qemu_set_alarm_abs(0); +} -#ifndef CONFIG_SMP - while (nticks--) - update_process_times(user_mode(get_irq_regs())); -#endif +static int +qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce) +{ + qemu_set_alarm_rel(evt); + return 0; +} +static irqreturn_t +qemu_timer_interrupt(int irq, void *dev) +{ + int cpu = smp_processor_id(); + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); + + ce->event_handler(ce); return IRQ_HANDLED; } +static void __init +init_qemu_clockevent(void) +{ + int cpu = smp_processor_id(); + struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); + + *ce = (struct clock_event_device){ + .name = "qemu", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 400, + .cpumask = cpumask_of(cpu), + .set_mode = qemu_ce_set_mode, + .set_next_event = qemu_ce_set_next_event, + }; + + clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX); +} + + void __init common_init_rtc(void) { - unsigned char x; + unsigned char x, sel = 0; /* Reset periodic interrupt frequency. */ - x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; - /* Test includes known working values on various platforms - where 0x26 is wrong; we refuse to change those. */ - if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { - printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x); - CMOS_WRITE(0x26, RTC_FREQ_SELECT); +#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200 + x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; + /* Test includes known working values on various platforms + where 0x26 is wrong; we refuse to change those. 
*/ + if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { + sel = RTC_REF_CLCK_32KHZ + 6; } +#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32 + sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ); +#else +# error "Unknown HZ from arch/alpha/Kconfig" +#endif + if (sel) { + printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n", + CONFIG_HZ, sel); + CMOS_WRITE(sel, RTC_FREQ_SELECT); + } /* Turn on periodic interrupts. */ x = CMOS_READ(RTC_CONTROL); @@ -233,16 +250,37 @@ common_init_rtc(void) init_rtc_irq(); } -unsigned int common_get_rtc_time(struct rtc_time *time) -{ - return __get_rtc_time(time); -} + +#ifndef CONFIG_ALPHA_WTINT +/* + * The RPCC as a clocksource primitive. + * + * While we have free-running timecounters running on all CPUs, and we make + * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter + * with the wall clock, that initialization isn't kept up-to-date across + * different time counters in SMP mode. Therefore we can only use this + * method when there's only one CPU enabled. + * + * When using the WTINT PALcall, the RPCC may shift to a lower frequency, + * or stop altogether, while waiting for the interrupt. Therefore we cannot + * use this method when WTINT is in use. + */ -int common_set_rtc_time(struct rtc_time *time) +static cycle_t read_rpcc(struct clocksource *cs) { - return __set_rtc_time(time); + return rpcc(); } +static struct clocksource clocksource_rpcc = { + .name = "rpcc", + .rating = 300, + .read = read_rpcc, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS +}; +#endif /* ALPHA_WTINT */ + + /* Validate a computed cycle counter result against the known bounds for the given processor core. There's too much brokenness in the way of timing hardware for any one method to work everywhere. :-( @@ -353,33 +391,6 @@ rpcc_after_update_in_progress(void) return rpcc(); } -#ifndef CONFIG_SMP -/* Until and unless we figure out how to get cpu cycle counters - in sync and keep them there, we can't use the rpcc. */ -static cycle_t read_rpcc(struct clocksource *cs) -{ - cycle_t ret = (cycle_t)rpcc(); - return ret; -} - -static struct clocksource clocksource_rpcc = { - .name = "rpcc", - .rating = 300, - .read = read_rpcc, - .mask = CLOCKSOURCE_MASK(32), - .flags = CLOCK_SOURCE_IS_CONTINUOUS -}; - -static inline void register_rpcc_clocksource(long cycle_freq) -{ - clocksource_register_hz(&clocksource_rpcc, cycle_freq); -} -#else /* !CONFIG_SMP */ -static inline void register_rpcc_clocksource(long cycle_freq) -{ -} -#endif /* !CONFIG_SMP */ - void __init time_init(void) { @@ -387,6 +398,15 @@ time_init(void) unsigned long cycle_freq, tolerance; long diff; + if (alpha_using_qemu) { + clocksource_register_hz(&qemu_cs, NSEC_PER_SEC); + init_qemu_clockevent(); + + timer_irqaction.handler = qemu_timer_interrupt; + init_rtc_irq(); + return; + } + /* Calibrate CPU clock -- attempt #1. */ if (!est_cycle_freq) est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); @@ -421,100 +441,25 @@ time_init(void) "and unable to estimate a proper value!\n"); } - /* From John Bowman <bowman@math.ualberta.ca>: allow the values - to settle, as the Update-In-Progress bit going low isn't good - enough on some hardware. 2ms is our guess; we haven't found - bogomips yet, but this is close on a 500Mhz box. 
*/ - __delay(1000000); - - - if (HZ > (1<<16)) { - extern void __you_loose (void); - __you_loose(); - } - - register_rpcc_clocksource(cycle_freq); - - state.last_time = cc1; - state.scaled_ticks_per_cycle - = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; - state.partial_tick = 0L; + /* See above for restrictions on using clocksource_rpcc. */ +#ifndef CONFIG_ALPHA_WTINT + if (hwrpb->nr_processors == 1) + clocksource_register_hz(&clocksource_rpcc, cycle_freq); +#endif /* Startup the timer source. */ alpha_mv.init_rtc(); + init_rtc_clockevent(); } -/* - * In order to set the CMOS clock precisely, set_rtc_mmss has to be - * called 500 ms after the second nowtime has started, because when - * nowtime is written into the registers of the CMOS clock, it will - * jump to the next second precisely 500 ms later. Check the Motorola - * MC146818A or Dallas DS12887 data sheet for details. - * - * BUG: This routine does not handle hour overflow properly; it just - * sets the minutes. Usually you won't notice until after reboot! - */ - - -static int -set_rtc_mmss(unsigned long nowtime) +/* Initialize the clock_event_device for secondary cpus. */ +#ifdef CONFIG_SMP +void __init +init_clockevent(void) { - int retval = 0; - int real_seconds, real_minutes, cmos_minutes; - unsigned char save_control, save_freq_select; - - /* irq are locally disabled here */ - spin_lock(&rtc_lock); - /* Tell the clock it's being set */ - save_control = CMOS_READ(RTC_CONTROL); - CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - - /* Stop and reset prescaler */ - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - cmos_minutes = CMOS_READ(RTC_MINUTES); - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - cmos_minutes = bcd2bin(cmos_minutes); - - /* - * since we're only adjusting minutes and seconds, - * don't interfere with hour overflow. This avoids - * messing with unknown time zones but requires your - * RTC not to be off by more than 15 minutes - */ - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; - if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) { - /* correct for half hour time zone */ - real_minutes += 30; - } - real_minutes %= 60; - - if (abs(real_minutes - cmos_minutes) < 30) { - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - real_seconds = bin2bcd(real_seconds); - real_minutes = bin2bcd(real_minutes); - } - CMOS_WRITE(real_seconds,RTC_SECONDS); - CMOS_WRITE(real_minutes,RTC_MINUTES); - } else { - printk_once(KERN_NOTICE - "set_rtc_mmss: can't update from %d to %d\n", - cmos_minutes, real_minutes); - retval = -1; - } - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... 
-- Markus Kuhn - */ - CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - spin_unlock(&rtc_lock); - - return retval; + if (alpha_using_qemu) + init_qemu_clockevent(); + else + init_rtc_clockevent(); } +#endif diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index bd0665cdc84..9c4c189eb22 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -241,6 +241,21 @@ do_entIF(unsigned long type, struct pt_regs *regs) (const char *)(data[1] | (long)data[2] << 32), data[0]); } +#ifdef CONFIG_ALPHA_WTINT + if (type == 4) { + /* If CALL_PAL WTINT is totally unsupported by the + PALcode, e.g. MILO, "emulate" it by overwriting + the insn. */ + unsigned int *pinsn + = (unsigned int *) regs->pc - 1; + if (*pinsn == PAL_wtint) { + *pinsn = 0x47e01400; /* mov 0,$0 */ + imb(); + regs->r0 = 0; + return; + } + } +#endif /* ALPHA_WTINT */ die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"), regs, type, NULL); } diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index ffb19b7da99..ff3c10721ca 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -130,7 +130,7 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, *dst = word | tmp; checksum += carry; } - if (err) *errp = err; + if (err && errp) *errp = err; return checksum; } @@ -185,7 +185,7 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src, *dst = word | tmp; checksum += carry; } - if (err) *errp = err; + if (err && errp) *errp = err; return checksum; } @@ -242,7 +242,7 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src, stq_u(partial_dest | second_dest, dst); out: checksum += carry; - if (err) *errp = err; + if (err && errp) *errp = err; return checksum; } @@ -325,7 +325,7 @@ csum_partial_cfu_unaligned(const unsigned long __user * src, stq_u(partial_dest | word | second_dest, dst); checksum += carry; } - if (err) *errp = err; + if (err && errp) *errp = err; return checksum; } @@ -339,7 +339,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len, if (len) { if (!access_ok(VERIFY_READ, src, len)) { - *errp = -EFAULT; + if (errp) *errp = -EFAULT; memset(dst, 0, len); return sum; } diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S index d8b94e1c7fc..356bb2fdd70 100644 --- a/arch/alpha/lib/ev6-memset.S +++ b/arch/alpha/lib/ev6-memset.S @@ -30,14 +30,15 @@ .set noat .set noreorder .text + .globl memset .globl __memset + .globl ___memset .globl __memsetw .globl __constant_c_memset - .globl memset - .ent __memset + .ent ___memset .align 5 -__memset: +___memset: .frame $30,0,$26,0 .prologue 0 @@ -227,7 +228,7 @@ end_b: nop nop ret $31,($26),1 # L0 : - .end __memset + .end ___memset /* * This is the original body of code, prior to replication and @@ -594,4 +595,5 @@ end_w: .end __memsetw -memset = __memset +memset = ___memset +__memset = ___memset diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S index 311b8cfc691..76ccc6d1f36 100644 --- a/arch/alpha/lib/memset.S +++ b/arch/alpha/lib/memset.S @@ -19,11 +19,13 @@ .text .globl memset .globl __memset + .globl ___memset .globl __memsetw .globl __constant_c_memset - .ent __memset + + .ent ___memset .align 5 -__memset: +___memset: .frame $30,0,$26,0 .prologue 0 @@ -103,7 +105,7 @@ within_one_quad: end: ret $31,($26),1 /* E1 */ - .end __memset + .end ___memset .align 5 .ent __memsetw @@ -121,4 +123,5 @@ __memsetw: .end __memsetw -memset = __memset +memset = 
___memset +__memset = ___memset diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi index d9f8249aa66..3942634f805 100644 --- a/arch/arc/boot/dts/abilis_tb100.dtsi +++ b/arch/arc/boot/dts/abilis_tb100.dtsi @@ -43,124 +43,124 @@ iomux: iomux@FF10601c { /* Port 1 */ pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ - pingrp = "mis0_pins"; + abilis,function = "mis0"; }; pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ - pingrp = "mis1_pins"; + abilis,function = "mis1"; }; pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ - pingrp = "gpioa_pins"; + abilis,function = "gpioa"; }; pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ - pingrp = "mip1_pins"; + abilis,function = "mip1"; }; /* Port 2 */ pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ - pingrp = "mis2_pins"; + abilis,function = "mis2"; }; pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ - pingrp = "mis3_pins"; + abilis,function = "mis3"; }; pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ - pingrp = "gpioc_pins"; + abilis,function = "gpioc"; }; pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ - pingrp = "mip3_pins"; + abilis,function = "mip3"; }; /* Port 3 */ pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ - pingrp = "mis4_pins"; + abilis,function = "mis4"; }; pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ - pingrp = "mis5_pins"; + abilis,function = "mis5"; }; pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ - pingrp = "gpioe_pins"; + abilis,function = "gpioe"; }; pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ - pingrp = "mip5_pins"; + abilis,function = "mip5"; }; /* Port 4 */ pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ - pingrp = "mis6_pins"; + abilis,function = "mis6"; }; pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ - pingrp = "mis7_pins"; + abilis,function = "mis7"; }; pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ - pingrp = "gpiog_pins"; + abilis,function = "gpiog"; }; pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ - pingrp = "mip7_pins"; + abilis,function = "mip7"; }; /* Port 5 */ pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ - pingrp = "gpioj_pins"; + abilis,function = "gpioj"; }; pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ - pingrp = "gpiok_pins"; + abilis,function = "gpiok"; }; pctl_ciplus: pctl-ciplus { /* CI+ interface */ - pingrp = "ciplus_pins"; + abilis,function = "ciplus"; }; pctl_mcard: pctl-mcard { /* M-Card interface */ - pingrp = "mcard_pins"; + abilis,function = "mcard"; }; /* Port 6 */ pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ - pingrp = "mop_pins"; + abilis,function = "mop"; }; pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ - pingrp = "mos0_pins"; + abilis,function = "mos0"; }; pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ - pingrp = "mos1_pins"; + abilis,function = "mos1"; }; pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ - pingrp = "mos2_pins"; + abilis,function = "mos2"; }; pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ - pingrp = "mos3_pins"; + abilis,function = "mos3"; }; /* Port 7 */ pctl_uart0: pctl-uart0 { /* UART 0 */ - pingrp = "uart0_pins"; + abilis,function = "uart0"; }; pctl_uart1: pctl-uart1 { /* UART 1 */ - pingrp = "uart1_pins"; + abilis,function = "uart1"; }; pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ - pingrp = "gpiol_pins"; + abilis,function = "gpiol"; }; pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ - pingrp = "gpiom_pins"; + abilis,function = "gpiom"; }; /* Port 8 */ pctl_spi3: pctl-spi3 { - pingrp = "spi3_pins"; + abilis,function = "spi3"; }; /* Port 9 */ pctl_spi1: pctl-spi1 { - pingrp = "spi1_pins"; + 
abilis,function = "spi1"; }; pctl_gpio_n: pctl-gpio-n { - pingrp = "gpion_pins"; + abilis,function = "gpion"; }; /* Unmuxed GPIOs */ pctl_gpio_b: pctl-gpio-b { - pingrp = "gpiob_pins"; + abilis,function = "gpiob"; }; pctl_gpio_d: pctl-gpio-d { - pingrp = "gpiod_pins"; + abilis,function = "gpiod"; }; pctl_gpio_f: pctl-gpio-f { - pingrp = "gpiof_pins"; + abilis,function = "gpiof"; }; pctl_gpio_h: pctl-gpio-h { - pingrp = "gpioh_pins"; + abilis,function = "gpioh"; }; pctl_gpio_i: pctl-gpio-i { - pingrp = "gpioi_pins"; + abilis,function = "gpioi"; }; }; @@ -172,9 +172,10 @@ interrupts = <27 2>; reg = <0xFF140000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <0>; - gpio-pins = <&pctl_gpio_a>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioa"; }; gpiob: gpio@FF141000 { compatible = "abilis,tb10x-gpio"; @@ -184,9 +185,10 @@ interrupts = <27 2>; reg = <0xFF141000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <3>; - gpio-pins = <&pctl_gpio_b>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiob"; }; gpioc: gpio@FF142000 { compatible = "abilis,tb10x-gpio"; @@ -196,9 +198,10 @@ interrupts = <27 2>; reg = <0xFF142000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <5>; - gpio-pins = <&pctl_gpio_c>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioc"; }; gpiod: gpio@FF143000 { compatible = "abilis,tb10x-gpio"; @@ -208,9 +211,10 @@ interrupts = <27 2>; reg = <0xFF143000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <8>; - gpio-pins = <&pctl_gpio_d>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiod"; }; gpioe: gpio@FF144000 { compatible = "abilis,tb10x-gpio"; @@ -220,9 +224,10 @@ interrupts = <27 2>; reg = <0xFF144000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <10>; - gpio-pins = <&pctl_gpio_e>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioe"; }; gpiof: gpio@FF145000 { compatible = "abilis,tb10x-gpio"; @@ -232,9 +237,10 @@ interrupts = <27 2>; reg = <0xFF145000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <13>; - gpio-pins = <&pctl_gpio_f>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiof"; }; gpiog: gpio@FF146000 { compatible = "abilis,tb10x-gpio"; @@ -244,9 +250,10 @@ interrupts = <27 2>; reg = <0xFF146000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <15>; - gpio-pins = <&pctl_gpio_g>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiog"; }; gpioh: gpio@FF147000 { compatible = "abilis,tb10x-gpio"; @@ -256,9 +263,10 @@ interrupts = <27 2>; reg = <0xFF147000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <18>; - gpio-pins = <&pctl_gpio_h>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioh"; }; gpioi: gpio@FF148000 { compatible = "abilis,tb10x-gpio"; @@ -268,9 +276,10 @@ interrupts = <27 2>; reg = <0xFF148000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <20>; - gpio-pins = <&pctl_gpio_i>; + #gpio-cells = <2>; + abilis,ngpio = <12>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioi"; }; gpioj: gpio@FF149000 { compatible = "abilis,tb10x-gpio"; @@ -280,9 +289,10 @@ interrupts = <27 
2>; reg = <0xFF149000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <32>; - gpio-pins = <&pctl_gpio_j>; + #gpio-cells = <2>; + abilis,ngpio = <32>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioj"; }; gpiok: gpio@FF14a000 { compatible = "abilis,tb10x-gpio"; @@ -292,9 +302,10 @@ interrupts = <27 2>; reg = <0xFF14A000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <64>; - gpio-pins = <&pctl_gpio_k>; + #gpio-cells = <2>; + abilis,ngpio = <22>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiok"; }; gpiol: gpio@FF14b000 { compatible = "abilis,tb10x-gpio"; @@ -304,9 +315,10 @@ interrupts = <27 2>; reg = <0xFF14B000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <86>; - gpio-pins = <&pctl_gpio_l>; + #gpio-cells = <2>; + abilis,ngpio = <4>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiol"; }; gpiom: gpio@FF14c000 { compatible = "abilis,tb10x-gpio"; @@ -316,9 +328,10 @@ interrupts = <27 2>; reg = <0xFF14C000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <90>; - gpio-pins = <&pctl_gpio_m>; + #gpio-cells = <2>; + abilis,ngpio = <4>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiom"; }; gpion: gpio@FF14d000 { compatible = "abilis,tb10x-gpio"; @@ -328,9 +341,10 @@ interrupts = <27 2>; reg = <0xFF14D000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <94>; - gpio-pins = <&pctl_gpio_n>; + #gpio-cells = <2>; + abilis,ngpio = <5>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpion"; }; }; }; diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts index ebc313a9f5b..3dd6ed94146 100644 --- a/arch/arc/boot/dts/abilis_tb100_dvk.dts +++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts @@ -64,62 +64,62 @@ compatible = "gpio-leds"; power { label = "Power"; - gpios = <&gpioi 0>; + gpios = <&gpioi 0 0>; linux,default-trigger = "default-on"; }; heartbeat { label = "Heartbeat"; - gpios = <&gpioi 1>; + gpios = <&gpioi 1 0>; linux,default-trigger = "heartbeat"; }; led2 { label = "LED2"; - gpios = <&gpioi 2>; + gpios = <&gpioi 2 0>; default-state = "off"; }; led3 { label = "LED3"; - gpios = <&gpioi 3>; + gpios = <&gpioi 3 0>; default-state = "off"; }; led4 { label = "LED4"; - gpios = <&gpioi 4>; + gpios = <&gpioi 4 0>; default-state = "off"; }; led5 { label = "LED5"; - gpios = <&gpioi 5>; + gpios = <&gpioi 5 0>; default-state = "off"; }; led6 { label = "LED6"; - gpios = <&gpioi 6>; + gpios = <&gpioi 6 0>; default-state = "off"; }; led7 { label = "LED7"; - gpios = <&gpioi 7>; + gpios = <&gpioi 7 0>; default-state = "off"; }; led8 { label = "LED8"; - gpios = <&gpioi 8>; + gpios = <&gpioi 8 0>; default-state = "off"; }; led9 { label = "LED9"; - gpios = <&gpioi 9>; + gpios = <&gpioi 9 0>; default-state = "off"; }; led10 { label = "LED10"; - gpios = <&gpioi 10>; + gpios = <&gpioi 10 0>; default-state = "off"; }; led11 { label = "LED11"; - gpios = <&gpioi 11>; + gpios = <&gpioi 11 0>; default-state = "off"; }; }; diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi index da8ca7941e6..b0467229a5c 100644 --- a/arch/arc/boot/dts/abilis_tb101.dtsi +++ b/arch/arc/boot/dts/abilis_tb101.dtsi @@ -43,133 +43,133 @@ iomux: iomux@FF10601c { /* Port 1 */ pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ - pingrp = "mis0_pins"; + abilis,function = "mis0"; }; pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ - pingrp = "mis1_pins"; + abilis,function = "mis1"; }; pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ - pingrp = 
"gpioa_pins"; + abilis,function = "gpioa"; }; pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ - pingrp = "mip1_pins"; + abilis,function = "mip1"; }; /* Port 2 */ pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ - pingrp = "mis2_pins"; + abilis,function = "mis2"; }; pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ - pingrp = "mis3_pins"; + abilis,function = "mis3"; }; pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ - pingrp = "gpioc_pins"; + abilis,function = "gpioc"; }; pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ - pingrp = "mip3_pins"; + abilis,function = "mip3"; }; /* Port 3 */ pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ - pingrp = "mis4_pins"; + abilis,function = "mis4"; }; pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ - pingrp = "mis5_pins"; + abilis,function = "mis5"; }; pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ - pingrp = "gpioe_pins"; + abilis,function = "gpioe"; }; pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ - pingrp = "mip5_pins"; + abilis,function = "mip5"; }; /* Port 4 */ pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ - pingrp = "mis6_pins"; + abilis,function = "mis6"; }; pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ - pingrp = "mis7_pins"; + abilis,function = "mis7"; }; pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ - pingrp = "gpiog_pins"; + abilis,function = "gpiog"; }; pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ - pingrp = "mip7_pins"; + abilis,function = "mip7"; }; /* Port 5 */ pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ - pingrp = "gpioj_pins"; + abilis,function = "gpioj"; }; pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ - pingrp = "gpiok_pins"; + abilis,function = "gpiok"; }; pctl_ciplus: pctl-ciplus { /* CI+ interface */ - pingrp = "ciplus_pins"; + abilis,function = "ciplus"; }; pctl_mcard: pctl-mcard { /* M-Card interface */ - pingrp = "mcard_pins"; + abilis,function = "mcard"; }; pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */ - pingrp = "stc0_pins"; + abilis,function = "stc0"; }; pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */ - pingrp = "stc1_pins"; + abilis,function = "stc1"; }; /* Port 6 */ pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ - pingrp = "mop_pins"; + abilis,function = "mop"; }; pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ - pingrp = "mos0_pins"; + abilis,function = "mos0"; }; pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ - pingrp = "mos1_pins"; + abilis,function = "mos1"; }; pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ - pingrp = "mos2_pins"; + abilis,function = "mos2"; }; pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ - pingrp = "mos3_pins"; + abilis,function = "mos3"; }; /* Port 7 */ pctl_uart0: pctl-uart0 { /* UART 0 */ - pingrp = "uart0_pins"; + abilis,function = "uart0"; }; pctl_uart1: pctl-uart1 { /* UART 1 */ - pingrp = "uart1_pins"; + abilis,function = "uart1"; }; pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ - pingrp = "gpiol_pins"; + abilis,function = "gpiol"; }; pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ - pingrp = "gpiom_pins"; + abilis,function = "gpiom"; }; /* Port 8 */ pctl_spi3: pctl-spi3 { - pingrp = "spi3_pins"; + abilis,function = "spi3"; }; pctl_jtag: pctl-jtag { - pingrp = "jtag_pins"; + abilis,function = "jtag"; }; /* Port 9 */ pctl_spi1: pctl-spi1 { - pingrp = "spi1_pins"; + abilis,function = "spi1"; }; pctl_gpio_n: pctl-gpio-n { - pingrp = "gpion_pins"; + abilis,function = "gpion"; }; /* Unmuxed GPIOs */ pctl_gpio_b: pctl-gpio-b { - pingrp = "gpiob_pins"; + abilis,function = "gpiob"; }; pctl_gpio_d: pctl-gpio-d { - pingrp = "gpiod_pins"; + abilis,function = 
"gpiod"; }; pctl_gpio_f: pctl-gpio-f { - pingrp = "gpiof_pins"; + abilis,function = "gpiof"; }; pctl_gpio_h: pctl-gpio-h { - pingrp = "gpioh_pins"; + abilis,function = "gpioh"; }; pctl_gpio_i: pctl-gpio-i { - pingrp = "gpioi_pins"; + abilis,function = "gpioi"; }; }; @@ -181,9 +181,10 @@ interrupts = <27 2>; reg = <0xFF140000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <0>; - gpio-pins = <&pctl_gpio_a>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioa"; }; gpiob: gpio@FF141000 { compatible = "abilis,tb10x-gpio"; @@ -193,9 +194,10 @@ interrupts = <27 2>; reg = <0xFF141000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <3>; - gpio-pins = <&pctl_gpio_b>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiob"; }; gpioc: gpio@FF142000 { compatible = "abilis,tb10x-gpio"; @@ -205,9 +207,10 @@ interrupts = <27 2>; reg = <0xFF142000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <5>; - gpio-pins = <&pctl_gpio_c>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioc"; }; gpiod: gpio@FF143000 { compatible = "abilis,tb10x-gpio"; @@ -217,9 +220,10 @@ interrupts = <27 2>; reg = <0xFF143000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <8>; - gpio-pins = <&pctl_gpio_d>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiod"; }; gpioe: gpio@FF144000 { compatible = "abilis,tb10x-gpio"; @@ -229,9 +233,10 @@ interrupts = <27 2>; reg = <0xFF144000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <10>; - gpio-pins = <&pctl_gpio_e>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioe"; }; gpiof: gpio@FF145000 { compatible = "abilis,tb10x-gpio"; @@ -241,9 +246,10 @@ interrupts = <27 2>; reg = <0xFF145000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <13>; - gpio-pins = <&pctl_gpio_f>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiof"; }; gpiog: gpio@FF146000 { compatible = "abilis,tb10x-gpio"; @@ -253,9 +259,10 @@ interrupts = <27 2>; reg = <0xFF146000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <15>; - gpio-pins = <&pctl_gpio_g>; + #gpio-cells = <2>; + abilis,ngpio = <3>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiog"; }; gpioh: gpio@FF147000 { compatible = "abilis,tb10x-gpio"; @@ -265,9 +272,10 @@ interrupts = <27 2>; reg = <0xFF147000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <18>; - gpio-pins = <&pctl_gpio_h>; + #gpio-cells = <2>; + abilis,ngpio = <2>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioh"; }; gpioi: gpio@FF148000 { compatible = "abilis,tb10x-gpio"; @@ -277,9 +285,10 @@ interrupts = <27 2>; reg = <0xFF148000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <20>; - gpio-pins = <&pctl_gpio_i>; + #gpio-cells = <2>; + abilis,ngpio = <12>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioi"; }; gpioj: gpio@FF149000 { compatible = "abilis,tb10x-gpio"; @@ -289,9 +298,10 @@ interrupts = <27 2>; reg = <0xFF149000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <32>; - gpio-pins = <&pctl_gpio_j>; + #gpio-cells = <2>; + abilis,ngpio = <32>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpioj"; }; gpiok: gpio@FF14a000 { compatible = "abilis,tb10x-gpio"; 
@@ -301,9 +311,10 @@ interrupts = <27 2>; reg = <0xFF14A000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <64>; - gpio-pins = <&pctl_gpio_k>; + #gpio-cells = <2>; + abilis,ngpio = <22>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiok"; }; gpiol: gpio@FF14b000 { compatible = "abilis,tb10x-gpio"; @@ -313,9 +324,10 @@ interrupts = <27 2>; reg = <0xFF14B000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <86>; - gpio-pins = <&pctl_gpio_l>; + #gpio-cells = <2>; + abilis,ngpio = <4>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiol"; }; gpiom: gpio@FF14c000 { compatible = "abilis,tb10x-gpio"; @@ -325,9 +337,10 @@ interrupts = <27 2>; reg = <0xFF14C000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <90>; - gpio-pins = <&pctl_gpio_m>; + #gpio-cells = <2>; + abilis,ngpio = <4>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpiom"; }; gpion: gpio@FF14d000 { compatible = "abilis,tb10x-gpio"; @@ -337,9 +350,10 @@ interrupts = <27 2>; reg = <0xFF14D000 0x1000>; gpio-controller; - #gpio-cells = <1>; - gpio-base = <94>; - gpio-pins = <&pctl_gpio_n>; + #gpio-cells = <2>; + abilis,ngpio = <5>; + gpio-ranges = <&iomux 0 0 0>; + gpio-ranges-group-names = "gpion"; }; }; }; diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts index b204657993a..1cf51c280f2 100644 --- a/arch/arc/boot/dts/abilis_tb101_dvk.dts +++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts @@ -64,62 +64,62 @@ compatible = "gpio-leds"; power { label = "Power"; - gpios = <&gpioi 0>; + gpios = <&gpioi 0 0>; linux,default-trigger = "default-on"; }; heartbeat { label = "Heartbeat"; - gpios = <&gpioi 1>; + gpios = <&gpioi 1 0>; linux,default-trigger = "heartbeat"; }; led2 { label = "LED2"; - gpios = <&gpioi 2>; + gpios = <&gpioi 2 0>; default-state = "off"; }; led3 { label = "LED3"; - gpios = <&gpioi 3>; + gpios = <&gpioi 3 0>; default-state = "off"; }; led4 { label = "LED4"; - gpios = <&gpioi 4>; + gpios = <&gpioi 4 0>; default-state = "off"; }; led5 { label = "LED5"; - gpios = <&gpioi 5>; + gpios = <&gpioi 5 0>; default-state = "off"; }; led6 { label = "LED6"; - gpios = <&gpioi 6>; + gpios = <&gpioi 6 0>; default-state = "off"; }; led7 { label = "LED7"; - gpios = <&gpioi 7>; + gpios = <&gpioi 7 0>; default-state = "off"; }; led8 { label = "LED8"; - gpios = <&gpioi 8>; + gpios = <&gpioi 8 0>; default-state = "off"; }; led9 { label = "LED9"; - gpios = <&gpioi 9>; + gpios = <&gpioi 9 0>; default-state = "off"; }; led10 { label = "LED10"; - gpios = <&gpioi 10>; + gpios = <&gpioi 10 0>; default-state = "off"; }; led11 { label = "LED11"; - gpios = <&gpioi 11>; + gpios = <&gpioi 11 0>; default-state = "off"; }; }; diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi index edf56f4749e..a098d7c05e9 100644 --- a/arch/arc/boot/dts/abilis_tb10x.dtsi +++ b/arch/arc/boot/dts/abilis_tb10x.dtsi @@ -62,9 +62,8 @@ }; iomux: iomux@FF10601c { - #address-cells = <1>; - #size-cells = <1>; compatible = "abilis,tb10x-iomux"; + #gpio-range-cells = <3>; reg = <0xFF10601c 0x4>; }; diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts index 4fb2d6f655b..bcf662d21a5 100644 --- a/arch/arc/boot/dts/angel4.dts +++ b/arch/arc/boot/dts/angel4.dts @@ -67,5 +67,9 @@ reg = <1>; }; }; + + arcpmu0: pmu { + compatible = "snps,arc700-pmu"; + }; }; }; diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig new file mode 100644 index 00000000000..5276a52f6a2 --- /dev/null 
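The board files above now pass two-cell GPIO specifiers such as <&gpioi 4 0>, where the second cell carries flags. A minimal sketch of how a consumer resolves such a specifier with the long-standing OF gpiolib helpers (the property name and label are illustrative):

#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

/* Resolve a specifier like gpios = <&gpioi 4 0>: the flags cell is
 * returned separately; "gpios" and "demo-led" are made-up names. */
static int demo_claim_led(struct device_node *np)
{
	enum of_gpio_flags flags;
	int gpio = of_get_named_gpio_flags(np, "gpios", 0, &flags);

	if (!gpio_is_valid(gpio))
		return gpio;

	/* start with the LED off; for an active-low line that means high */
	return gpio_request_one(gpio,
				(flags & OF_GPIO_ACTIVE_LOW) ?
					GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
				"demo-led");
}
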
+++ b/arch/arc/configs/fpga_noramfs_defconfig @@ -0,0 +1,64 @@ +CONFIG_CROSS_COMPILE="arc-linux-uclibc-" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="ARCLinux" +# CONFIG_SWAP is not set +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_KPROBES=y +CONFIG_MODULES=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARC_PLAT_FPGA_LEGACY=y +CONFIG_ARC_BOARD_ML509=y +# CONFIG_ARC_HAS_RTSC is not set +CONFIG_ARC_BUILTIN_DTB_NAME="angel4" +CONFIG_PREEMPT=y +# CONFIG_COMPACTION is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_NET_KEY=y +CONFIG_INET=y +# CONFIG_IPV6 is not set +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +# CONFIG_BLK_DEV is not set +CONFIG_NETDEVICES=y +CONFIG_ARC_EMAC=y +CONFIG_LXT_PHY=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_ARC=y +CONFIG_SERIAL_ARC_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_VGA_CONSOLE is not set +# CONFIG_HID is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_XZ_DEC=y diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 115ad96480e..cbf755e32a0 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -1,5 +1,7 @@ /* - * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com) + * Linux performance counter support for ARC + * + * Copyright (C) 2011-2013 Synopsys, Inc. 
(www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -10,4 +12,204 @@ #ifndef __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H +/* real maximum varies per CPU, this is the maximum supported by the driver */ +#define ARC_PMU_MAX_HWEVENTS 64 + +#define ARC_REG_CC_BUILD 0xF6 +#define ARC_REG_CC_INDEX 0x240 +#define ARC_REG_CC_NAME0 0x241 +#define ARC_REG_CC_NAME1 0x242 + +#define ARC_REG_PCT_BUILD 0xF5 +#define ARC_REG_PCT_COUNTL 0x250 +#define ARC_REG_PCT_COUNTH 0x251 +#define ARC_REG_PCT_SNAPL 0x252 +#define ARC_REG_PCT_SNAPH 0x253 +#define ARC_REG_PCT_CONFIG 0x254 +#define ARC_REG_PCT_CONTROL 0x255 +#define ARC_REG_PCT_INDEX 0x256 + +#define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */ +#define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */ + +struct arc_reg_pct_build { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int m:8, c:8, r:6, s:2, v:8; +#else + unsigned int v:8, s:2, r:6, c:8, m:8; +#endif +}; + +struct arc_reg_cc_build { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int c:16, r:8, v:8; +#else + unsigned int v:8, r:8, c:16; +#endif +}; + +#define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0) +#define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1) +#define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2) +#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3) +#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4) +#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5) +#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 6) + +/* + * The "generalized" performance events seem to really be a copy + * of the available events on x86 processors; the mapping to ARC + * events is not always possible 1-to-1. Fortunately, there doesn't + * seem to be an exact definition for these events, so we can cheat + * a bit where necessary. + * + * In particular, the following PERF events may behave a bit differently + * compared to other architectures: + * + * PERF_COUNT_HW_CPU_CYCLES + * Cycles not in halted state + * + * PERF_COUNT_HW_REF_CPU_CYCLES + * Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES + * for now as we don't do Dynamic Voltage/Frequency Scaling (yet) + * + * PERF_COUNT_HW_BUS_CYCLES + * Unclear what this means, Intel uses 0x013c, which according to + * their datasheet means "unhalted reference cycles". It sounds similar + * to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it. + * + * PERF_COUNT_HW_STALLED_CYCLES_BACKEND + * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND + * The ARC 700 can either measure stalls per pipeline stage, or all stalls + * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND + * and all pipeline flushes (e.g. caused by mispredicts, etc.) to + * STALLED_CYCLES_FRONTEND. + * + * We could start multiple performance counters and combine everything + * afterwards, but that makes it complicated. + * + * Note that I$ cache misses aren't counted by either of the two! 
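Given that mapping, a generic PERF_COUNT_HW_CPU_CYCLES request from user space ends up programmed as the ARC "crun" condition. For illustration, the standard perf_event_open() sequence that exercises this path (ordinary example code, not part of the patch):

#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* mapped to "crun" below */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
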
+ */ + +static const char * const arc_pmu_ev_hw_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = "crun", + [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun", + [PERF_COUNT_HW_BUS_CYCLES] = "crun", + [PERF_COUNT_HW_INSTRUCTIONS] = "iall", + [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush", + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall", + [PERF_COUNT_ARC_DCLM] = "dclm", + [PERF_COUNT_ARC_DCSM] = "dcsm", + [PERF_COUNT_ARC_ICM] = "icm", + [PERF_COUNT_ARC_BPOK] = "bpok", + [PERF_COUNT_ARC_EDTLB] = "edtlb", + [PERF_COUNT_ARC_EITLB] = "eitlb", +}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xffff + +static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS, + [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + #endif /* __ASM_PERF_EVENT_H */ diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 2d50a4cdd7f..45be2167201 100644 --- a/arch/arc/include/asm/thread_info.h +++ 
b/arch/arc/include/asm/thread_info.h @@ -80,8 +80,6 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may need to diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile index c242ef07ba7..8004b4fa646 100644 --- a/arch/arc/kernel/Makefile +++ b/arch/arc/kernel/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o CFLAGS_fpu.o += -mdpfp diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c new file mode 100644 index 00000000000..e46d81f7097 --- /dev/null +++ b/arch/arc/kernel/perf_event.c @@ -0,0 +1,326 @@ +/* + * Linux performance counter support for ARC700 series + * + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This code is inspired by the perf support of various other architectures. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <asm/arcregs.h> + +struct arc_pmu { + struct pmu pmu; + int counter_size; /* in bits */ + int n_counters; + unsigned long used_mask[BITS_TO_LONGS(ARC_PMU_MAX_HWEVENTS)]; + int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; +}; + +/* read counter #idx; note that counter# != event# on ARC! 
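The update path in arc_perf_event_update() just below masks the difference of two counter snapshots to the hardware counter width (counter_size, derived in the probe code further down), so a counter wrap between reads is harmless. A stand-alone illustration of that arithmetic with made-up values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int counter_size = 48;		/* 32 + (pct_bcr.s << 4), as in the probe */
	const uint64_t mask = (1ULL << counter_size) - 1;
	uint64_t prev = 0xfffffffffff0ULL;	/* near the 48-bit maximum */
	uint64_t now = 0x10ULL;			/* the counter wrapped in between */
	uint64_t delta = (now - prev) & mask;

	assert(delta == 0x20);			/* 0x10 up to the wrap, 0x10 after it */
	printf("delta = 0x%llx\n", (unsigned long long)delta);
	return 0;
}
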
*/ +static uint64_t arc_pmu_read_counter(int idx) +{ + uint32_t tmp; + uint64_t result; + + /* + * ARC supports making 'snapshots' of the counters, so we don't + * need to care about counters wrapping to 0 underneath our feet + */ + write_aux_reg(ARC_REG_PCT_INDEX, idx); + tmp = read_aux_reg(ARC_REG_PCT_CONTROL); + write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); + result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; + result |= read_aux_reg(ARC_REG_PCT_SNAPL); + + return result; +} + +static void arc_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu); + uint64_t prev_raw_count, new_raw_count; + int64_t delta; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = arc_pmu_read_counter(idx); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & + ((1ULL << arc_pmu->counter_size) - 1ULL); + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static void arc_pmu_read(struct perf_event *event) +{ + arc_perf_event_update(event, &event->hw, event->hw.idx); +} + +static int arc_pmu_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + int ret; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ret = arc_pmu_cache_map[cache_type][cache_op][cache_result]; + + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + + return ret; +} + +/* initializes hw_perf_event structure if event is supported */ +static int arc_pmu_event_init(struct perf_event *event) +{ + struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu); + struct hw_perf_event *hwc = &event->hw; + int ret; + + /* ARC 700 PMU does not support sampling events */ + if (is_sampling_event(event)) + return -ENOENT; + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -ENOENT; + if (arc_pmu->ev_hw_idx[event->attr.config] < 0) + return -ENOENT; + hwc->config = arc_pmu->ev_hw_idx[event->attr.config]; + pr_debug("initializing event %d with cfg %d\n", + (int) event->attr.config, (int) hwc->config); + return 0; + case PERF_TYPE_HW_CACHE: + ret = arc_pmu_cache_event(event->attr.config); + if (ret < 0) + return ret; + hwc->config = arc_pmu->ev_hw_idx[ret]; + return 0; + default: + return -ENOENT; + } +} + +/* starts all counters */ +static void arc_pmu_enable(struct pmu *pmu) +{ + uint32_t tmp; + tmp = read_aux_reg(ARC_REG_PCT_CONTROL); + write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1); +} + +/* stops all counters */ +static void arc_pmu_disable(struct pmu *pmu) +{ + uint32_t tmp; + tmp = read_aux_reg(ARC_REG_PCT_CONTROL); + write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); +} + +/* + * Assigns hardware counter to hardware condition.
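arc_pmu_cache_event() above unpacks the config word that user space builds for PERF_TYPE_HW_CACHE events as type | (op << 8) | (result << 16). For example, an L1D read miss is encoded as shown below and, via arc_pmu_cache_map, lands on the PERF_COUNT_ARC_DCLM condition (stand-alone user-space sketch):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t config = PERF_COUNT_HW_CACHE_L1D |
			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	printf("type=%u op=%u result=%u\n",
	       (unsigned)(config & 0xff),
	       (unsigned)((config >> 8) & 0xff),
	       (unsigned)((config >> 16) & 0xff));	/* prints: type=0 op=0 result=1 */
	return 0;
}
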
+ * Note that there is no separate start/stop mechanism; + * stopping is achieved by assigning the 'never' condition + */ +static void arc_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + event->hw.state = 0; + + /* enable ARC pmu here */ + write_aux_reg(ARC_REG_PCT_INDEX, idx); + write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config); +} + +static void arc_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(event->hw.state & PERF_HES_STOPPED)) { + /* stop ARC pmu here */ + write_aux_reg(ARC_REG_PCT_INDEX, idx); + + /* condition code #0 is always "never" */ + write_aux_reg(ARC_REG_PCT_CONFIG, 0); + + event->hw.state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + arc_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void arc_pmu_del(struct perf_event *event, int flags) +{ + struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu); + + arc_pmu_stop(event, PERF_EF_UPDATE); + __clear_bit(event->hw.idx, arc_pmu->used_mask); + + perf_event_update_userpage(event); +} + +/* allocate hardware counter and optionally start counting */ +static int arc_pmu_add(struct perf_event *event, int flags) +{ + struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (__test_and_set_bit(idx, arc_pmu->used_mask)) { + idx = find_first_zero_bit(arc_pmu->used_mask, + arc_pmu->n_counters); + if (idx == arc_pmu->n_counters) + return -EAGAIN; + + __set_bit(idx, arc_pmu->used_mask); + hwc->idx = idx; + } + + write_aux_reg(ARC_REG_PCT_INDEX, idx); + write_aux_reg(ARC_REG_PCT_CONFIG, 0); + write_aux_reg(ARC_REG_PCT_COUNTL, 0); + write_aux_reg(ARC_REG_PCT_COUNTH, 0); + local64_set(&hwc->prev_count, 0); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (flags & PERF_EF_START) + arc_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static int arc_pmu_device_probe(struct platform_device *pdev) +{ + struct arc_pmu *arc_pmu; + struct arc_reg_pct_build pct_bcr; + struct arc_reg_cc_build cc_bcr; + int i, j, ret; + + union cc_name { + struct { + uint32_t word0, word1; + char sentinel; + } indiv; + char str[9]; + } cc_name; + + + READ_BCR(ARC_REG_PCT_BUILD, pct_bcr); + if (!pct_bcr.v) { + pr_err("This core does not have performance counters!\n"); + return -ENODEV; + } + + arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), + GFP_KERNEL); + if (!arc_pmu) + return -ENOMEM; + + arc_pmu->n_counters = pct_bcr.c; + BUG_ON(arc_pmu->n_counters > ARC_PMU_MAX_HWEVENTS); + + arc_pmu->counter_size = 32 + (pct_bcr.s << 4); + pr_info("ARC PMU found with %d counters of size %d bits\n", + arc_pmu->n_counters, arc_pmu->counter_size); + + READ_BCR(ARC_REG_CC_BUILD, cc_bcr); + + if (!cc_bcr.v) + pr_err("Strange! 
Performance counters exist, but no countable conditions?\n"); + + pr_info("ARC PMU has %d countable conditions\n", cc_bcr.c); + + cc_name.str[8] = 0; + for (i = 0; i < PERF_COUNT_HW_MAX; i++) + arc_pmu->ev_hw_idx[i] = -1; + + for (j = 0; j < cc_bcr.c; j++) { + write_aux_reg(ARC_REG_CC_INDEX, j); + cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); + cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); + for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { + if (arc_pmu_ev_hw_map[i] && + !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) && + strlen(arc_pmu_ev_hw_map[i])) { + pr_debug("mapping %d to idx %d with name %s\n", + i, j, cc_name.str); + arc_pmu->ev_hw_idx[i] = j; + } + } + } + + arc_pmu->pmu = (struct pmu) { + .pmu_enable = arc_pmu_enable, + .pmu_disable = arc_pmu_disable, + .event_init = arc_pmu_event_init, + .add = arc_pmu_add, + .del = arc_pmu_del, + .start = arc_pmu_start, + .stop = arc_pmu_stop, + .read = arc_pmu_read, + }; + + ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); + + return ret; +} + +#ifdef CONFIG_OF +static const struct of_device_id arc_pmu_match[] = { + { .compatible = "snps,arc700-pmu" }, + {}, +}; +MODULE_DEVICE_TABLE(of, arc_pmu_match); +#endif + +static struct platform_driver arc_pmu_driver = { + .driver = { + .name = "arc700-pmu", + .of_match_table = of_match_ptr(arc_pmu_match), + }, + .probe = arc_pmu_device_probe, +}; + +module_platform_driver(arc_pmu_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>"); +MODULE_DESCRIPTION("ARC PMU driver"); diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig index 1ab386bb5da..6994c188dc8 100644 --- a/arch/arc/plat-tb10x/Kconfig +++ b/arch/arc/plat-tb10x/Kconfig @@ -20,8 +20,10 @@ menuconfig ARC_PLAT_TB10X bool "Abilis TB10x" select COMMON_CLK select PINCTRL + select PINCTRL_TB10X select PINMUX select ARCH_REQUIRE_GPIOLIB + select GPIO_TB10X select TB10X_IRQC help Support for platforms based on the TB10x home media gateway SOC by diff --git a/arch/arm/boot/dts/ecx-2000.dts b/arch/arm/boot/dts/ecx-2000.dts index 139b40cc3a2..2ccbb57fbfa 100644 --- a/arch/arm/boot/dts/ecx-2000.dts +++ b/arch/arm/boot/dts/ecx-2000.dts @@ -85,6 +85,12 @@ <1 10 0xf08>; }; + memory-controller@fff00000 { + compatible = "calxeda,ecx-2000-ddr-ctrl"; + reg = <0xfff00000 0x1000>; + interrupts = <0 91 4>; + }; + intc: interrupt-controller@fff11000 { compatible = "arm,cortex-a15-gic"; #interrupt-cells = <3>; diff --git a/arch/arm/boot/dts/ecx-common.dtsi b/arch/arm/boot/dts/ecx-common.dtsi index bc22557d7a6..b90045a8f8e 100644 --- a/arch/arm/boot/dts/ecx-common.dtsi +++ b/arch/arm/boot/dts/ecx-common.dtsi @@ -53,12 +53,6 @@ status = "disabled"; }; - memory-controller@fff00000 { - compatible = "calxeda,hb-ddr-ctrl"; - reg = <0xfff00000 0x1000>; - interrupts = <0 91 4>; - }; - ipc@fff20000 { compatible = "arm,pl320", "arm,primecell"; reg = <0xfff20000 0x1000>; diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts index 6aad34ad951..ed14aeac056 100644 --- a/arch/arm/boot/dts/highbank.dts +++ b/arch/arm/boot/dts/highbank.dts @@ -86,6 +86,12 @@ soc { ranges = <0x00000000 0x00000000 0xffffffff>; + memory-controller@fff00000 { + compatible = "calxeda,hb-ddr-ctrl"; + reg = <0xfff00000 0x1000>; + interrupts = <0 91 4>; + }; + timer@fff10600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0xfff10600 0x20>; diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi index fb1b2ec8eaa..4217096ee67 100644 --- a/arch/arm/boot/dts/twl4030.dtsi +++ 
b/arch/arm/boot/dts/twl4030.dtsi @@ -19,6 +19,12 @@ interrupts = <11>; }; + charger: bci { + compatible = "ti,twl4030-bci"; + interrupts = <9>, <2>; + bci3v1-supply = <&vusb3v1>; + }; + watchdog { compatible = "ti,twl4030-wdt"; }; diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c index 8e1a0245907..41bca32409f 100644 --- a/arch/arm/common/edma.c +++ b/arch/arm/common/edma.c @@ -404,7 +404,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data) BIT(slot)); if (edma_cc[ctlr]->intr_data[channel].callback) edma_cc[ctlr]->intr_data[channel].callback( - channel, DMA_COMPLETE, + channel, EDMA_DMA_COMPLETE, edma_cc[ctlr]->intr_data[channel].data); } } while (sh_ipr); @@ -459,7 +459,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data) callback) { edma_cc[ctlr]->intr_data[k]. callback(k, - DMA_CC_ERROR, + EDMA_DMA_CC_ERROR, edma_cc[ctlr]->intr_data [k].data); } diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig index 002a1ceadce..23591dba47a 100644 --- a/arch/arm/configs/prima2_defconfig +++ b/arch/arm/configs/prima2_defconfig @@ -39,6 +39,7 @@ CONFIG_SPI=y CONFIG_SPI_SIRF=y CONFIG_SPI_SPIDEV=y # CONFIG_HWMON is not set +CONFIG_WATCHDOG=y CONFIG_USB_GADGET=y CONFIG_USB_MASS_STORAGE=m CONFIG_MMC=y diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 9b28f1243bd..240b29ef17d 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, return slot_cnt; } -static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) -{ - return 0; -} - -static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return hw_desc.dma->dest_addr; - case AAU_ID: - return hw_desc.aau->dest_addr; - default: - BUG(); - } - return 0; -} - - -static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - BUG(); - return 0; -} - static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 122f86d8c99..250760e0810 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -82,8 +82,6 @@ struct iop_adma_chan { * @slot_cnt: total slots used in an transaction (group of operations) * @slots_per_op: number of slots per operation * @idx: pool index - * @unmap_src_cnt: number of xor sources - * @unmap_len: transaction bytecount * @tx_list: list of descriptors that are associated with one operation * @async_tx: support for the async_tx api * @group_list: list of slots that make up a multi-descriptor transaction @@ -99,8 +97,6 @@ struct iop_adma_desc_slot { u16 slot_cnt; u16 slots_per_op; u16 idx; - u16 unmap_src_cnt; - size_t unmap_len; struct list_head tx_list; struct dma_async_tx_descriptor async_tx; union { diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index df5e13d64f2..71a06b29348 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -141,12 +141,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #endif /* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is 
occurring. See <asm/hardirq.h>. - */ -#define PREEMPT_ACTIVE 0x40000000 - -/* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active * TIF_SYSCAL_AUDIT - syscall auditing active diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c index 0b153c87521..f4f8735315d 100644 --- a/arch/arm/mach-at91/board-sam9260ek.c +++ b/arch/arm/mach-at91/board-sam9260ek.c @@ -28,7 +28,7 @@ #include <linux/spi/spi.h> #include <linux/spi/at73c213.h> #include <linux/clk.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/gpio_keys.h> #include <linux/input.h> diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 8b4942cbb6d..2f931915c80 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c @@ -27,7 +27,7 @@ #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/fb.h> #include <linux/gpio_keys.h> #include <linux/input.h> diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index 40f15f133c5..d1f45af7a53 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -17,7 +17,7 @@ #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/i2c/pcf857x.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/spi/spi.h> diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index df16cb88a26..e0af0eccde8 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -18,7 +18,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/platform_data/pca953x.h> #include <linux/input.h> #include <linux/input/tps6507x-ts.h> diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index f4a6c18912e..e08a8684ead 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -18,7 +18,7 @@ #include <linux/i2c.h> #include <linux/io.h> #include <linux/clk.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/leds.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 9cc32c283b8..987605b7855 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -15,7 +15,7 @@ #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/i2c/pcf857x.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 44b20191a9f..13d0801fd6b 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -22,7 +22,7 @@ #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/i2c/pcf857x.h> #include <media/tvp514x.h> diff --git a/arch/arm/mach-davinci/board-mityomapl138.c 
b/arch/arm/mach-davinci/board-mityomapl138.c index cd0f58730c2..7aa105b1fd0 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -15,7 +15,7 @@ #include <linux/mtd/partitions.h> #include <linux/regulator/machine.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/etherdevice.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index d8436014810..41c7c961579 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c @@ -26,7 +26,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c index 19bb6441a7d..c5f95674e9b 100644 --- a/arch/arm/mach-imx/mach-pca100.c +++ b/arch/arm/mach-imx/mach-pca100.c @@ -20,7 +20,7 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/dma-mapping.h> #include <linux/spi/spi.h> #include <linux/spi/eeprom.h> diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c index 45303bd6290..639a3dfb009 100644 --- a/arch/arm/mach-imx/mach-pcm037.c +++ b/arch/arm/mach-imx/mach-pcm037.c @@ -23,7 +23,7 @@ #include <linux/smsc911x.h> #include <linux/interrupt.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/irq.h> diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c index e805ac273e9..592ddbe031a 100644 --- a/arch/arm/mach-imx/mach-pcm038.c +++ b/arch/arm/mach-imx/mach-pcm038.c @@ -18,7 +18,7 @@ */ #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/io.h> #include <linux/mtd/plat-ram.h> #include <linux/mtd/physmap.h> diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c index b726cb1c5fd..ac504b67326 100644 --- a/arch/arm/mach-imx/mach-pcm043.c +++ b/arch/arm/mach-imx/mach-pcm043.c @@ -24,7 +24,7 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c index 0910761e828..8825d1217d1 100644 --- a/arch/arm/mach-imx/mach-vpr200.c +++ b/arch/arm/mach-imx/mach-vpr200.c @@ -29,7 +29,7 @@ #include <asm/mach/time.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/mfd/mc13xxx.h> #include "common.h" diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h index 6d3782d85a9..a86fd0ed775 100644 --- a/arch/arm/mach-iop13xx/include/mach/adma.h +++ b/arch/arm/mach-iop13xx/include/mach/adma.h @@ -218,20 +218,6 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) #define iop_chan_pq_slot_count iop_chan_xor_slot_count #define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count -static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - struct 
iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; - return hw_desc->dest_addr; -} - -static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; - return hw_desc->q_dest_addr; -} - static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { @@ -350,18 +336,6 @@ iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, hw_desc->desc_ctrl = u_desc_ctrl.value; } -static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) -{ - struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; - union { - u32 value; - struct iop13xx_adma_desc_ctrl field; - } u_desc_ctrl; - - u_desc_ctrl.value = hw_desc->desc_ctrl; - return u_desc_ctrl.field.pq_xfer_en; -} - static inline void iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, unsigned long flags) diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c index 489495976fc..8e3e4331c38 100644 --- a/arch/arm/mach-kirkwood/lacie_v2-common.c +++ b/arch/arm/mach-kirkwood/lacie_v2-common.c @@ -12,7 +12,7 @@ #include <linux/spi/flash.h> #include <linux/spi/spi.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/gpio.h> #include <asm/mach/time.h> #include <mach/kirkwood.h> diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c index a7ce6928668..d68909b095f 100644 --- a/arch/arm/mach-omap1/board-osk.c +++ b/arch/arm/mach-omap1/board-osk.c @@ -300,7 +300,7 @@ static struct omap_lcd_config osk_lcd_config __initdata = { #ifdef CONFIG_OMAP_OSK_MISTRAL #include <linux/input.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index 33d159e2386..8dd0ec858cf 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -25,7 +25,7 @@ #include <linux/gpio.h> #include <linux/platform_data/gpio-omap.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/i2c/twl.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c index 87e41a8b8d4..f7808349a73 100644 --- a/arch/arm/mach-omap2/board-h4.c +++ b/arch/arm/mach-omap2/board-h4.c @@ -20,7 +20,7 @@ #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/input.h> #include <linux/err.h> #include <linux/clk.h> diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c index ba8342fef79..119efaf5808 100644 --- a/arch/arm/mach-omap2/board-omap3stalker.c +++ b/arch/arm/mach-omap2/board-omap3stalker.c @@ -32,7 +32,7 @@ #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/smsc911x.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/usb/phy.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c index 62aea3e835f..01de542432a 100644 --- a/arch/arm/mach-pxa/stargate2.c +++ b/arch/arm/mach-pxa/stargate2.c @@ -27,7 +27,7 @@ #include <linux/i2c/pxa-i2c.h> #include <linux/i2c/pcf857x.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/smc91x.h> #include 
<linux/gpio.h> #include <linux/leds.h> diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c index a83db46320b..4a18d49a63e 100644 --- a/arch/arm/mach-s3c24xx/mach-mini2440.c +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c @@ -24,7 +24,7 @@ #include <linux/io.h> #include <linux/serial_core.h> #include <linux/dm9000.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/platform_device.h> #include <linux/gpio_keys.h> #include <linux/i2c.h> diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 23a3c4791d8..720e70b66ff 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -89,12 +89,6 @@ static inline struct thread_info *current_thread_info(void) #endif /* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is occurring. See <asm/hardirq.h>. - */ -#define PREEMPT_ACTIVE 0x40000000 - -/* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active * TIF_SIGPENDING - signal pending diff --git a/arch/avr32/boot/u-boot/head.S b/arch/avr32/boot/u-boot/head.S index 4488fa27fe9..2ffc298f061 100644 --- a/arch/avr32/boot/u-boot/head.S +++ b/arch/avr32/boot/u-boot/head.S @@ -8,6 +8,8 @@ * published by the Free Software Foundation. */ #include <asm/setup.h> +#include <asm/thread_info.h> +#include <asm/sysreg.h> /* * The kernel is loaded where we want it to be and all caches @@ -20,11 +22,6 @@ .section .init.text,"ax" .global _start _start: - /* Check if the boot loader actually provided a tag table */ - lddpc r0, magic_number - cp.w r12, r0 - brne no_tag_table - /* Initialize .bss */ lddpc r2, bss_start_addr lddpc r3, end_addr @@ -34,6 +31,25 @@ _start: cp r2, r3 brlo 1b + /* Initialize status register */ + lddpc r0, init_sr + mtsr SYSREG_SR, r0 + + /* Set initial stack pointer */ + lddpc sp, stack_addr + sub sp, -THREAD_SIZE + +#ifdef CONFIG_FRAME_POINTER + /* Mark last stack frame */ + mov lr, 0 + mov r7, 0 +#endif + + /* Check if the boot loader actually provided a tag table */ + lddpc r0, magic_number + cp.w r12, r0 + brne no_tag_table + /* * Save the tag table address for later use. This must be done * _after_ .bss has been initialized... @@ -53,8 +69,15 @@ bss_start_addr: .long __bss_start end_addr: .long _end +init_sr: + .long 0x007f0000 /* Supervisor mode, everything masked */ +stack_addr: + .long init_thread_union +panic_addr: + .long panic no_tag_table: sub r12, pc, (. 
- 2f) - bral panic + /* branch to panic() which can be far away with that construct */ + lddpc pc, panic_addr 2: .asciz "Boot loader didn't provide correct magic number\n" diff --git a/arch/avr32/include/asm/kprobes.h b/arch/avr32/include/asm/kprobes.h index 996cb656474..45f563ed73f 100644 --- a/arch/avr32/include/asm/kprobes.h +++ b/arch/avr32/include/asm/kprobes.h @@ -16,6 +16,7 @@ typedef u16 kprobe_opcode_t; #define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */ #define MAX_INSN_SIZE 2 +#define MAX_STACK_SIZE 64 /* 32 would probably be OK */ #define kretprobe_blacklist_size 0 @@ -26,6 +27,19 @@ struct arch_specific_insn { kprobe_opcode_t insn[MAX_INSN_SIZE]; }; +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + struct prev_kprobe prev_kprobe; + struct pt_regs jprobe_saved_regs; + char jprobes_stack[MAX_STACK_SIZE]; +}; + extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h index 6dc62e1f94c..a978f3fe7c2 100644 --- a/arch/avr32/include/asm/thread_info.h +++ b/arch/avr32/include/asm/thread_info.h @@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void) #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x40000000 - /* * Thread information flags * - these are process state flags that various assembly files may need to access diff --git a/arch/avr32/include/uapi/asm/Kbuild b/arch/avr32/include/uapi/asm/Kbuild index 3b85eaddf52..08d8a3d76ea 100644 --- a/arch/avr32/include/uapi/asm/Kbuild +++ b/arch/avr32/include/uapi/asm/Kbuild @@ -2,35 +2,35 @@ include include/uapi/asm-generic/Kbuild.asm header-y += auxvec.h -header-y += bitsperlong.h header-y += byteorder.h header-y += cachectl.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h header-y += msgbuf.h header-y += param.h -header-y += poll.h header-y += posix_types.h header-y += ptrace.h -header-y += resource.h header-y += sembuf.h header-y += setup.h header-y += shmbuf.h header-y += sigcontext.h -header-y += siginfo.h header-y += signal.h header-y += socket.h header-y += sockios.h header-y += stat.h -header-y += statfs.h header-y += swab.h header-y += termbits.h header-y += termios.h header-y += types.h header-y += unistd.h +generic-y += bitsperlong.h +generic-y += errno.h +generic-y += fcntl.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += ipcbuf.h +generic-y += kvm_para.h +generic-y += mman.h generic-y += param.h +generic-y += poll.h +generic-y += resource.h +generic-y += siginfo.h +generic-y += statfs.h diff --git a/arch/avr32/include/uapi/asm/auxvec.h b/arch/avr32/include/uapi/asm/auxvec.h index d5dd435bf8f..4f02da3ffef 100644 --- a/arch/avr32/include/uapi/asm/auxvec.h +++ b/arch/avr32/include/uapi/asm/auxvec.h @@ -1,4 +1,4 @@ -#ifndef __ASM_AVR32_AUXVEC_H -#define __ASM_AVR32_AUXVEC_H +#ifndef _UAPI__ASM_AVR32_AUXVEC_H +#define _UAPI__ASM_AVR32_AUXVEC_H -#endif /* __ASM_AVR32_AUXVEC_H */ +#endif /* _UAPI__ASM_AVR32_AUXVEC_H */ diff --git a/arch/avr32/include/uapi/asm/bitsperlong.h b/arch/avr32/include/uapi/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b..00000000000 --- a/arch/avr32/include/uapi/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/bitsperlong.h> diff 
--git a/arch/avr32/include/uapi/asm/byteorder.h b/arch/avr32/include/uapi/asm/byteorder.h index 50abc21619a..71242f0d39c 100644 --- a/arch/avr32/include/uapi/asm/byteorder.h +++ b/arch/avr32/include/uapi/asm/byteorder.h @@ -1,9 +1,9 @@ /* * AVR32 endian-conversion functions. */ -#ifndef __ASM_AVR32_BYTEORDER_H -#define __ASM_AVR32_BYTEORDER_H +#ifndef _UAPI__ASM_AVR32_BYTEORDER_H +#define _UAPI__ASM_AVR32_BYTEORDER_H #include <linux/byteorder/big_endian.h> -#endif /* __ASM_AVR32_BYTEORDER_H */ +#endif /* _UAPI__ASM_AVR32_BYTEORDER_H */ diff --git a/arch/avr32/include/uapi/asm/cachectl.h b/arch/avr32/include/uapi/asm/cachectl.h index 4faf1ce6006..573a9584dd5 100644 --- a/arch/avr32/include/uapi/asm/cachectl.h +++ b/arch/avr32/include/uapi/asm/cachectl.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_CACHECTL_H -#define __ASM_AVR32_CACHECTL_H +#ifndef _UAPI__ASM_AVR32_CACHECTL_H +#define _UAPI__ASM_AVR32_CACHECTL_H /* * Operations that can be performed through the cacheflush system call @@ -8,4 +8,4 @@ /* Clean the data cache, then invalidate the icache */ #define CACHE_IFLUSH 0 -#endif /* __ASM_AVR32_CACHECTL_H */ +#endif /* _UAPI__ASM_AVR32_CACHECTL_H */ diff --git a/arch/avr32/include/uapi/asm/errno.h b/arch/avr32/include/uapi/asm/errno.h deleted file mode 100644 index 558a7249f06..00000000000 --- a/arch/avr32/include/uapi/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_AVR32_ERRNO_H -#define __ASM_AVR32_ERRNO_H - -#include <asm-generic/errno.h> - -#endif /* __ASM_AVR32_ERRNO_H */ diff --git a/arch/avr32/include/uapi/asm/fcntl.h b/arch/avr32/include/uapi/asm/fcntl.h deleted file mode 100644 index 14c0c4402b1..00000000000 --- a/arch/avr32/include/uapi/asm/fcntl.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_AVR32_FCNTL_H -#define __ASM_AVR32_FCNTL_H - -#include <asm-generic/fcntl.h> - -#endif /* __ASM_AVR32_FCNTL_H */ diff --git a/arch/avr32/include/uapi/asm/ioctl.h b/arch/avr32/include/uapi/asm/ioctl.h deleted file mode 100644 index c8472c1398e..00000000000 --- a/arch/avr32/include/uapi/asm/ioctl.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_AVR32_IOCTL_H -#define __ASM_AVR32_IOCTL_H - -#include <asm-generic/ioctl.h> - -#endif /* __ASM_AVR32_IOCTL_H */ diff --git a/arch/avr32/include/uapi/asm/ioctls.h b/arch/avr32/include/uapi/asm/ioctls.h deleted file mode 100644 index 909cf66feaf..00000000000 --- a/arch/avr32/include/uapi/asm/ioctls.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_AVR32_IOCTLS_H -#define __ASM_AVR32_IOCTLS_H - -#include <asm-generic/ioctls.h> - -#endif /* __ASM_AVR32_IOCTLS_H */ diff --git a/arch/avr32/include/uapi/asm/ipcbuf.h b/arch/avr32/include/uapi/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d..00000000000 --- a/arch/avr32/include/uapi/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ipcbuf.h> diff --git a/arch/avr32/include/uapi/asm/kvm_para.h b/arch/avr32/include/uapi/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b95..00000000000 --- a/arch/avr32/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kvm_para.h> diff --git a/arch/avr32/include/uapi/asm/mman.h b/arch/avr32/include/uapi/asm/mman.h deleted file mode 100644 index 8eebf89f5ab..00000000000 --- a/arch/avr32/include/uapi/asm/mman.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/mman.h> diff --git a/arch/avr32/include/uapi/asm/msgbuf.h b/arch/avr32/include/uapi/asm/msgbuf.h index ac18bc4da7f..9eae6effad1 100644 --- a/arch/avr32/include/uapi/asm/msgbuf.h +++ b/arch/avr32/include/uapi/asm/msgbuf.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_MSGBUF_H 
-#define __ASM_AVR32_MSGBUF_H +#ifndef _UAPI__ASM_AVR32_MSGBUF_H +#define _UAPI__ASM_AVR32_MSGBUF_H /* * The msqid64_ds structure for i386 architecture. @@ -28,4 +28,4 @@ struct msqid64_ds { unsigned long __unused5; }; -#endif /* __ASM_AVR32_MSGBUF_H */ +#endif /* _UAPI__ASM_AVR32_MSGBUF_H */ diff --git a/arch/avr32/include/uapi/asm/poll.h b/arch/avr32/include/uapi/asm/poll.h deleted file mode 100644 index c98509d3149..00000000000 --- a/arch/avr32/include/uapi/asm/poll.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/poll.h> diff --git a/arch/avr32/include/uapi/asm/posix_types.h b/arch/avr32/include/uapi/asm/posix_types.h index 9ba9e749b3f..5b813a8abf0 100644 --- a/arch/avr32/include/uapi/asm/posix_types.h +++ b/arch/avr32/include/uapi/asm/posix_types.h @@ -5,8 +5,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef __ASM_AVR32_POSIX_TYPES_H -#define __ASM_AVR32_POSIX_TYPES_H +#ifndef _UAPI__ASM_AVR32_POSIX_TYPES_H +#define _UAPI__ASM_AVR32_POSIX_TYPES_H /* * This file is generally used by user-level software, so you need to @@ -34,4 +34,4 @@ typedef unsigned short __kernel_old_dev_t; #include <asm-generic/posix_types.h> -#endif /* __ASM_AVR32_POSIX_TYPES_H */ +#endif /* _UAPI__ASM_AVR32_POSIX_TYPES_H */ diff --git a/arch/avr32/include/uapi/asm/resource.h b/arch/avr32/include/uapi/asm/resource.h deleted file mode 100644 index c6dd101472b..00000000000 --- a/arch/avr32/include/uapi/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_AVR32_RESOURCE_H -#define __ASM_AVR32_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif /* __ASM_AVR32_RESOURCE_H */ diff --git a/arch/avr32/include/uapi/asm/sembuf.h b/arch/avr32/include/uapi/asm/sembuf.h index e472216e0c9..6c6f7cf1e75 100644 --- a/arch/avr32/include/uapi/asm/sembuf.h +++ b/arch/avr32/include/uapi/asm/sembuf.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_SEMBUF_H -#define __ASM_AVR32_SEMBUF_H +#ifndef _UAPI__ASM_AVR32_SEMBUF_H +#define _UAPI__ASM_AVR32_SEMBUF_H /* * The semid64_ds structure for AVR32 architecture. @@ -22,4 +22,4 @@ struct semid64_ds { unsigned long __unused4; }; -#endif /* __ASM_AVR32_SEMBUF_H */ +#endif /* _UAPI__ASM_AVR32_SEMBUF_H */ diff --git a/arch/avr32/include/uapi/asm/setup.h b/arch/avr32/include/uapi/asm/setup.h index e58aa9356fa..a654df7dba4 100644 --- a/arch/avr32/include/uapi/asm/setup.h +++ b/arch/avr32/include/uapi/asm/setup.h @@ -13,5 +13,4 @@ #define COMMAND_LINE_SIZE 256 - #endif /* _UAPI__ASM_AVR32_SETUP_H__ */ diff --git a/arch/avr32/include/uapi/asm/shmbuf.h b/arch/avr32/include/uapi/asm/shmbuf.h index c62fba41739..b94cf8b60b7 100644 --- a/arch/avr32/include/uapi/asm/shmbuf.h +++ b/arch/avr32/include/uapi/asm/shmbuf.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_SHMBUF_H -#define __ASM_AVR32_SHMBUF_H +#ifndef _UAPI__ASM_AVR32_SHMBUF_H +#define _UAPI__ASM_AVR32_SHMBUF_H /* * The shmid64_ds structure for i386 architecture. @@ -39,4 +39,4 @@ struct shminfo64 { unsigned long __unused4; }; -#endif /* __ASM_AVR32_SHMBUF_H */ +#endif /* _UAPI__ASM_AVR32_SHMBUF_H */ diff --git a/arch/avr32/include/uapi/asm/sigcontext.h b/arch/avr32/include/uapi/asm/sigcontext.h index e04062b5f39..27e56bf6377 100644 --- a/arch/avr32/include/uapi/asm/sigcontext.h +++ b/arch/avr32/include/uapi/asm/sigcontext.h @@ -5,8 +5,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#ifndef __ASM_AVR32_SIGCONTEXT_H -#define __ASM_AVR32_SIGCONTEXT_H +#ifndef _UAPI__ASM_AVR32_SIGCONTEXT_H +#define _UAPI__ASM_AVR32_SIGCONTEXT_H struct sigcontext { unsigned long oldmask; @@ -31,4 +31,4 @@ struct sigcontext { unsigned long r0; }; -#endif /* __ASM_AVR32_SIGCONTEXT_H */ +#endif /* _UAPI__ASM_AVR32_SIGCONTEXT_H */ diff --git a/arch/avr32/include/uapi/asm/siginfo.h b/arch/avr32/include/uapi/asm/siginfo.h deleted file mode 100644 index 5ee93f40a8a..00000000000 --- a/arch/avr32/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _AVR32_SIGINFO_H -#define _AVR32_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/avr32/include/uapi/asm/signal.h b/arch/avr32/include/uapi/asm/signal.h index 1b77a93eff5..ffe8c770caf 100644 --- a/arch/avr32/include/uapi/asm/signal.h +++ b/arch/avr32/include/uapi/asm/signal.h @@ -118,5 +118,4 @@ typedef struct sigaltstack { size_t ss_size; } stack_t; - #endif /* _UAPI__ASM_AVR32_SIGNAL_H */ diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h index 43993642143..cbf902e4cd9 100644 --- a/arch/avr32/include/uapi/asm/socket.h +++ b/arch/avr32/include/uapi/asm/socket.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_SOCKET_H -#define __ASM_AVR32_SOCKET_H +#ifndef _UAPI__ASM_AVR32_SOCKET_H +#define _UAPI__ASM_AVR32_SOCKET_H #include <asm/sockios.h> @@ -78,4 +78,4 @@ #define SO_MAX_PACING_RATE 47 -#endif /* __ASM_AVR32_SOCKET_H */ +#endif /* _UAPI__ASM_AVR32_SOCKET_H */ diff --git a/arch/avr32/include/uapi/asm/sockios.h b/arch/avr32/include/uapi/asm/sockios.h index 0802d742f97..d0478545353 100644 --- a/arch/avr32/include/uapi/asm/sockios.h +++ b/arch/avr32/include/uapi/asm/sockios.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_SOCKIOS_H -#define __ASM_AVR32_SOCKIOS_H +#ifndef _UAPI__ASM_AVR32_SOCKIOS_H +#define _UAPI__ASM_AVR32_SOCKIOS_H /* Socket-level I/O control calls. */ #define FIOSETOWN 0x8901 @@ -10,4 +10,4 @@ #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ -#endif /* __ASM_AVR32_SOCKIOS_H */ +#endif /* _UAPI__ASM_AVR32_SOCKIOS_H */ diff --git a/arch/avr32/include/uapi/asm/stat.h b/arch/avr32/include/uapi/asm/stat.h index e72881e1023..c06acef7fce 100644 --- a/arch/avr32/include/uapi/asm/stat.h +++ b/arch/avr32/include/uapi/asm/stat.h @@ -5,8 +5,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef __ASM_AVR32_STAT_H -#define __ASM_AVR32_STAT_H +#ifndef _UAPI__ASM_AVR32_STAT_H +#define _UAPI__ASM_AVR32_STAT_H struct __old_kernel_stat { unsigned short st_dev; @@ -76,4 +76,4 @@ struct stat64 { unsigned long __unused2; }; -#endif /* __ASM_AVR32_STAT_H */ +#endif /* _UAPI__ASM_AVR32_STAT_H */ diff --git a/arch/avr32/include/uapi/asm/statfs.h b/arch/avr32/include/uapi/asm/statfs.h deleted file mode 100644 index 2961bd18c50..00000000000 --- a/arch/avr32/include/uapi/asm/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_AVR32_STATFS_H -#define __ASM_AVR32_STATFS_H - -#include <asm-generic/statfs.h> - -#endif /* __ASM_AVR32_STATFS_H */ diff --git a/arch/avr32/include/uapi/asm/swab.h b/arch/avr32/include/uapi/asm/swab.h index 14cc737bbca..1a03549e7dc 100644 --- a/arch/avr32/include/uapi/asm/swab.h +++ b/arch/avr32/include/uapi/asm/swab.h @@ -1,8 +1,8 @@ /* * AVR32 byteswapping functions. 
*/ -#ifndef __ASM_AVR32_SWAB_H -#define __ASM_AVR32_SWAB_H +#ifndef _UAPI__ASM_AVR32_SWAB_H +#define _UAPI__ASM_AVR32_SWAB_H #include <linux/types.h> #include <linux/compiler.h> @@ -32,4 +32,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 val) #define __arch_swab32 __arch_swab32 #endif -#endif /* __ASM_AVR32_SWAB_H */ +#endif /* _UAPI__ASM_AVR32_SWAB_H */ diff --git a/arch/avr32/include/uapi/asm/termbits.h b/arch/avr32/include/uapi/asm/termbits.h index 366adc5ebb1..32789ccb38f 100644 --- a/arch/avr32/include/uapi/asm/termbits.h +++ b/arch/avr32/include/uapi/asm/termbits.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_TERMBITS_H -#define __ASM_AVR32_TERMBITS_H +#ifndef _UAPI__ASM_AVR32_TERMBITS_H +#define _UAPI__ASM_AVR32_TERMBITS_H #include <linux/posix_types.h> @@ -193,4 +193,4 @@ struct ktermios { #define TCSADRAIN 1 #define TCSAFLUSH 2 -#endif /* __ASM_AVR32_TERMBITS_H */ +#endif /* _UAPI__ASM_AVR32_TERMBITS_H */ diff --git a/arch/avr32/include/uapi/asm/termios.h b/arch/avr32/include/uapi/asm/termios.h index b8ef8ea6335..c8a0081556c 100644 --- a/arch/avr32/include/uapi/asm/termios.h +++ b/arch/avr32/include/uapi/asm/termios.h @@ -46,5 +46,4 @@ struct termio { /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - #endif /* _UAPI__ASM_AVR32_TERMIOS_H */ diff --git a/arch/avr32/include/uapi/asm/types.h b/arch/avr32/include/uapi/asm/types.h index bb34ad349df..7c986c4e99b 100644 --- a/arch/avr32/include/uapi/asm/types.h +++ b/arch/avr32/include/uapi/asm/types.h @@ -5,4 +5,9 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#ifndef _UAPI__ASM_AVR32_TYPES_H +#define _UAPI__ASM_AVR32_TYPES_H + #include <asm-generic/int-ll64.h> + +#endif /* _UAPI__ASM_AVR32_TYPES_H */ diff --git a/arch/avr32/include/uapi/asm/unistd.h b/arch/avr32/include/uapi/asm/unistd.h index 3eaa68753ad..8822bf46ddc 100644 --- a/arch/avr32/include/uapi/asm/unistd.h +++ b/arch/avr32/include/uapi/asm/unistd.h @@ -301,5 +301,4 @@ #define __NR_eventfd 281 #define __NR_setns 283 - #endif /* _UAPI__ASM_AVR32_UNISTD_H */ diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S index 9899d3cc6f0..7301f4806bb 100644 --- a/arch/avr32/kernel/entry-avr32b.S +++ b/arch/avr32/kernel/entry-avr32b.S @@ -401,9 +401,10 @@ handle_critical: /* We should never get here... */ bad_return: sub r12, pc, (. - 1f) - bral panic + lddpc pc, 2f .align 2 1: .asciz "Return from critical exception!" 
+2: .long panic .align 1 do_bus_error_write: diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S index 6163bd0acb9..59eae6dfbed 100644 --- a/arch/avr32/kernel/head.S +++ b/arch/avr32/kernel/head.S @@ -10,33 +10,13 @@ #include <linux/linkage.h> #include <asm/page.h> -#include <asm/thread_info.h> -#include <asm/sysreg.h> .section .init.text,"ax" .global kernel_entry kernel_entry: - /* Initialize status register */ - lddpc r0, init_sr - mtsr SYSREG_SR, r0 - - /* Set initial stack pointer */ - lddpc sp, stack_addr - sub sp, -THREAD_SIZE - -#ifdef CONFIG_FRAME_POINTER - /* Mark last stack frame */ - mov lr, 0 - mov r7, 0 -#endif - /* Start the show */ lddpc pc, kernel_start_addr .align 2 -init_sr: - .long 0x007f0000 /* Supervisor mode, everything masked */ -stack_addr: - .long init_thread_union kernel_start_addr: .long start_kernel diff --git a/arch/blackfin/include/asm/hardirq.h b/arch/blackfin/include/asm/hardirq.h index c078dd78d99..58b54a6d5a1 100644 --- a/arch/blackfin/include/asm/hardirq.h +++ b/arch/blackfin/include/asm/hardirq.h @@ -12,9 +12,6 @@ extern void ack_bad_irq(unsigned int irq); #define ack_bad_irq ack_bad_irq -/* Define until common code gets sane defaults */ -#define HARDIRQ_BITS 9 - #include <asm-generic/hardirq.h> #endif diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index 3894005337b..55f473bdad3 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h @@ -88,8 +88,6 @@ static inline struct thread_info *current_thread_info(void) #define TI_CPU 12 #define TI_PREEMPT 16 -#define PREEMPT_ACTIVE 0x4000000 - /* * thread information flag bit numbers */ diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h index 4c8dc562bd9..d4e9ef87076 100644 --- a/arch/c6x/include/asm/thread_info.h +++ b/arch/c6x/include/asm/thread_info.h @@ -84,8 +84,6 @@ struct thread_info *current_thread_info(void) #define put_thread_info(ti) put_task_struct((ti)->task) #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flag bit numbers * - pending work-to-be-done flags are in LSW diff --git a/arch/cris/include/asm/hardirq.h b/arch/cris/include/asm/hardirq.h index 17bb12d760b..04126f7bfab 100644 --- a/arch/cris/include/asm/hardirq.h +++ b/arch/cris/include/asm/hardirq.h @@ -2,18 +2,6 @@ #define __ASM_HARDIRQ_H #include <asm/irq.h> - -#define HARDIRQ_BITS 8 - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! 
-#endif - #include <asm-generic/hardirq.h> #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h index 07c8c40c52b..55dede18c03 100644 --- a/arch/cris/include/asm/thread_info.h +++ b/arch/cris/include/asm/thread_info.h @@ -44,8 +44,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h index bebd7eadc77..af29e17c018 100644 --- a/arch/frv/include/asm/thread_info.h +++ b/arch/frv/include/asm/thread_info.h @@ -52,8 +52,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h index f7c32406a71..a59dad3b369 100644 --- a/arch/hexagon/include/asm/thread_info.h +++ b/arch/hexagon/include/asm/thread_info.h @@ -73,10 +73,6 @@ struct thread_info { #endif /* __ASSEMBLY__ */ -/* looks like "linux/hardirq.h" uses this. */ - -#define PREEMPT_ACTIVE 0x10000000 - #ifndef __ASSEMBLY__ #define INIT_THREAD_INFO(tsk) \ diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index d43daf192b2..4c530a82fc4 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1992,7 +1992,7 @@ sba_connect_bus(struct pci_bus *bus) if (PCI_CONTROLLER(bus)->iommu) return; - handle = PCI_CONTROLLER(bus)->acpi_handle; + handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion); if (!handle) return; diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 80775f55f03..71fbaaa495c 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -95,7 +95,7 @@ struct iospace_resource { }; struct pci_controller { - void *acpi_handle; + struct acpi_device *companion; void *iommu; int segment; int node; /* nearest node with memory or -1 for global allocation */ diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index cade13dd029..5957cf61f89 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -11,9 +11,6 @@ #include <asm/processor.h> #include <asm/ptrace.h> -#define PREEMPT_ACTIVE_BIT 30 -#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) - #ifndef __ASSEMBLY__ /* diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 7a53530f22c..ddea607f948 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1169,21 +1169,8 @@ skip_rbs_switch: .work_pending: tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed? 
(p6) br.cond.sptk.few .notify -#ifdef CONFIG_PREEMPT -(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1 - ;; -(pKStk) st4 [r20]=r21 -#endif - SSM_PSR_I(p0, p6, r2) // enable interrupts - br.call.spnt.many rp=schedule + br.call.spnt.many rp=preempt_schedule_irq .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check) - RSM_PSR_I(p0, r2, r20) // disable interrupts - ;; -#ifdef CONFIG_PREEMPT -(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 - ;; -(pKStk) st4 [r20]=r0 // preempt_count() <- 0 -#endif (pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end br.cond.sptk.many .work_processed_kernel diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 5a9ff1c3c3e..cb592773c78 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2166,12 +2166,6 @@ static const struct file_operations pfm_file_ops = { .flush = pfm_flush }; -static int -pfmfs_delete_dentry(const struct dentry *dentry) -{ - return 1; -} - static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]", @@ -2179,7 +2173,7 @@ static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen) } static const struct dentry_operations pfmfs_dentry_operations = { - .d_delete = pfmfs_delete_dentry, + .d_delete = always_delete_dentry, .d_dname = pfmfs_dname, }; diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 2326790b7d8..9e4938d8ca4 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -436,9 +436,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) if (!controller) return NULL; - controller->acpi_handle = device->handle; + controller->companion = device; - pxm = acpi_get_pxm(controller->acpi_handle); + pxm = acpi_get_pxm(device->handle); #ifdef CONFIG_NUMA if (pxm >= 0) controller->node = pxm_to_node(pxm); @@ -489,7 +489,7 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { struct pci_controller *controller = bridge->bus->sysdata; - ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle); + ACPI_COMPANION_SET(&bridge->dev, controller->companion); return 0; } diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index b1725398b5a..0640739cc20 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c @@ -132,7 +132,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus) struct acpi_resource_vendor_typed *vendor; - handle = PCI_CONTROLLER(bus)->acpi_handle; + handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion); status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, &sn_uuid, &buffer); if (ACPI_FAILURE(status)) { @@ -360,7 +360,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, acpi_status status; struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle; + rootbus_handle = acpi_device_handle(PCI_CONTROLLER(dev)->companion); status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL, &segment); if (ACPI_SUCCESS(status)) { diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h index 4c31c0ae215..5f2ac4f64dd 100644 --- a/arch/m32r/include/asm/hardirq.h +++ b/arch/m32r/include/asm/hardirq.h @@ -3,22 +3,6 @@ #define __ASM_HARDIRQ_H #include <asm/irq.h> - -#if NR_IRQS > 256 -#define HARDIRQ_BITS 9 -#else -#define HARDIRQ_BITS 8 -#endif - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < 
NR_IRQS -# error HARDIRQ_BITS is too low! -#endif - #include <asm-generic/hardirq.h> #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h index c074f4c2e85..00171703402 100644 --- a/arch/m32r/include/asm/thread_info.h +++ b/arch/m32r/include/asm/thread_info.h @@ -53,8 +53,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - #define THREAD_SIZE (PAGE_SIZE << 1) #define THREAD_SIZE_ORDER 1 /* diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index 0c01543f10c..7c3db9940ce 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S @@ -182,13 +182,7 @@ need_resched: ld r4, PSW(sp) ; interrupts off (exception path) ? and3 r4, r4, #0x4000 beqz r4, restore_all - LDIMM (r4, PREEMPT_ACTIVE) - st r4, @(TI_PRE_COUNT, r8) - ENABLE_INTERRUPTS(r4) - bl schedule - ldi r4, #0 - st r4, @(TI_PRE_COUNT, r8) - DISABLE_INTERRUPTS(r4) + bl preempt_schedule_irq bra need_resched #endif diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h index db30ed27687..6c618529d9b 100644 --- a/arch/m68k/include/asm/hardirq.h +++ b/arch/m68k/include/asm/hardirq.h @@ -5,17 +5,6 @@ #include <linux/cache.h> #include <asm/irq.h> -#define HARDIRQ_BITS 8 - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! -#endif - #ifdef CONFIG_MMU static inline void ack_bad_irq(unsigned int irq) diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 126131f94a2..21a4784ca5a 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h @@ -35,8 +35,6 @@ struct thread_info { }; #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x4000000 - #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index a78f5649e8d..b54ac7aba85 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -45,7 +45,7 @@ .globl system_call, buserr, trap, resume .globl sys_call_table .globl __sys_fork, __sys_clone, __sys_vfork -.globl ret_from_interrupt, bad_interrupt +.globl bad_interrupt .globl auto_irqhandler_fixup .globl user_irqvec_fixup @@ -275,8 +275,6 @@ do_delayed_trace: ENTRY(auto_inthandler) SAVE_ALL_INT GET_CURRENT(%d0) - movel %d0,%a1 - addqb #1,%a1@(TINFO_PREEMPT+1) | put exception # in d0 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 subw #VEC_SPUR,%d0 @@ -286,32 +284,13 @@ ENTRY(auto_inthandler) auto_irqhandler_fixup = . + 2 jsr do_IRQ | process the IRQ addql #8,%sp | pop parameters off stack - -ret_from_interrupt: - movel %curptr@(TASK_STACK),%a1 - subqb #1,%a1@(TINFO_PREEMPT+1) - jeq ret_from_last_interrupt -2: RESTORE_ALL - - ALIGN -ret_from_last_interrupt: - moveq #(~ALLOWINT>>8)&0xff,%d0 - andb %sp@(PT_OFF_SR),%d0 - jne 2b - - /* check if we need to do software interrupts */ - tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING - jeq .Lret_from_exception - pea ret_from_exception - jra do_softirq + jra ret_from_exception /* Handler for user defined interrupt vectors */ ENTRY(user_inthandler) SAVE_ALL_INT GET_CURRENT(%d0) - movel %d0,%a1 - addqb #1,%a1@(TINFO_PREEMPT+1) | put exception # in d0 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 user_irqvec_fixup = . + 2 @@ -321,29 +300,18 @@ user_irqvec_fixup = . 
+ 2 movel %d0,%sp@- | put vector # on stack jsr do_IRQ | process the IRQ addql #8,%sp | pop parameters off stack - - movel %curptr@(TASK_STACK),%a1 - subqb #1,%a1@(TINFO_PREEMPT+1) - jeq ret_from_last_interrupt - RESTORE_ALL + jra ret_from_exception /* Handler for uninitialized and spurious interrupts */ ENTRY(bad_inthandler) SAVE_ALL_INT GET_CURRENT(%d0) - movel %d0,%a1 - addqb #1,%a1@(TINFO_PREEMPT+1) movel %sp,%sp@- jsr handle_badint addql #4,%sp - - movel %curptr@(TASK_STACK),%a1 - subqb #1,%a1@(TINFO_PREEMPT+1) - jeq ret_from_last_interrupt - RESTORE_ALL - + jra ret_from_exception resume: /* diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c index 4d7da384eea..077d3a70fed 100644 --- a/arch/m68k/kernel/ints.c +++ b/arch/m68k/kernel/ints.c @@ -58,12 +58,6 @@ void __init init_IRQ(void) { int i; - /* assembly irq entry code relies on this... */ - if (HARDIRQ_MASK != 0x00ff0000) { - extern void hardirq_mask_is_broken(void); - hardirq_mask_is_broken(); - } - for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++) irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq); diff --git a/arch/m68k/platform/68000/entry.S b/arch/m68k/platform/68000/entry.S index 7f91c2fde50..23ac054c6e1 100644 --- a/arch/m68k/platform/68000/entry.S +++ b/arch/m68k/platform/68000/entry.S @@ -27,7 +27,6 @@ .globl ret_from_exception .globl ret_from_signal .globl sys_call_table -.globl ret_from_interrupt .globl bad_interrupt .globl inthandler1 .globl inthandler2 @@ -137,7 +136,7 @@ inthandler1: movel #65,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler2: SAVE_ALL_INT @@ -148,7 +147,7 @@ inthandler2: movel #66,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler3: SAVE_ALL_INT @@ -159,7 +158,7 @@ inthandler3: movel #67,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler4: SAVE_ALL_INT @@ -170,7 +169,7 @@ inthandler4: movel #68,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler5: SAVE_ALL_INT @@ -181,7 +180,7 @@ inthandler5: movel #69,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler6: SAVE_ALL_INT @@ -192,7 +191,7 @@ inthandler6: movel #70,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler7: SAVE_ALL_INT @@ -203,7 +202,7 @@ inthandler7: movel #71,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler: SAVE_ALL_INT @@ -214,23 +213,7 @@ inthandler: movel %d0,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt - -ret_from_interrupt: - jeq 1f -2: - RESTORE_ALL -1: - moveb %sp@(PT_OFF_SR), %d0 - and #7, %d0 - jhi 2b - - /* check if we need to do software interrupts */ - jeq ret_from_exception - - pea ret_from_exception - jra do_softirq - + bra ret_from_exception 
/* * Handler for uninitialized and spurious interrupts. diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S index 904fd9a4af4..447c33ef37f 100644 --- a/arch/m68k/platform/68360/entry.S +++ b/arch/m68k/platform/68360/entry.S @@ -29,7 +29,6 @@ .globl ret_from_exception .globl ret_from_signal .globl sys_call_table -.globl ret_from_interrupt .globl bad_interrupt .globl inthandler @@ -132,26 +131,9 @@ inthandler: movel %sp,%sp@- movel %d0,%sp@- /* put vector # on stack*/ - jbsr do_IRQ /* process the IRQ*/ -3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt - -ret_from_interrupt: - jeq 1f -2: - RESTORE_ALL -1: - moveb %sp@(PT_OFF_SR), %d0 - and #7, %d0 - jhi 2b - /* check if we need to do software interrupts */ - - movel irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0 - jeq ret_from_exception - - pea ret_from_exception - jra do_softirq - + jbsr do_IRQ /* process the IRQ */ + addql #8,%sp /* pop parameters off stack*/ + jra ret_from_exception /* * Handler for uninitialized and spurious interrupts. diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h index 7c4a3300614..b19e9c588a1 100644 --- a/arch/metag/include/asm/thread_info.h +++ b/arch/metag/include/asm/thread_info.h @@ -46,8 +46,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - #ifdef CONFIG_4KSTACKS #define THREAD_SHIFT 12 #else diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index de26ea6373d..8c9d36591a0 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -106,8 +106,6 @@ static inline struct thread_info *current_thread_info(void) /* thread information allocation */ #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig index e2b4ad55462..28e49f226dc 100644 --- a/arch/mips/configs/db1235_defconfig +++ b/arch/mips/configs/db1235_defconfig @@ -351,7 +351,6 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_MMC=y -CONFIG_MMC_CLKGATE=y CONFIG_MMC_AU1X=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index f9b24bfbdba..4f58ef6d0ee 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -92,8 +92,6 @@ static inline struct thread_info *current_thread_info(void) #define STACK_WARN (THREAD_SIZE / 8) -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may need to diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index 224b4262486..bf280eaccd3 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h @@ -16,8 +16,6 @@ #include <asm/page.h> -#define PREEMPT_ACTIVE 0x10000000 - #ifdef CONFIG_4KSTACKS #define THREAD_SIZE (4096) #define THREAD_SIZE_ORDER (0) diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h new file mode 100644 index 00000000000..748016cb122 --- /dev/null +++ b/arch/parisc/include/asm/socket.h @@ -0,0 +1,11 @@ +#ifndef _ASM_SOCKET_H +#define _ASM_SOCKET_H + +#include <uapi/asm/socket.h> + +/* O_NONBLOCK clashes with the bits used for socket types. Therefore we + * have to define SOCK_NONBLOCK to a different value here. 
+ */ +#define SOCK_NONBLOCK 0x40000000 + +#endif /* _ASM_SOCKET_H */ diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index bc7cf120106..d5f97ea3a4e 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -46,9 +46,6 @@ struct thread_info { #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) -#define PREEMPT_ACTIVE_BIT 28 -#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) - /* * thread information flags */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 63f4dd0b49c..4006964d8e1 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -4,14 +4,11 @@ /* * User space memory access functions */ -#include <asm/processor.h> #include <asm/page.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm-generic/uaccess-unaligned.h> -#include <linux/sched.h> - #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -36,43 +33,12 @@ extern int __get_user_bad(void); extern int __put_kernel_bad(void); extern int __put_user_bad(void); - -/* - * Test whether a block of memory is a valid user space address. - * Returns 0 if the range is valid, nonzero otherwise. - */ -static inline int __range_not_ok(unsigned long addr, unsigned long size, - unsigned long limit) +static inline long access_ok(int type, const void __user * addr, + unsigned long size) { - unsigned long __newaddr = addr + size; - return (__newaddr < addr || __newaddr > limit || size > limit); + return 1; } -/** - * access_ok: - Checks if a user space pointer is valid - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe - * to write to a block, it is always safe to read from it. - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block may be valid, false (zero) - * if it is definitely invalid. - * - * Note that, depending on architecture, this function probably just - * checks that the pointer is in the user space range - after calling - * this function, memory access functions may still return -EFAULT. - */ -#define access_ok(type, addr, size) \ -( __chk_user_ptr(addr), \ - !__range_not_ok((unsigned long) (__force void *) (addr), \ - size, user_addr_max()) \ -) - #define put_user __put_user #define get_user __get_user @@ -253,11 +219,7 @@ extern long lstrnlen_user(const char __user *,long); /* * Complex access routines -- macros */ -#ifdef CONFIG_COMPAT -#define user_addr_max() (TASK_SIZE) -#else -#define user_addr_max() (DEFAULT_TASK_SIZE) -#endif +#define user_addr_max() (~0UL) #define strnlen_user lstrnlen_user #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL) diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 7c614d01f1f..f33113a6141 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -1,5 +1,5 @@ -#ifndef _ASM_SOCKET_H -#define _ASM_SOCKET_H +#ifndef _UAPI_ASM_SOCKET_H +#define _UAPI_ASM_SOCKET_H #include <asm/sockios.h> @@ -77,9 +77,4 @@ #define SO_MAX_PACING_RATE 0x4048 -/* O_NONBLOCK clashes with the bits used for socket types. Therefore we - * have to define SOCK_NONBLOCK to a different value here. 
- */ -#define SOCK_NONBLOCK 0x40000000 - -#endif /* _ASM_SOCKET_H */ +#endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index b5507ec06b8..413dc176929 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -161,7 +161,7 @@ static inline void prefetch_dst(const void *addr) /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words * per loop. This code is derived from glibc. */ -static inline unsigned long copy_dstaligned(unsigned long dst, +static noinline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len) { /* gcc complains that a2 and a3 may be uninitialized, but actually @@ -276,7 +276,7 @@ handle_store_error: /* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. * In case of an access fault the faulty address can be read from the per_cpu * exception data struct. */ -static unsigned long pa_memcpy_internal(void *dstp, const void *srcp, +static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp, unsigned long len) { register unsigned long src, dst, t1, t2, t3; @@ -529,7 +529,7 @@ long probe_kernel_read(void *dst, const void *src, size_t size) { unsigned long addr = (unsigned long)src; - if (size < 0 || addr < PAGE_SIZE) + if (addr < PAGE_SIZE) return -EFAULT; /* check for I/O space F_EXTEND(0xfff00000) access as well? */ diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 7584a5df0fa..9d08c71a967 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -282,16 +282,34 @@ bad_area: #endif switch (code) { case 15: /* Data TLB miss fault/Data page fault */ + /* send SIGSEGV when outside of vma */ + if (!vma || + address < vma->vm_start || address > vma->vm_end) { + si.si_signo = SIGSEGV; + si.si_code = SEGV_MAPERR; + break; + } + + /* send SIGSEGV for wrong permissions */ + if ((vma->vm_flags & acc_type) != acc_type) { + si.si_signo = SIGSEGV; + si.si_code = SEGV_ACCERR; + break; + } + + /* probably address is outside of mapped file */ + /* fall through */ case 17: /* NA data TLB miss / page fault */ case 18: /* Unaligned access - PCXS only */ si.si_signo = SIGBUS; - si.si_code = BUS_ADRERR; + si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR; break; case 16: /* Non-access instruction TLB miss fault */ case 26: /* PCXL: Data memory access rights trap */ default: si.si_signo = SIGSEGV; - si.si_code = SEGV_MAPERR; + si.si_code = (code == 26) ? 
SEGV_ACCERR : SEGV_MAPERR; + break; } si.si_errno = 0; si.si_addr = (void __user *) address; diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 607acf54a42..8a2463670a5 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -111,6 +111,7 @@ endif endif CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi index 4c617bf8cdb..4f6e48277c4 100644 --- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi @@ -223,13 +223,13 @@ reg = <0xe2000 0x1000>; }; -/include/ "qoriq-dma-0.dtsi" +/include/ "elo3-dma-0.dtsi" dma@100300 { fsl,iommu-parent = <&pamu0>; fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ }; -/include/ "qoriq-dma-1.dtsi" +/include/ "elo3-dma-1.dtsi" dma@101300 { fsl,iommu-parent = <&pamu0>; fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi new file mode 100644 index 00000000000..3c210e0d520 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi @@ -0,0 +1,82 @@ +/* + * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x100000 ] + * + * Copyright 2013 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +dma0: dma@100300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,elo3-dma"; + reg = <0x100300 0x4>, + <0x100600 0x4>; + ranges = <0x0 0x100100 0x500>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + interrupts = <28 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + interrupts = <29 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + interrupts = <30 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + interrupts = <31 2 0 0>; + }; + dma-channel@300 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x300 0x80>; + interrupts = <76 2 0 0>; + }; + dma-channel@380 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x380 0x80>; + interrupts = <77 2 0 0>; + }; + dma-channel@400 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x400 0x80>; + interrupts = <78 2 0 0>; + }; + dma-channel@480 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x480 0x80>; + interrupts = <79 2 0 0>; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi new file mode 100644 index 00000000000..cccf3bb3822 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi @@ -0,0 +1,82 @@ +/* + * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x101000 ] + * + * Copyright 2013 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +dma1: dma@101300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,elo3-dma"; + reg = <0x101300 0x4>, + <0x101600 0x4>; + ranges = <0x0 0x101100 0x500>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + interrupts = <32 2 0 0>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + interrupts = <33 2 0 0>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + interrupts = <34 2 0 0>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + interrupts = <35 2 0 0>; + }; + dma-channel@300 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x300 0x80>; + interrupts = <80 2 0 0>; + }; + dma-channel@380 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x380 0x80>; + interrupts = <81 2 0 0>; + }; + dma-channel@400 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x400 0x80>; + interrupts = <82 2 0 0>; + }; + dma-channel@480 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x480 0x80>; + interrupts = <83 2 0 0>; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi index 510afa362de..4143a9733cd 100644 --- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi @@ -387,8 +387,8 @@ reg = <0xea000 0x4000>; }; -/include/ "qoriq-dma-0.dtsi" -/include/ "qoriq-dma-1.dtsi" +/include/ "elo3-dma-0.dtsi" +/include/ "elo3-dma-1.dtsi" /include/ "qoriq-espi-0.dtsi" spi@110000 { diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig new file mode 100644 index 00000000000..62771e0adb7 --- /dev/null +++ b/arch/powerpc/configs/pseries_le_defconfig @@ -0,0 +1,352 @@ +CONFIG_PPC64=y +CONFIG_ALTIVEC=y +CONFIG_VSX=y +CONFIG_SMP=y +CONFIG_NR_CPUS=2048 +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_PPC_SPLPAR=y +CONFIG_SCANLOG=m +CONFIG_PPC_SMLPAR=y +CONFIG_DTL=y +# CONFIG_PPC_PMAC is not set +CONFIG_RTAS_FLASH=m +CONFIG_IBMEBUS=y +CONFIG_HZ_100=y +CONFIG_BINFMT_MISC=m +CONFIG_PPC_TRANSACTIONAL_MEM=y +CONFIG_KEXEC=y +CONFIG_IRQ_ALL_CPUS=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_CMA=y +CONFIG_PPC_64K_PAGES=y +CONFIG_PPC_SUBPAGE_PROT=y +CONFIG_SCHED_SMT=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_RPA=m +CONFIG_HOTPLUG_PCI_RPA_DLPAR=m +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_NET_IPIP=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 
+CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_PROC_DEVICETREE=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_BLK_DEV_FD=m +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_VIRTIO_BLK=m +CONFIG_IDE=y +CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_AMD74XX=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_IBMVSCSI=y +CONFIG_SCSI_IBMVFC=m +CONFIG_SCSI_SYM53C8XX_2=y +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 +CONFIG_SCSI_IPR=y +CONFIG_SCSI_QLA_FC=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=m +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_ALUA=m +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_UEVENT=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_NETCONSOLE=y +CONFIG_NETPOLL_TRAP=y +CONFIG_TUN=m +CONFIG_VIRTIO_NET=m +CONFIG_VORTEX=y +CONFIG_ACENIC=m +CONFIG_ACENIC_OMIT_TIGON_I=y +CONFIG_PCNET32=y +CONFIG_TIGON3=y +CONFIG_CHELSIO_T1=m +CONFIG_BE2NET=m +CONFIG_S2IO=m +CONFIG_IBMVETH=y +CONFIG_EHEA=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_MLX4_EN=m +CONFIG_MYRI10GE=m +CONFIG_QLGE=m +CONFIG_NETXEN_NIC=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_PCSPKR=m +# CONFIG_SERIO_SERPORT is not set 
+CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_ICOM=m +CONFIG_SERIAL_JSM=m +CONFIG_HVC_CONSOLE=y +CONFIG_HVC_RTAS=y +CONFIG_HVCS=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IBM_BSR=m +CONFIG_GEN_RTC=y +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=1024 +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_OF=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_MILLENIUM=y +CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_FB_MATROX_G=y +CONFIG_FB_RADEON=y +CONFIG_FB_IBM_GXT4500=y +CONFIG_LCD_PLATFORM=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_LOGO=y +CONFIG_HID_GYRATION=y +CONFIG_HID_PANTHERLORD=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SUNPLUS=y +CONFIG_USB_HIDDEV=y +CONFIG_USB=y +CONFIG_USB_MON=m +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_HCD_PPC_OF is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_STORAGE=m +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_EHCA=m +CONFIG_INFINIBAND_CXGB3=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_ISER=m +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT2_FS_XIP=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_REISERFS_FS=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=m +CONFIG_XFS_POSIX_ACL=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_NILFS2_FS=m +CONFIG_AUTOFS4_FS=m +CONFIG_FUSE_FS=m +CONFIG_ISO9660_FS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_PSTORE=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_CIFS=m +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_CRC_T10DIF=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_LATENCYTOP=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_CODE_PATCHING_SELFTEST=y +CONFIG_FTR_FIXUP_SELFTEST=y +CONFIG_MSI_BITMAP_SELFTEST=y +CONFIG_XMON=y +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_LZO=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DEV_NX=y +CONFIG_CRYPTO_DEV_NX_ENCRYPT=m diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index cc0655a702a..935b5e7a143 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -31,6 +31,8 @@ extern unsigned long randomize_et_dyn(unsigned long base); #define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) +#define ELF_CORE_EFLAGS (is_elf2_task() ? 
2 : 0) + /* * Our registers are always unsigned longs, whether we're a 32 bit * process or 64 bit, on either a 64 bit or 32 bit kernel. @@ -86,6 +88,8 @@ typedef elf_vrregset_t elf_fpxregset_t; #ifdef __powerpc64__ # define SET_PERSONALITY(ex) \ do { \ + if (((ex).e_flags & 0x3) == 2) \ + set_thread_flag(TIF_ELF2ABI); \ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ set_thread_flag(TIF_32BIT); \ else \ diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 0c7f2bfcf13..d8b600b3f05 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -403,6 +403,8 @@ static inline unsigned long cmo_get_page_size(void) extern long pSeries_enable_reloc_on_exc(void); extern long pSeries_disable_reloc_on_exc(void); +extern long pseries_big_endian_exceptions(void); + #else #define pSeries_enable_reloc_on_exc() do {} while (0) diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index a63b045e707..12c32c5f533 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -287,6 +287,32 @@ static inline long disable_reloc_on_exceptions(void) { return plpar_set_mode(0, 3, 0, 0); } +/* + * Take exceptions in big endian mode on this partition + * + * Note: this call has a partition wide scope and can take a while to complete. + * If it returns H_LONG_BUSY_* it should be retried periodically until it + * returns H_SUCCESS. + */ +static inline long enable_big_endian_exceptions(void) +{ + /* mflags = 0: big endian exceptions */ + return plpar_set_mode(0, 4, 0, 0); +} + +/* + * Take exceptions in little endian mode on this partition + * + * Note: this call has a partition wide scope and can take a while to complete. + * If it returns H_LONG_BUSY_* it should be retried periodically until it + * returns H_SUCCESS. + */ +static inline long enable_little_endian_exceptions(void) +{ + /* mflags = 1: little endian exceptions */ + return plpar_set_mode(1, 4, 0, 0); +} + static inline long plapr_set_ciabr(unsigned long ciabr) { return plpar_set_mode(0, 1, ciabr, 0); diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 98da78e0c2c..084e0807db9 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -33,6 +33,7 @@ extern int boot_cpuid; extern int spinning_secondaries; extern void cpu_die(void); +extern int cpu_to_chip_id(int cpu); #ifdef CONFIG_SMP @@ -112,7 +113,6 @@ static inline struct cpumask *cpu_core_mask(int cpu) } extern int cpu_to_core_id(int cpu); -extern int cpu_to_chip_id(int cpu); /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. * diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index ba7b1973866..9854c564ac5 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -82,8 +82,6 @@ static inline struct thread_info *current_thread_info(void) #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flag bit numbers */ @@ -107,6 +105,9 @@ static inline struct thread_info *current_thread_info(void) #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ +#if defined(CONFIG_PPC64) +#define TIF_ELF2ABI 18 /* function descriptors must die! 
*/ +#endif /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) @@ -185,6 +186,12 @@ static inline bool test_thread_local_flags(unsigned int flags) #define is_32bit_task() (1) #endif +#if defined(CONFIG_PPC64) +#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI)) +#else +#define is_elf2_task() (0) +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 67130206534..4bd687d5e7a 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -686,6 +686,15 @@ void eeh_save_bars(struct eeh_dev *edev) for (i = 0; i < 16; i++) eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]); + + /* + * For PCI bridges including root port, we need enable bus + * master explicitly. Otherwise, it can't fetch IODA table + * entries correctly. So we cache the bit in advance so that + * we can restore it after reset, either PHB range or PE range. + */ + if (edev->mode & EEH_DEV_BRIDGE) + edev->config_space[1] |= PCI_COMMAND_MASTER; } /** diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index d27c5afc90a..72d748b56c8 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -74,8 +74,13 @@ static int eeh_event_handler(void * dummy) pe = event->pe; if (pe) { eeh_pe_state_mark(pe, EEH_PE_RECOVERING); - pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n", - pe->phb->global_number, pe->addr); + if (pe->type & EEH_PE_PHB) + pr_info("EEH: Detected error on PHB#%d\n", + pe->phb->global_number); + else + pr_info("EEH: Detected PCI bus error on " + "PHB#%d-PE#%x\n", + pe->phb->global_number, pe->addr); eeh_handle_event(pe); eeh_pe_state_clear(pe, EEH_PE_RECOVERING); } else { diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 75c2d100998..3386d8ab7eb 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -858,17 +858,21 @@ void show_regs(struct pt_regs * regs) printk("MSR: "REG" ", regs->msr); printbits(regs->msr, msr_bits); printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); -#ifdef CONFIG_PPC64 - printk("SOFTE: %ld\n", regs->softe); -#endif trap = TRAP(regs); if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) - printk("CFAR: "REG"\n", regs->orig_gpr3); - if (trap == 0x300 || trap == 0x600) + printk("CFAR: "REG" ", regs->orig_gpr3); + if (trap == 0x200 || trap == 0x300 || trap == 0x600) #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); + printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); #else - printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); + printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); +#endif +#ifdef CONFIG_PPC64 + printk("SOFTE: %ld ", regs->softe); +#endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (MSR_TM_ACTIVE(regs->msr)) + printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); #endif for (i = 0; i < 32; i++) { @@ -887,9 +891,6 @@ void show_regs(struct pt_regs * regs) printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); #endif -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch); -#endif show_stack(current, (unsigned long *) regs->gpr[1]); if (!user_mode(regs)) show_instructions(regs); @@ -1086,25 +1087,45 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) regs->msr = MSR_USER; #else if (!is_32bit_task()) { - unsigned 
long entry, toc; + unsigned long entry; - /* start is a relocated pointer to the function descriptor for - * the elf _start routine. The first entry in the function - * descriptor is the entry address of _start and the second - * entry is the TOC value we need to use. - */ - __get_user(entry, (unsigned long __user *)start); - __get_user(toc, (unsigned long __user *)start+1); + if (is_elf2_task()) { + /* Look ma, no function descriptors! */ + entry = start; - /* Check whether the e_entry function descriptor entries - * need to be relocated before we can use them. - */ - if (load_addr != 0) { - entry += load_addr; - toc += load_addr; + /* + * Ulrich says: + * The latest iteration of the ABI requires that when + * calling a function (at its global entry point), + * the caller must ensure r12 holds the entry point + * address (so that the function can quickly + * establish addressability). + */ + regs->gpr[12] = start; + /* Make sure that's restored on entry to userspace. */ + set_thread_flag(TIF_RESTOREALL); + } else { + unsigned long toc; + + /* start is a relocated pointer to the function + * descriptor for the elf _start routine. The first + * entry in the function descriptor is the entry + * address of _start and the second entry is the TOC + * value we need to use. + */ + __get_user(entry, (unsigned long __user *)start); + __get_user(toc, (unsigned long __user *)start+1); + + /* Check whether the e_entry function descriptor entries + * need to be relocated before we can use them. + */ + if (load_addr != 0) { + entry += load_addr; + toc += load_addr; + } + regs->gpr[2] = toc; } regs->nip = entry; - regs->gpr[2] = toc; regs->msr = MSR_USER64; } else { regs->nip = start; diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index f3a47098fb8..fa0ad8aafbc 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -777,6 +777,26 @@ int of_get_ibm_chip_id(struct device_node *np) return -1; } +/** + * cpu_to_chip_id - Return the cpus chip-id + * @cpu: The logical cpu number. + * + * Return the value of the ibm,chip-id property corresponding to the given + * logical cpu number. If the chip-id can not be found, returns -1. + */ +int cpu_to_chip_id(int cpu) +{ + struct device_node *np; + + np = of_get_cpu_node(cpu, NULL); + if (!np) + return -1; + + of_node_put(np); + return of_get_ibm_chip_id(np); +} +EXPORT_SYMBOL(cpu_to_chip_id); + #ifdef CONFIG_PPC_PSERIES /* * Fix up the uninitialized fields in a new device node: diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 749778e0a69..1844298f5ea 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -457,7 +457,15 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; msr |= MSR_VSX; - } + } else if (!ctx_has_vsx_region) + /* + * With a small context structure we can't hold the VSX + * registers, hence clear the MSR value to indicate the state + * was not saved. 
+ */ + msr &= ~MSR_VSX; + + #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* save spe registers */ diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index b3c615764c9..e66f67b8b9e 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -701,12 +701,6 @@ badframe: int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { - /* Handler is *really* a pointer to the function descriptor for - * the signal routine. The first entry in the function - * descriptor is the entry address of signal and the second - * entry is the TOC value we need to use. - */ - func_descr_t __user *funct_desc_ptr; struct rt_sigframe __user *frame; unsigned long newsp = 0; long err = 0; @@ -766,19 +760,32 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, goto badframe; regs->link = (unsigned long) &frame->tramp[0]; } - funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler; /* Allocate a dummy caller frame for the signal handler. */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; err |= put_user(regs->gpr[1], (unsigned long __user *)newsp); /* Set up "regs" so we "return" to the signal handler. */ - err |= get_user(regs->nip, &funct_desc_ptr->entry); + if (is_elf2_task()) { + regs->nip = (unsigned long) ka->sa.sa_handler; + regs->gpr[12] = regs->nip; + } else { + /* Handler is *really* a pointer to the function descriptor for + * the signal routine. The first entry in the function + * descriptor is the entry address of signal and the second + * entry is the TOC value we need to use. + */ + func_descr_t __user *funct_desc_ptr = + (func_descr_t __user *) ka->sa.sa_handler; + + err |= get_user(regs->nip, &funct_desc_ptr->entry); + err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); + } + /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; regs->msr |= (MSR_KERNEL & MSR_LE); regs->gpr[1] = newsp; - err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); regs->gpr[3] = signr; regs->result = 0; if (ka->sa.sa_flags & SA_SIGINFO) { diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 930cd8af350..a3b64f3bf9a 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -597,22 +597,6 @@ out: return id; } -/* Return the value of the chip-id property corresponding - * to the given logical cpu. 
- */ -int cpu_to_chip_id(int cpu) -{ - struct device_node *np; - - np = of_get_cpu_node(cpu, NULL); - if (!np) - return -1; - - of_node_put(np); - return of_get_ibm_chip_id(np); -} -EXPORT_SYMBOL(cpu_to_chip_id); - /* Helper routines for cpu to core mapping */ int cpu_core_index_of_thread(int cpu) { diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 192b051df97..b3b144121cc 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -213,8 +213,6 @@ static u64 scan_dispatch_log(u64 stop_tb) if (i == be64_to_cpu(vpa->dtl_idx)) return 0; while (i < be64_to_cpu(vpa->dtl_idx)) { - if (dtl_consumer) - dtl_consumer(dtl, i); dtb = be64_to_cpu(dtl->timebase); tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + be32_to_cpu(dtl->ready_to_enqueue_time); @@ -227,6 +225,8 @@ static u64 scan_dispatch_log(u64 stop_tb) } if (dtb > stop_tb) break; + if (dtl_consumer) + dtl_consumer(dtl, i); stolen += tb_delta; ++i; ++dtl; diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 45ea281e9a2..542c6f422e4 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S @@ -142,6 +142,13 @@ V_FUNCTION_END(__kernel_sigtramp_rt64) /* Size of CR reg in DWARF unwind info. */ #define CRSIZE 4 +/* Offset of CR reg within a full word. */ +#ifdef __LITTLE_ENDIAN__ +#define CROFF 0 +#else +#define CROFF (RSIZE - CRSIZE) +#endif + /* This is the offset of the VMX reg pointer. */ #define VREGS 48*RSIZE+33*8 @@ -181,7 +188,14 @@ V_FUNCTION_END(__kernel_sigtramp_rt64) rsave (31, 31*RSIZE); \ rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \ rsave (65, 36*RSIZE); /* lr */ \ - rsave (70, 38*RSIZE + (RSIZE - CRSIZE)) /* cr */ + rsave (68, 38*RSIZE + CROFF); /* cr fields */ \ + rsave (69, 38*RSIZE + CROFF); \ + rsave (70, 38*RSIZE + CROFF); \ + rsave (71, 38*RSIZE + CROFF); \ + rsave (72, 38*RSIZE + CROFF); \ + rsave (73, 38*RSIZE + CROFF); \ + rsave (74, 38*RSIZE + CROFF); \ + rsave (75, 38*RSIZE + CROFF) /* Describe where the FP regs are saved. */ #define EH_FRAME_FP \ diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index e7d0c88f621..76a64821f4a 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1419,7 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) /* needed to ensure proper operation of coherent allocations * later, in case driver doesn't set it explicitly */ - dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64)); + dma_coerce_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64)); } /* register with generic device framework */ diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c index 6936547018b..c5f734e20b0 100644 --- a/arch/powerpc/mm/gup.c +++ b/arch/powerpc/mm/gup.c @@ -123,6 +123,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct mm_struct *mm = current->mm; unsigned long addr, len, end; unsigned long next; + unsigned long flags; pgd_t *pgdp; int nr = 0; @@ -156,7 +157,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, * So long as we atomically load page table pointers versus teardown, * we can follow the address down to the the page and take a ref on it. 
*/ - local_irq_disable(); + local_irq_save(flags); pgdp = pgd_offset(mm, addr); do { @@ -179,7 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, break; } while (pgdp++, addr = next, addr != end); - local_irq_enable(); + local_irq_restore(flags); return nr; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 3e99c149271..7ce9cf3b698 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr, slice = GET_HIGH_SLICE_INDEX(addr); *boundary_addr = (slice + end) ? ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP; - return !!(available.high_slices & (1u << slice)); + return !!(available.high_slices & (1ul << slice)); } } diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index c2a566fb8bb..132f8726a25 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -403,3 +403,14 @@ config PPC_DOORBELL default n endmenu + +config CPU_LITTLE_ENDIAN + bool "Build little endian kernel" + default n + help + This option selects whether a big endian or little endian kernel will + be built. + + Note that if cross compiling a little endian kernel, + CROSS_COMPILE must point to a toolchain capable of targeting + little endian powerpc. diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c index 8844628915d..1cb160dc160 100644 --- a/arch/powerpc/platforms/powernv/rng.c +++ b/arch/powerpc/platforms/powernv/rng.c @@ -19,6 +19,7 @@ #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> +#include <asm/smp.h> struct powernv_rng { diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 7fbc25b1813..ccb633e077b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -189,8 +189,9 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) struct eeh_dev *edev; struct eeh_pe pe; struct pci_dn *pdn = PCI_DN(dn); - const u32 *class_code, *vendor_id, *device_id; - const u32 *regs; + const __be32 *classp, *vendorp, *devicep; + u32 class_code; + const __be32 *regs; u32 pcie_flags; int enable = 0; int ret; @@ -201,22 +202,24 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) return NULL; /* Retrieve class/vendor/device IDs */ - class_code = of_get_property(dn, "class-code", NULL); - vendor_id = of_get_property(dn, "vendor-id", NULL); - device_id = of_get_property(dn, "device-id", NULL); + classp = of_get_property(dn, "class-code", NULL); + vendorp = of_get_property(dn, "vendor-id", NULL); + devicep = of_get_property(dn, "device-id", NULL); /* Skip for bad OF node or PCI-ISA bridge */ - if (!class_code || !vendor_id || !device_id) + if (!classp || !vendorp || !devicep) return NULL; if (dn->type && !strcmp(dn->type, "isa")) return NULL; + class_code = of_read_number(classp, 1); + /* * Update class code and mode of eeh device. We need * correctly reflects that current device is root port * or PCIe switch downstream port. 
*/ - edev->class_code = *class_code; + edev->class_code = class_code; edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP); edev->mode &= 0xFFFFFF00; if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { @@ -243,12 +246,12 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) /* Initialize the fake PE */ memset(&pe, 0, sizeof(struct eeh_pe)); pe.phb = edev->phb; - pe.config_addr = regs[0]; + pe.config_addr = of_read_number(regs, 1); /* Enable EEH on the device */ ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); if (!ret) { - edev->config_addr = regs[0]; + edev->config_addr = of_read_number(regs, 1); /* Retrieve PE address */ edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); pe.addr = edev->pe_config_addr; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 356bc75ca74..4fca3def9db 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -245,6 +245,23 @@ static void pSeries_lpar_hptab_clear(void) &(ptes[j].pteh), &(ptes[j].ptel)); } } + +#ifdef __LITTLE_ENDIAN__ + /* Reset exceptions to big endian */ + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { + long rc; + + rc = pseries_big_endian_exceptions(); + /* + * At this point it is unlikely panic() will get anything + * out to the user, but at least this will stop us from + * continuing on further and creating an even more + * difficult to debug situation. + */ + if (rc) + panic("Could not enable big endian exceptions"); + } +#endif } /* diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c index a702f1c0824..72a102758d4 100644 --- a/arch/powerpc/platforms/pseries/rng.c +++ b/arch/powerpc/platforms/pseries/rng.c @@ -13,6 +13,7 @@ #include <linux/of.h> #include <asm/archrandom.h> #include <asm/machdep.h> +#include <asm/plpar_wrappers.h> static int pseries_get_random_long(unsigned long *v) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 1f97e2b87a6..c1f19085870 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -442,6 +442,32 @@ static void pSeries_machine_kexec(struct kimage *image) } #endif +#ifdef __LITTLE_ENDIAN__ +long pseries_big_endian_exceptions(void) +{ + long rc; + + while (1) { + rc = enable_big_endian_exceptions(); + if (!H_IS_LONG_BUSY(rc)) + return rc; + mdelay(get_longbusy_msecs(rc)); + } +} + +static long pseries_little_endian_exceptions(void) +{ + long rc; + + while (1) { + rc = enable_little_endian_exceptions(); + if (!H_IS_LONG_BUSY(rc)) + return rc; + mdelay(get_longbusy_msecs(rc)); + } +} +#endif + static void __init pSeries_setup_arch(void) { panic_timeout = 10; @@ -698,6 +724,22 @@ static int __init pSeries_probe(void) /* Now try to figure out if we are running on LPAR */ of_scan_flat_dt(pseries_probe_fw_features, NULL); +#ifdef __LITTLE_ENDIAN__ + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { + long rc; + /* + * Tell the hypervisor that we want our exceptions to + * be taken in little endian mode. If this fails we don't + * want to use BUG() because it will trigger an exception. 
+ */ + rc = pseries_little_endian_exceptions(); + if (rc) { + ppc_md.progress("H_SET_MODE LE exception fail", 0); + panic("Could not enable little endian exceptions"); + } + } +#endif + if (firmware_has_feature(FW_FEATURE_LPAR)) hpte_init_lpar(); else diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c index 8ef53bc2e70..aaa46b35371 100644 --- a/arch/powerpc/platforms/wsp/chroma.c +++ b/arch/powerpc/platforms/wsp/chroma.c @@ -15,6 +15,7 @@ #include <linux/of.h> #include <linux/smp.h> #include <linux/time.h> +#include <linux/of_fdt.h> #include <asm/machdep.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c index d18e6cc19df..a3c87f39575 100644 --- a/arch/powerpc/platforms/wsp/h8.c +++ b/arch/powerpc/platforms/wsp/h8.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/of.h> #include <linux/io.h> +#include <linux/of_address.h> #include "wsp.h" diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c index 2d3b1dd9571..9cd92e64502 100644 --- a/arch/powerpc/platforms/wsp/ics.c +++ b/arch/powerpc/platforms/wsp/ics.c @@ -18,6 +18,8 @@ #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/types.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c index cb565bf9365..3f672980793 100644 --- a/arch/powerpc/platforms/wsp/opb_pic.c +++ b/arch/powerpc/platforms/wsp/opb_pic.c @@ -15,6 +15,8 @@ #include <linux/of.h> #include <linux/slab.h> #include <linux/time.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/reg_a2.h> #include <asm/irq.h> diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c index 508ec8282b9..a87b414c766 100644 --- a/arch/powerpc/platforms/wsp/psr2.c +++ b/arch/powerpc/platforms/wsp/psr2.c @@ -15,6 +15,7 @@ #include <linux/of.h> #include <linux/smp.h> #include <linux/time.h> +#include <linux/of_fdt.h> #include <asm/machdep.h> #include <asm/udbg.h> diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c index 8928507affe..6538b4de34f 100644 --- a/arch/powerpc/platforms/wsp/scom_wsp.c +++ b/arch/powerpc/platforms/wsp/scom_wsp.c @@ -14,6 +14,7 @@ #include <linux/of.h> #include <linux/spinlock.h> #include <linux/types.h> +#include <linux/of_address.h> #include <asm/cputhreads.h> #include <asm/reg_a2.h> diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c index ddb6efe8891..58cd1f00e1e 100644 --- a/arch/powerpc/platforms/wsp/wsp.c +++ b/arch/powerpc/platforms/wsp/wsp.c @@ -13,6 +13,7 @@ #include <linux/smp.h> #include <linux/delay.h> #include <linux/time.h> +#include <linux/of_address.h> #include <asm/scom.h> diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index f2737a005af..9a42ecec564 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -21,6 +21,6 @@ $(obj)/bzImage: $(obj)/compressed/vmlinux FORCE $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ -install: $(CONFIGURE) $(obj)/image - sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ +install: $(CONFIGURE) $(obj)/bzImage + sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ System.map "$(INSTALL_PATH)" diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 9b69c0befdc..4e63f1a1360 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ 
b/arch/s390/include/asm/ctl_reg.h @@ -7,6 +7,8 @@ #ifndef __ASM_CTL_REG_H #define __ASM_CTL_REG_H +#include <linux/bug.h> + #ifdef CONFIG_64BIT # define __CTL_LOAD "lctlg" # define __CTL_STORE "stctg" diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index dc9200ca32e..67026300c88 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -111,18 +111,7 @@ struct scm_driver { int scm_driver_register(struct scm_driver *scmdrv); void scm_driver_unregister(struct scm_driver *scmdrv); -int scm_start_aob(struct aob *aob); +int eadm_start_aob(struct aob *aob); void scm_irq_handler(struct aob *aob, int error); -struct eadm_ops { - int (*eadm_start) (struct aob *aob); - struct module *owner; -}; - -int scm_get_ref(void); -void scm_put_ref(void); - -void register_eadm_ops(struct eadm_ops *ops); -void unregister_eadm_ops(struct eadm_ops *ops); - #endif /* _ASM_S390_EADM_H */ diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index a908d2941c5..b7eabaaeffb 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -18,8 +18,6 @@ #define __ARCH_HAS_DO_SOFTIRQ #define __ARCH_IRQ_EXIT_IRQS_DISABLED -#define HARDIRQ_BITS 8 - static inline void ack_bad_irq(unsigned int irq) { printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 1cc185da9d3..c129ab2ac73 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -63,9 +63,10 @@ enum zpci_state { }; struct zpci_bar_struct { + struct resource *res; /* bus resource */ u32 val; /* bar start & 3 flag bits */ - u8 size; /* order 2 exponent */ u16 map_idx; /* index into bar mapping array */ + u8 size; /* order 2 exponent */ }; /* Private data per function */ @@ -97,6 +98,7 @@ struct zpci_dev { unsigned long iommu_pages; unsigned int next_bit; + char res_name[16]; struct zpci_bar_struct bars[PCI_BAR_COUNT]; u64 start_dma; /* Start of available DMA addresses */ @@ -122,12 +124,10 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) Prototypes ----------------------------------------------------------------------------- */ /* Base stuff */ -struct zpci_dev *zpci_alloc_device(void); int zpci_create_device(struct zpci_dev *); int zpci_enable_device(struct zpci_dev *); int zpci_disable_device(struct zpci_dev *); void zpci_stop_device(struct zpci_dev *); -void zpci_free_device(struct zpci_dev *); int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); int zpci_unregister_ioat(struct zpci_dev *, u8); diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 7dc7f9c63b6..30ef748bc16 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -43,7 +43,6 @@ struct sclp_cpu_info { int sclp_get_cpu_info(struct sclp_cpu_info *info); int sclp_cpu_configure(u8 cpu); int sclp_cpu_deconfigure(u8 cpu); -void sclp_facilities_detect(void); unsigned long long sclp_get_rnmax(void); unsigned long long sclp_get_rzm(void); int sclp_sdias_blk_count(void); @@ -57,5 +56,7 @@ bool sclp_has_vt220(void); int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); +unsigned long sclp_get_hsa_size(void); +void sclp_early_detect(void); #endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index df802ee14af..94cfbe442f1 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -107,9 +107,6 @@ 
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, #define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) #endif /* CONFIG_64BIT */ -#define ZFCPDUMP_HSA_SIZE (32UL<<20) -#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20) - /* * Console mode. Override with conmode= */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index eb5f64d26d0..10e0fcd3633 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -111,6 +111,4 @@ static inline struct thread_info *current_thread_info(void) #define is_32bit_task() (1) #endif -#define PREEMPT_ACTIVE 0x4000000 - #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index f45b2ab0cb8..d7658c4b2ed 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -95,7 +95,7 @@ static void *elfcorehdr_newmem; /* * Copy one page from zfcpdump "oldmem" * - * For pages below ZFCPDUMP_HSA_SIZE memory from the HSA is copied. Otherwise + * For pages below HSA size memory from the HSA is copied. Otherwise * real memory copy is used. */ static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize, @@ -103,7 +103,7 @@ static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize, { int rc; - if (src < ZFCPDUMP_HSA_SIZE) { + if (src < sclp_get_hsa_size()) { rc = memcpy_hsa(buf, src, csize, userbuf); } else { if (userbuf) @@ -188,18 +188,19 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma, /* * Remap "oldmem" for zfcpdump * - * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below - * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function. + * We only map available memory above HSA size. Memory below HSA size + * is read on demand using the copy_oldmem_page() function. 
*/ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot) { + unsigned long hsa_end = sclp_get_hsa_size(); unsigned long size_hsa; - if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) { - size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT)); + if (pfn < hsa_end >> PAGE_SHIFT) { + size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT)); if (size == size_hsa) return 0; size -= size_hsa; @@ -238,9 +239,9 @@ int copy_from_oldmem(void *dest, void *src, size_t count) return rc; } } else { - if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) { - copied = min(count, - ZFCPDUMP_HSA_SIZE - (unsigned long) src); + unsigned long hsa_end = sclp_get_hsa_size(); + if ((unsigned long) src < hsa_end) { + copied = min(count, hsa_end - (unsigned long) src); rc = memcpy_hsa(dest, (unsigned long) src, copied, 0); if (rc) return rc; @@ -580,6 +581,9 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) /* If elfcorehdr= has been passed via cmdline, we use that one */ if (elfcorehdr_addr != ELFCORE_ADDR_MAX) return 0; + /* If we cannot get HSA size for zfcpdump return error */ + if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size()) + return -ENODEV; mem_chunk_cnt = get_mem_chunk_cnt(); alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 96543ac400a..fca20b5fe79 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -483,7 +483,7 @@ void __init startup_init(void) detect_diag44(); detect_machine_facilities(); setup_topology(); - sclp_facilities_detect(); + sclp_early_detect(); #ifdef CONFIG_DYNAMIC_FTRACE S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; #endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index ffe1c53264a..4444875266e 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -471,8 +471,9 @@ static void __init setup_memory_end(void) #ifdef CONFIG_ZFCPDUMP - if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) { - memory_end = ZFCPDUMP_HSA_SIZE; + if (ipl_info.type == IPL_TYPE_FCP_DUMP && + !OLDMEM_BASE && sclp_get_hsa_size()) { + memory_end = sclp_get_hsa_size(); memory_end_set = 1; } #endif @@ -586,7 +587,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size, crash_base = (chunk->addr + chunk->size) - crash_size; if (crash_base < crash_size) continue; - if (crash_base < ZFCPDUMP_HSA_SIZE_MAX) + if (crash_base < sclp_get_hsa_size()) continue; if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) continue; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 0c9a17780e4..bf7c73d71ee 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -530,20 +530,6 @@ static void zpci_unmap_resources(struct zpci_dev *zdev) } } -struct zpci_dev *zpci_alloc_device(void) -{ - struct zpci_dev *zdev; - - /* Alloc memory for our private pci device data */ - zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); - return zdev ? 
: ERR_PTR(-ENOMEM); -} - -void zpci_free_device(struct zpci_dev *zdev) -{ - kfree(zdev); -} - int pcibios_add_platform_entries(struct pci_dev *pdev) { return zpci_sysfs_add_device(&pdev->dev); @@ -579,37 +565,6 @@ static void zpci_irq_exit(void) unregister_adapter_interrupt(&zpci_airq); } -static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size, - unsigned long flags, int domain) -{ - struct resource *r; - char *name; - int rc; - - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (!r) - return ERR_PTR(-ENOMEM); - r->start = start; - r->end = r->start + size - 1; - r->flags = flags; - r->parent = &iomem_resource; - name = kmalloc(18, GFP_KERNEL); - if (!name) { - kfree(r); - return ERR_PTR(-ENOMEM); - } - sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR); - r->name = name; - - rc = request_resource(&iomem_resource, r); - if (rc) { - kfree(r->name); - kfree(r); - return ERR_PTR(-ENOMEM); - } - return r; -} - static int zpci_alloc_iomap(struct zpci_dev *zdev) { int entry; @@ -633,6 +588,82 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry) spin_unlock(&zpci_iomap_lock); } +static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start, + unsigned long size, unsigned long flags) +{ + struct resource *r; + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return NULL; + + r->start = start; + r->end = r->start + size - 1; + r->flags = flags; + r->name = zdev->res_name; + + if (request_resource(&iomem_resource, r)) { + kfree(r); + return NULL; + } + return r; +} + +static int zpci_setup_bus_resources(struct zpci_dev *zdev, + struct list_head *resources) +{ + unsigned long addr, size, flags; + struct resource *res; + int i, entry; + + snprintf(zdev->res_name, sizeof(zdev->res_name), + "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR); + + for (i = 0; i < PCI_BAR_COUNT; i++) { + if (!zdev->bars[i].size) + continue; + entry = zpci_alloc_iomap(zdev); + if (entry < 0) + return entry; + zdev->bars[i].map_idx = entry; + + /* only MMIO is supported */ + flags = IORESOURCE_MEM; + if (zdev->bars[i].val & 8) + flags |= IORESOURCE_PREFETCH; + if (zdev->bars[i].val & 4) + flags |= IORESOURCE_MEM_64; + + addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48); + + size = 1UL << zdev->bars[i].size; + + res = __alloc_res(zdev, addr, size, flags); + if (!res) { + zpci_free_iomap(zdev, entry); + return -ENOMEM; + } + zdev->bars[i].res = res; + pci_add_resource(resources, res); + } + + return 0; +} + +static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) +{ + int i; + + for (i = 0; i < PCI_BAR_COUNT; i++) { + if (!zdev->bars[i].size) + continue; + + zpci_free_iomap(zdev, zdev->bars[i].map_idx); + release_resource(zdev->bars[i].res); + kfree(zdev->bars[i].res); + } +} + int pcibios_add_device(struct pci_dev *pdev) { struct zpci_dev *zdev = get_zdev(pdev); @@ -729,52 +760,6 @@ struct dev_pm_ops pcibios_pm_ops = { }; #endif /* CONFIG_HIBERNATE_CALLBACKS */ -static int zpci_scan_bus(struct zpci_dev *zdev) -{ - struct resource *res; - LIST_HEAD(resources); - int i; - - /* allocate mapping entry for each used bar */ - for (i = 0; i < PCI_BAR_COUNT; i++) { - unsigned long addr, size, flags; - int entry; - - if (!zdev->bars[i].size) - continue; - entry = zpci_alloc_iomap(zdev); - if (entry < 0) - return entry; - zdev->bars[i].map_idx = entry; - - /* only MMIO is supported */ - flags = IORESOURCE_MEM; - if (zdev->bars[i].val & 8) - flags |= IORESOURCE_PREFETCH; - if (zdev->bars[i].val & 4) - flags |= IORESOURCE_MEM_64; - - addr = ZPCI_IOMAP_ADDR_BASE + ((u64) 
entry << 48); - - size = 1UL << zdev->bars[i].size; - - res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain); - if (IS_ERR(res)) { - zpci_free_iomap(zdev, entry); - return PTR_ERR(res); - } - pci_add_resource(&resources, res); - } - - zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, - zdev, &resources); - if (!zdev->bus) - return -EIO; - - zdev->bus->max_bus_speed = zdev->max_bus_speed; - return 0; -} - static int zpci_alloc_domain(struct zpci_dev *zdev) { spin_lock(&zpci_domain_lock); @@ -795,6 +780,41 @@ static void zpci_free_domain(struct zpci_dev *zdev) spin_unlock(&zpci_domain_lock); } +void pcibios_remove_bus(struct pci_bus *bus) +{ + struct zpci_dev *zdev = get_zdev_by_bus(bus); + + zpci_exit_slot(zdev); + zpci_cleanup_bus_resources(zdev); + zpci_free_domain(zdev); + + spin_lock(&zpci_list_lock); + list_del(&zdev->entry); + spin_unlock(&zpci_list_lock); + + kfree(zdev); +} + +static int zpci_scan_bus(struct zpci_dev *zdev) +{ + LIST_HEAD(resources); + int ret; + + ret = zpci_setup_bus_resources(zdev, &resources); + if (ret) + return ret; + + zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, + zdev, &resources); + if (!zdev->bus) { + zpci_cleanup_bus_resources(zdev); + return -EIO; + } + + zdev->bus->max_bus_speed = zdev->max_bus_speed; + return 0; +} + int zpci_enable_device(struct zpci_dev *zdev) { int rc; diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 84147984224..c747394029e 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -155,9 +155,9 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured) int rc; zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); - zdev = zpci_alloc_device(); - if (IS_ERR(zdev)) - return PTR_ERR(zdev); + zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); + if (!zdev) + return -ENOMEM; zdev->fh = fh; zdev->fid = fid; @@ -178,7 +178,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured) return 0; error: - zpci_free_device(zdev); + kfree(zdev); return rc; } diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 278e671ec9a..800f064b0da 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <asm/pci_debug.h> +#include <asm/sclp.h> /* Content Code Description for PCI Function Error */ struct zpci_ccdf_err { @@ -42,10 +43,27 @@ struct zpci_ccdf_avail { u16 pec; /* PCI event code */ } __packed; -static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf) +void zpci_event_error(void *data) { + struct zpci_ccdf_err *ccdf = data; + struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); + + zpci_err("error CCDF:\n"); + zpci_err_hex(ccdf, sizeof(*ccdf)); + + if (!zdev) + return; + + pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", + pci_name(zdev->pdev), ccdf->pec, ccdf->fid); +} + +void zpci_event_availability(void *data) +{ + struct zpci_ccdf_avail *ccdf = data; struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); struct pci_dev *pdev = zdev ? zdev->pdev : NULL; + int ret; pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n", pdev ? 
pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); @@ -53,36 +71,47 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf) zpci_err_hex(ccdf, sizeof(*ccdf)); switch (ccdf->pec) { - case 0x0301: - zpci_enable_device(zdev); + case 0x0301: /* Standby -> Configured */ + if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) + break; + zdev->state = ZPCI_FN_STATE_CONFIGURED; + ret = zpci_enable_device(zdev); + if (ret) + break; + pci_rescan_bus(zdev->bus); break; - case 0x0302: + case 0x0302: /* Reserved -> Standby */ clp_add_pci_device(ccdf->fid, ccdf->fh, 0); break; - case 0x0306: + case 0x0303: /* Deconfiguration requested */ + if (pdev) + pci_stop_and_remove_bus_device(pdev); + + ret = zpci_disable_device(zdev); + if (ret) + break; + + ret = sclp_pci_deconfigure(zdev->fid); + zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret); + if (!ret) + zdev->state = ZPCI_FN_STATE_STANDBY; + + break; + case 0x0304: /* Configured -> Standby */ + if (pdev) + pci_stop_and_remove_bus_device(pdev); + + zpci_disable_device(zdev); + zdev->state = ZPCI_FN_STATE_STANDBY; + break; + case 0x0306: /* 0x308 or 0x302 for multiple devices */ clp_rescan_pci_devices(); break; + case 0x0308: /* Standby -> Reserved */ + pci_stop_root_bus(zdev->bus); + pci_remove_root_bus(zdev->bus); + break; default: break; } } - -void zpci_event_error(void *data) -{ - struct zpci_ccdf_err *ccdf = data; - struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); - - zpci_err("error CCDF:\n"); - zpci_err_hex(ccdf, sizeof(*ccdf)); - - if (!zdev) - return; - - pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", - pci_name(zdev->pdev), ccdf->pec, ccdf->fid); -} - -void zpci_event_availability(void *data) -{ - zpci_event_log_avail(data); -} diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h index 1425cc03487..656b7ada932 100644 --- a/arch/score/include/asm/thread_info.h +++ b/arch/score/include/asm/thread_info.h @@ -72,8 +72,6 @@ register struct thread_info *__current_thread_info __asm__("r28"); #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may need to diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 1fa8be40977..122f737a901 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -15,6 +15,7 @@ #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mtd/physmap.h> +#include <linux/mfd/tmio.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 45a93669289..ad27ffa65e2 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -41,8 +41,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - #if defined(CONFIG_4KSTACKS) #define THREAD_SHIFT 12 #else diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 9b6e4beeb29..ca46834294b 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -108,7 +108,7 @@ need_resched: and #(0xf0>>1), r0 ! interrupts off (exception path)? cmp/eq #(0xf0>>1), r0 bt noresched - mov.l 3f, r0 + mov.l 1f, r0 jsr @r0 ! 
call preempt_schedule_irq nop bra need_resched @@ -119,9 +119,7 @@ noresched: nop .align 2 -1: .long PREEMPT_ACTIVE -2: .long schedule -3: .long preempt_schedule_irq +1: .long preempt_schedule_irq #endif ENTRY(resume_userspace) diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h index 162007643cd..ee93923b7f8 100644 --- a/arch/sparc/include/asm/hardirq_32.h +++ b/arch/sparc/include/asm/hardirq_32.h @@ -7,7 +7,6 @@ #ifndef __SPARC_HARDIRQ_H #define __SPARC_HARDIRQ_H -#define HARDIRQ_BITS 8 #include <asm-generic/hardirq.h> #endif /* __SPARC_HARDIRQ_H */ diff --git a/arch/sparc/include/asm/hardirq_64.h b/arch/sparc/include/asm/hardirq_64.h index 7c29fd1a87a..f478ff1ddd0 100644 --- a/arch/sparc/include/asm/hardirq_64.h +++ b/arch/sparc/include/asm/hardirq_64.h @@ -14,6 +14,4 @@ void ack_bad_irq(unsigned int irq); -#define HARDIRQ_BITS 8 - #endif /* !(__SPARC64_HARDIRQ_H) */ diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index dd3807599bb..96efa7adc22 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -105,8 +105,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TI_W_SAVED 0x250 /* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */ -#define PREEMPT_ACTIVE 0x4000000 - /* * thread information flag bit numbers */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 5d9292ab107..a5f01ac6d0f 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -111,8 +111,6 @@ struct thread_info { #define THREAD_SHIFT PAGE_SHIFT #endif /* PAGE_SHIFT == 13 */ -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index f0d6a9700f4..3c3c89f5264 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -1,7 +1,6 @@ #ifndef _SPARC64_TLBFLUSH_H #define _SPARC64_TLBFLUSH_H -#include <linux/mm.h> #include <asm/mmu_context.h> /* TSB flush operations. 
*/ diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index a954eb81881..39f0c662f4c 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -312,12 +312,10 @@ to_kernel: nop cmp %l4, 0 bne,pn %xcc, kern_fpucheck - sethi %hi(PREEMPT_ACTIVE), %l6 - stw %l6, [%g6 + TI_PRE_COUNT] - call schedule + nop + call preempt_schedule_irq nop ba,pt %xcc, rtrap - stw %g0, [%g6 + TI_PRE_COUNT] #endif kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 brz,pt %l5, rt_continue diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 6b643790e4f..5322e530d09 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2565,8 +2565,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, { struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO); - pte_t *pte = NULL; - if (!page) return NULL; if (!pgtable_page_ctor(page)) { diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h index 822390f9a15..54110af2398 100644 --- a/arch/tile/include/asm/hardirq.h +++ b/arch/tile/include/asm/hardirq.h @@ -42,6 +42,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat); #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ -#define HARDIRQ_BITS 8 - #endif /* _ASM_TILE_HARDIRQ_H */ diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index b8aa6df3e10..729aa107f64 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -113,8 +113,6 @@ extern void _cpu_idle(void); #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * Thread information flags that various assembly files may need to access. * Keep flags accessed frequently in low bits, particular since it makes diff --git a/arch/um/Kconfig.char b/arch/um/Kconfig.char index b9d7c427668..f10738d68b2 100644 --- a/arch/um/Kconfig.char +++ b/arch/um/Kconfig.char @@ -6,10 +6,6 @@ config STDERR_CONSOLE help console driver which dumps all printk messages to stderr. 
-config STDIO_CONSOLE - bool - default y - config SSL bool "Virtual serial line" help diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index 8ddea1f8006..21ca44c4f6d 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -1,8 +1,3 @@ -config DEFCONFIG_LIST - string - option defconfig_list - default "arch/$ARCH/defconfig" - config UML bool default y diff --git a/arch/um/Makefile b/arch/um/Makefile index 133f7de2a13..48d92bbe62e 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -6,6 +6,17 @@ # Licensed under the GPL # +# select defconfig based on actual architecture +ifeq ($(SUBARCH),x86) + ifeq ($(shell uname -m),x86_64) + KBUILD_DEFCONFIG := x86_64_defconfig + else + KBUILD_DEFCONFIG := i386_defconfig + endif +else + KBUILD_DEFCONFIG := $(SUBARCH)_defconfig +endif + ARCH_DIR := arch/um OS := $(shell uname -s) # We require bash because the vmlinux link and loader script cpp use bash diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig new file mode 100644 index 00000000000..a12bf68c9f3 --- /dev/null +++ b/arch/um/configs/i386_defconfig @@ -0,0 +1,76 @@ +CONFIG_3_LEVEL_PGTABLES=y +# CONFIG_COMPACTION is not set +CONFIG_BINFMT_MISC=m +CONFIG_HOSTFS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_KERNEL_STACK_ORDER=1 +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_PID_NS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_IOSCHED_CFQ=m +CONFIG_SSL=y +CONFIG_NULL_CHAN=y +CONFIG_PORT_CHAN=y +CONFIG_PTY_CHAN=y +CONFIG_TTY_CHAN=y +CONFIG_XTERM_CHAN=y +CONFIG_CON_CHAN="pts" +CONFIG_SSL_CHAN="pts" +CONFIG_UML_SOUND=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_BLK_DEV_UBD=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_DUMMY=m +CONFIG_TUN=m +CONFIG_PPP=m +CONFIG_SLIP=m +CONFIG_LEGACY_PTY_COUNT=32 +# CONFIG_HW_RANDOM is not set +CONFIG_UML_RANDOM=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_LRO is not set +# CONFIG_IPV6 is not set +CONFIG_UML_NET=y +CONFIG_UML_NET_ETHERTAP=y +CONFIG_UML_NET_TUNTAP=y +CONFIG_UML_NET_SLIP=y +CONFIG_UML_NET_DAEMON=y +CONFIG_UML_NET_MCAST=y +CONFIG_UML_NET_SLIRP=y +CONFIG_EXT4_FS=y +CONFIG_REISERFS_FS=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NLS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_KERNEL=y diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig new file mode 100644 index 00000000000..3aab117bd55 --- /dev/null +++ b/arch/um/configs/x86_64_defconfig @@ -0,0 +1,75 @@ +# CONFIG_COMPACTION is not set +CONFIG_BINFMT_MISC=m +CONFIG_HOSTFS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_PID_NS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SLAB=y 
+CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_IOSCHED_CFQ=m +CONFIG_SSL=y +CONFIG_NULL_CHAN=y +CONFIG_PORT_CHAN=y +CONFIG_PTY_CHAN=y +CONFIG_TTY_CHAN=y +CONFIG_XTERM_CHAN=y +CONFIG_CON_CHAN="pts" +CONFIG_SSL_CHAN="pts" +CONFIG_UML_SOUND=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_BLK_DEV_UBD=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_DUMMY=m +CONFIG_TUN=m +CONFIG_PPP=m +CONFIG_SLIP=m +CONFIG_LEGACY_PTY_COUNT=32 +# CONFIG_HW_RANDOM is not set +CONFIG_UML_RANDOM=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_LRO is not set +# CONFIG_IPV6 is not set +CONFIG_UML_NET=y +CONFIG_UML_NET_ETHERTAP=y +CONFIG_UML_NET_TUNTAP=y +CONFIG_UML_NET_SLIP=y +CONFIG_UML_NET_DAEMON=y +CONFIG_UML_NET_MCAST=y +CONFIG_UML_NET_SLIRP=y +CONFIG_EXT4_FS=y +CONFIG_REISERFS_FS=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NLS=y +CONFIG_DEBUG_INFO=y +CONFIG_FRAME_WARN=1024 +CONFIG_DEBUG_KERNEL=y diff --git a/arch/um/defconfig b/arch/um/defconfig deleted file mode 100644 index 2665e6b683f..00000000000 --- a/arch/um/defconfig +++ /dev/null @@ -1,899 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# User Mode Linux/i386 3.3.0 Kernel Configuration -# -CONFIG_DEFCONFIG_LIST="arch/$ARCH/defconfig" -CONFIG_UML=y -CONFIG_MMU=y -CONFIG_NO_IOMEM=y -# CONFIG_TRACE_IRQFLAGS_SUPPORT is not set -CONFIG_LOCKDEP_SUPPORT=y -# CONFIG_STACKTRACE_SUPPORT is not set -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_HZ=100 - -# -# UML-specific options -# - -# -# Host processor type and features -# -# CONFIG_M486 is not set -# CONFIG_M586 is not set -# CONFIG_M586TSC is not set -# CONFIG_M586MMX is not set -CONFIG_M686=y -# CONFIG_MPENTIUMII is not set -# CONFIG_MPENTIUMIII is not set -# CONFIG_MPENTIUMM is not set -# CONFIG_MPENTIUM4 is not set -# CONFIG_MK6 is not set -# CONFIG_MK7 is not set -# CONFIG_MK8 is not set -# CONFIG_MCRUSOE is not set -# CONFIG_MEFFICEON is not set -# CONFIG_MWINCHIPC6 is not set -# CONFIG_MWINCHIP3D is not set -# CONFIG_MELAN is not set -# CONFIG_MGEODEGX1 is not set -# CONFIG_MGEODE_LX is not set -# CONFIG_MCYRIXIII is not set -# CONFIG_MVIAC3_2 is not set -# CONFIG_MVIAC7 is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -# CONFIG_X86_GENERIC is not set -CONFIG_X86_INTERNODE_CACHE_SHIFT=5 -CONFIG_X86_CMPXCHG=y -CONFIG_X86_L1_CACHE_SHIFT=5 -CONFIG_X86_XADD=y -CONFIG_X86_PPRO_FENCE=y -CONFIG_X86_WP_WORKS_OK=y -CONFIG_X86_INVLPG=y -CONFIG_X86_BSWAP=y -CONFIG_X86_POPAD_OK=y -CONFIG_X86_USE_PPRO_CHECKSUM=y -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=5 -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_CYRIX_32=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_CPU_SUP_TRANSMETA_32=y -CONFIG_CPU_SUP_UMC_32=y -CONFIG_UML_X86=y -# CONFIG_64BIT is not set -CONFIG_X86_32=y -# CONFIG_X86_64 is not set -# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set -CONFIG_RWSEM_GENERIC_SPINLOCK=y -# CONFIG_3_LEVEL_PGTABLES is not set -CONFIG_ARCH_HAS_SC_SIGNALS=y -CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA=y -CONFIG_GENERIC_HWEIGHT=y -# CONFIG_STATIC_LINK is not set -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_COMPACTION is not set -# CONFIG_PHYS_ADDR_T_64BIT is not set -CONFIG_ZONE_DMA_FLAG=0 
-CONFIG_VIRT_TO_BUS=y -# CONFIG_KSM is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_NEED_PER_CPU_KM=y -# CONFIG_CLEANCACHE is not set -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_LD_SCRIPT_DYN=y -CONFIG_BINFMT_ELF=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_HAVE_AOUT=y -# CONFIG_BINFMT_AOUT is not set -CONFIG_BINFMT_MISC=m -CONFIG_HOSTFS=y -# CONFIG_HPPFS is not set -CONFIG_MCONSOLE=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_KERNEL_STACK_ORDER=0 -# CONFIG_MMAPPER is not set -CONFIG_NO_DMA=y - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_BROKEN_ON_SMP=y -CONFIG_INIT_ENV_ARG_LIMIT=128 -CONFIG_CROSS_COMPILE="" -CONFIG_LOCALVERSION="" -CONFIG_LOCALVERSION_AUTO=y -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_BSD_PROCESS_ACCT=y -# CONFIG_BSD_PROCESS_ACCT_V3 is not set -# CONFIG_FHANDLE is not set -# CONFIG_TASKSTATS is not set -# CONFIG_AUDIT is not set - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_SHOW=y - -# -# RCU Subsystem -# -CONFIG_TINY_RCU=y -# CONFIG_PREEMPT_RCU is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_TREE_RCU_TRACE is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_CGROUP_MEMCG=y -CONFIG_CGROUP_MEMCG_SWAP=y -# CONFIG_CGROUP_MEMCG_SWAP_ENABLED is not set -# CONFIG_CGROUP_MEMCG_KMEM is not set -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_CFS_BANDWIDTH is not set -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -# CONFIG_CHECKPOINT_RESTORE is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -# CONFIG_USER_NS is not set -# CONFIG_PID_NS is not set -CONFIG_NET_NS=y -# CONFIG_SCHED_AUTOGROUP is not set -CONFIG_MM_OWNER=y -CONFIG_SYSFS_DEPRECATED=y -# CONFIG_SYSFS_DEPRECATED_V2 is not set -# CONFIG_RELAY is not set -# CONFIG_BLK_DEV_INITRD is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -CONFIG_HOTPLUG=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -# CONFIG_EMBEDDED is not set - -# -# Kernel Performance Events And Counters -# -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_COMPAT_BRK=y -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_PROFILING is not set - -# -# GCOV-based kernel profiling -# -# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_BLOCK=y -CONFIG_LBDAF=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_BLK_DEV_BSGLIB is not set -# CONFIG_BLK_DEV_INTEGRITY is not set - -# -# Partition Types -# -# CONFIG_PARTITION_ADVANCED is not set -CONFIG_MSDOS_PARTITION=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=m -# CONFIG_CFQ_GROUP_IOSCHED is not set -CONFIG_DEFAULT_DEADLINE=y -# CONFIG_DEFAULT_CFQ is not 
set -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="deadline" -# CONFIG_INLINE_SPIN_TRYLOCK is not set -# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK is not set -# CONFIG_INLINE_SPIN_LOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK_IRQ is not set -# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set -CONFIG_INLINE_SPIN_UNLOCK=y -# CONFIG_INLINE_SPIN_UNLOCK_BH is not set -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_READ_TRYLOCK is not set -# CONFIG_INLINE_READ_LOCK is not set -# CONFIG_INLINE_READ_LOCK_BH is not set -# CONFIG_INLINE_READ_LOCK_IRQ is not set -# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set -CONFIG_INLINE_READ_UNLOCK=y -# CONFIG_INLINE_READ_UNLOCK_BH is not set -CONFIG_INLINE_READ_UNLOCK_IRQ=y -# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_WRITE_TRYLOCK is not set -# CONFIG_INLINE_WRITE_LOCK is not set -# CONFIG_INLINE_WRITE_LOCK_BH is not set -# CONFIG_INLINE_WRITE_LOCK_IRQ is not set -# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set -CONFIG_INLINE_WRITE_UNLOCK=y -# CONFIG_INLINE_WRITE_UNLOCK_BH is not set -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set -# CONFIG_MUTEX_SPIN_ON_OWNER is not set -CONFIG_FREEZER=y - -# -# UML Character Devices -# -CONFIG_STDERR_CONSOLE=y -CONFIG_STDIO_CONSOLE=y -CONFIG_SSL=y -CONFIG_NULL_CHAN=y -CONFIG_PORT_CHAN=y -CONFIG_PTY_CHAN=y -CONFIG_TTY_CHAN=y -CONFIG_XTERM_CHAN=y -# CONFIG_NOCONFIG_CHAN is not set -CONFIG_CON_ZERO_CHAN="fd:0,fd:1" -CONFIG_CON_CHAN="xterm" -CONFIG_SSL_CHAN="pts" -CONFIG_UML_SOUND=m -CONFIG_SOUND=m -CONFIG_SOUND_OSS_CORE=y -CONFIG_HOSTAUDIO=m - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -CONFIG_FIRMWARE_IN_KERNEL=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -CONFIG_GENERIC_CPU_DEVICES=y -# CONFIG_DMA_SHARED_BUFFER is not set -# CONFIG_CONNECTOR is not set -# CONFIG_MTD is not set -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_UBD=y -# CONFIG_BLK_DEV_UBD_SYNC is not set -CONFIG_BLK_DEV_COW_COMMON=y -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set - -# -# DRBD disabled because PROC_FS, INET or CONNECTOR not selected -# -CONFIG_BLK_DEV_NBD=m -# CONFIG_BLK_DEV_RAM is not set -# CONFIG_ATA_OVER_ETH is not set -# CONFIG_BLK_DEV_RBD is not set - -# -# Misc devices -# -# CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_93CX6 is not set - -# -# Texas Instruments shared transport line discipline -# - -# -# Altera FPGA firmware download module -# - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -# CONFIG_RAID_ATTRS is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_NETLINK is not set -# CONFIG_MD is not set -CONFIG_NETDEVICES=y -CONFIG_NET_CORE=y -# CONFIG_BONDING is not set -CONFIG_DUMMY=m -# CONFIG_EQUALIZER is not set -# CONFIG_MII is not set -# CONFIG_NET_TEAM is not set -# CONFIG_MACVLAN is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -CONFIG_TUN=m -# CONFIG_VETH is not set - -# -# CAIF transport drivers -# -CONFIG_ETHERNET=y -CONFIG_NET_VENDOR_CHELSIO=y -CONFIG_NET_VENDOR_INTEL=y -CONFIG_NET_VENDOR_I825XX=y -CONFIG_NET_VENDOR_MARVELL=y 
-CONFIG_NET_VENDOR_NATSEMI=y -CONFIG_NET_VENDOR_8390=y -# CONFIG_PHYLIB is not set -CONFIG_PPP=m -# CONFIG_PPP_BSDCOMP is not set -# CONFIG_PPP_DEFLATE is not set -# CONFIG_PPP_FILTER is not set -# CONFIG_PPP_MPPE is not set -# CONFIG_PPP_MULTILINK is not set -# CONFIG_PPPOE is not set -# CONFIG_PPP_ASYNC is not set -# CONFIG_PPP_SYNC_TTY is not set -CONFIG_SLIP=m -CONFIG_SLHC=m -# CONFIG_SLIP_COMPRESSED is not set -# CONFIG_SLIP_SMART is not set -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_WLAN=y -# CONFIG_HOSTAP is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set - -# -# Character devices -# -CONFIG_UNIX98_PTYS=y -# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=32 -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -CONFIG_DEVKMEM=y -# CONFIG_HW_RANDOM is not set -CONFIG_UML_RANDOM=y -# CONFIG_R3964 is not set -# CONFIG_NSC_GPIO is not set -# CONFIG_RAW_DRIVER is not set - -# -# PPS support -# -# CONFIG_PPS is not set - -# -# PPS generators support -# - -# -# PTP clock support -# - -# -# Enable Device Drivers -> PPS to see the PTP clock options. -# -# CONFIG_POWER_SUPPLY is not set -# CONFIG_THERMAL is not set -# CONFIG_WATCHDOG is not set -# CONFIG_REGULATOR is not set -CONFIG_SOUND_OSS_CORE_PRECLAIM=y -# CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_ACCESSIBILITY is not set -# CONFIG_AUXDISPLAY is not set -# CONFIG_UIO is not set - -# -# Virtio drivers -# -# CONFIG_VIRTIO_BALLOON is not set - -# -# Microsoft Hyper-V guest support -# -# CONFIG_STAGING is not set - -# -# Hardware Spinlock drivers -# -CONFIG_IOMMU_SUPPORT=y -# CONFIG_VIRT_DRIVERS is not set -# CONFIG_PM_DEVFREQ is not set -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_UNIX=y -# CONFIG_UNIX_DIAG is not set -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set -# CONFIG_NET_KEY is not set -CONFIG_INET=y -# CONFIG_IP_MULTICAST is not set -# CONFIG_IP_ADVANCED_ROUTER is not set -# CONFIG_IP_PNP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE_DEMUX is not set -# CONFIG_ARPD is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y -# CONFIG_INET_LRO is not set -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -# CONFIG_INET_UDP_DIAG is not set -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -# CONFIG_IPV6 is not set -# CONFIG_NETWORK_SECMARK is not set -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -# CONFIG_NETFILTER is not set -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -# CONFIG_BRIDGE is not set -# CONFIG_NET_DSA is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_ECONET is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set -# CONFIG_IEEE802154 is not set -# CONFIG_NET_SCHED is not set -# CONFIG_DCB is not set -# CONFIG_BATMAN_ADV is not set -# CONFIG_OPENVSWITCH is 
not set -# CONFIG_NETPRIO_CGROUP is not set -CONFIG_BQL=y - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -# CONFIG_LIB80211 is not set - -# -# CFG80211 needs to be enabled for MAC80211 -# -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set -# CONFIG_NFC is not set - -# -# UML Network Devices -# -CONFIG_UML_NET=y -CONFIG_UML_NET_ETHERTAP=y -CONFIG_UML_NET_TUNTAP=y -CONFIG_UML_NET_SLIP=y -CONFIG_UML_NET_DAEMON=y -# CONFIG_UML_NET_VDE is not set -CONFIG_UML_NET_MCAST=y -# CONFIG_UML_NET_PCAP is not set -CONFIG_UML_NET_SLIRP=y - -# -# File systems -# -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT23=y -CONFIG_EXT4_FS_XATTR=y -# CONFIG_EXT4_FS_POSIX_ACL is not set -# CONFIG_EXT4_FS_SECURITY is not set -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -CONFIG_FS_MBCACHE=y -CONFIG_REISERFS_FS=y -# CONFIG_REISERFS_CHECK is not set -# CONFIG_REISERFS_PROC_INFO is not set -# CONFIG_REISERFS_FS_XATTR is not set -# CONFIG_JFS_FS is not set -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_BTRFS_FS is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -# CONFIG_FANOTIFY is not set -CONFIG_QUOTA=y -# CONFIG_QUOTA_NETLINK_INTERFACE is not set -CONFIG_PRINT_QUOTA_WARNING=y -# CONFIG_QUOTA_DEBUG is not set -# CONFIG_QFMT_V1 is not set -# CONFIG_QFMT_V2 is not set -CONFIG_QUOTACTL=y -CONFIG_AUTOFS4_FS=m -# CONFIG_FUSE_FS is not set - -# -# Caches -# -# CONFIG_FSCACHE is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -# CONFIG_ZISOFS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -# CONFIG_MSDOS_FS is not set -# CONFIG_VFAT_FS is not set -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -# CONFIG_TMPFS_POSIX_ACL is not set -# CONFIG_TMPFS_XATTR is not set -# CONFIG_HUGETLB_PAGE is not set -# CONFIG_CONFIGFS_FS is not set -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set -# CONFIG_CRAMFS is not set -# CONFIG_SQUASHFS is not set -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_PSTORE is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -# CONFIG_NFS_FS is not set -# CONFIG_NFSD is not set -# CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="iso8859-1" -# CONFIG_NLS_CODEPAGE_437 is not set -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 
is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -# CONFIG_NLS_ASCII is not set -# CONFIG_NLS_ISO8859_1 is not set -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_UTF8 is not set - -# -# Security options -# -# CONFIG_KEYS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITYFS is not set -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEFAULT_SECURITY="" -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -# CONFIG_CRYPTO_FIPS is not set -CONFIG_CRYPTO_ALGAPI=m -CONFIG_CRYPTO_ALGAPI2=m -CONFIG_CRYPTO_RNG=m -CONFIG_CRYPTO_RNG2=m -# CONFIG_CRYPTO_MANAGER is not set -# CONFIG_CRYPTO_MANAGER2 is not set -# CONFIG_CRYPTO_USER is not set -# CONFIG_CRYPTO_GF128MUL is not set -# CONFIG_CRYPTO_NULL is not set -# CONFIG_CRYPTO_CRYPTD is not set -# CONFIG_CRYPTO_AUTHENC is not set -# CONFIG_CRYPTO_TEST is not set - -# -# Authenticated Encryption with Associated Data -# -# CONFIG_CRYPTO_CCM is not set -# CONFIG_CRYPTO_GCM is not set -# CONFIG_CRYPTO_SEQIV is not set - -# -# Block modes -# -# CONFIG_CRYPTO_CBC is not set -# CONFIG_CRYPTO_CTR is not set -# CONFIG_CRYPTO_CTS is not set -# CONFIG_CRYPTO_ECB is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_XTS is not set - -# -# Hash modes -# -# CONFIG_CRYPTO_HMAC is not set -# CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_VMAC is not set - -# -# Digest -# -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_GHASH is not set -# CONFIG_CRYPTO_MD4 is not set -# CONFIG_CRYPTO_MD5 is not set -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_RMD128 is not set -# CONFIG_CRYPTO_RMD160 is not set -# CONFIG_CRYPTO_RMD256 is not set -# CONFIG_CRYPTO_RMD320 is not set -# CONFIG_CRYPTO_SHA1 is not set -# CONFIG_CRYPTO_SHA256 is not set -# CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_WP512 is not set - -# -# Ciphers -# -CONFIG_CRYPTO_AES=m -# CONFIG_CRYPTO_AES_586 is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_ARC4 is not set -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_DES is not set -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_SALSA20 is not set -# CONFIG_CRYPTO_SALSA20_586 is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_TWOFISH is not set -# CONFIG_CRYPTO_TWOFISH_586 is not set - -# -# Compression -# -# CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_ZLIB is not set -# CONFIG_CRYPTO_LZO is not set - -# -# Random Number Generation 
-# -CONFIG_CRYPTO_ANSI_CPRNG=m -# CONFIG_CRYPTO_USER_API_HASH is not set -# CONFIG_CRYPTO_USER_API_SKCIPHER is not set -CONFIG_CRYPTO_HW=y -# CONFIG_BINARY_PRINTF is not set - -# -# Library routines -# -CONFIG_BITREVERSE=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_GENERIC_IO=y -# CONFIG_CRC_CCITT is not set -CONFIG_CRC16=y -# CONFIG_CRC_T10DIF is not set -# CONFIG_CRC_ITU_T is not set -CONFIG_CRC32=y -# CONFIG_CRC7 is not set -# CONFIG_LIBCRC32C is not set -# CONFIG_CRC8 is not set -# CONFIG_XZ_DEC is not set -# CONFIG_XZ_DEC_BCJ is not set -CONFIG_DQL=y -CONFIG_NLATTR=y -# CONFIG_AVERAGE is not set -# CONFIG_CORDIC is not set - -# -# Kernel hacking -# -# CONFIG_PRINTK_TIME is not set -CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=1024 -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_DEBUG_KERNEL=y -# CONFIG_DEBUG_SHIRQ is not set -# CONFIG_LOCKUP_DETECTOR is not set -# CONFIG_HARDLOCKUP_DETECTOR is not set -# CONFIG_DETECT_HUNG_TASK is not set -CONFIG_SCHED_DEBUG=y -# CONFIG_SCHEDSTATS is not set -# CONFIG_TIMER_STATS is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_WRITECOUNT is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_LIST is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set -CONFIG_FRAME_POINTER=y -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_SYSCTL_SYSCALL_CHECK is not set -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_SAMPLES is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_GPROF is not set -# CONFIG_GCOV is not set -CONFIG_EARLY_PRINTK=y diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 3df3bd54449..29880c9b324 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -645,11 +645,9 @@ void mconsole_sysrq(struct mc_request *req) static void stack_proc(void *arg) { - struct task_struct *from = current, *to = arg; + struct task_struct *task = arg; - to->thread.saved_task = from; - rcu_user_hooks_switch(from, to); - switch_to(from, to, from); + show_stack(task, NULL); } /* diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h index c03cd5a0236..d89b02bb626 100644 --- a/arch/um/include/asm/processor-generic.h +++ b/arch/um/include/asm/processor-generic.h @@ -19,8 +19,8 @@ struct task_struct; struct mm_struct; struct thread_struct { - struct task_struct *saved_task; struct pt_regs regs; + struct pt_regs *segv_regs; int singlestep_syscall; void *fault_addr; jmp_buf *fault_catcher; diff --git a/arch/um/include/asm/thread_info.h 
b/arch/um/include/asm/thread_info.h index 2c8eeb2df8b..1c5b2a83046 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void) #endif -#define PREEMPT_ACTIVE 0x10000000 - #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h index 694c792bab4..41c8c774ec1 100644 --- a/arch/um/include/shared/as-layout.h +++ b/arch/um/include/shared/as-layout.h @@ -44,7 +44,6 @@ struct cpu_task { extern struct cpu_task cpu_tasks[]; -extern unsigned long low_physmem; extern unsigned long high_physmem; extern unsigned long uml_physmem; extern unsigned long uml_reserved; @@ -52,8 +51,6 @@ extern unsigned long end_vm; extern unsigned long start_vm; extern unsigned long long highmem; -extern unsigned long _stext, _etext, _sdata, _edata, __bss_start, _end; -extern unsigned long _unprotected_end; extern unsigned long brk_start; extern unsigned long host_task_size; diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 021104d98cb..75298d3358e 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -227,6 +227,7 @@ extern void block_signals(void); extern void unblock_signals(void); extern int get_signals(void); extern int set_signals(int enable); +extern int os_is_signal_stack(void); /* util.c */ extern void stack_protections(unsigned long address); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index bbcef522bcb..eecc4142764 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -82,19 +82,8 @@ void *__switch_to(struct task_struct *from, struct task_struct *to) to->thread.prev_sched = from; set_current(to); - do { - current->thread.saved_task = NULL; - - switch_threads(&from->thread.switch_buf, - &to->thread.switch_buf); - - arch_switch_to(current); - - if (current->thread.saved_task) - show_regs(&(current->thread.regs)); - to = current->thread.saved_task; - from = current; - } while (current->thread.saved_task); + switch_threads(&from->thread.switch_buf, &to->thread.switch_buf); + arch_switch_to(current); return current->thread.prev_sched; } diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index 0dc4d1c6f98..4d6fdf68edf 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -1,6 +1,10 @@ /* * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) - * Licensed under the GPL + * Copyright (C) 2013 Richard Weinberger <richrd@nod.at> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #include <linux/kallsyms.h> @@ -8,59 +12,87 @@ #include <linux/module.h> #include <linux/sched.h> #include <asm/sysrq.h> +#include <os.h> -/* Catch non-i386 SUBARCH's. 
*/ -#if !defined(CONFIG_UML_X86) || defined(CONFIG_64BIT) -void show_trace(struct task_struct *task, unsigned long * stack) +struct stack_frame { + struct stack_frame *next_frame; + unsigned long return_address; +}; + +static void print_stack_trace(unsigned long *sp, unsigned long bp) { + int reliable; unsigned long addr; + struct stack_frame *frame = (struct stack_frame *)bp; - if (!stack) { - stack = (unsigned long*) &stack; - WARN_ON(1); - } - - printk(KERN_INFO "Call Trace: \n"); - while (((long) stack & (THREAD_SIZE-1)) != 0) { - addr = *stack; + printk(KERN_INFO "Call Trace:\n"); + while (((long) sp & (THREAD_SIZE-1)) != 0) { + addr = *sp; if (__kernel_text_address(addr)) { - printk(KERN_INFO "%08lx: [<%08lx>]", - (unsigned long) stack, addr); - print_symbol(KERN_CONT " %s", addr); + reliable = 0; + if ((unsigned long) sp == bp + sizeof(long)) { + frame = frame ? frame->next_frame : NULL; + bp = (unsigned long)frame; + reliable = 1; + } + + printk(KERN_INFO " [<%08lx>]", addr); + printk(KERN_CONT " %s", reliable ? "" : "? "); + print_symbol(KERN_CONT "%s", addr); printk(KERN_CONT "\n"); } - stack++; + sp++; } printk(KERN_INFO "\n"); } -#endif -/*Stolen from arch/i386/kernel/traps.c */ -static const int kstack_depth_to_print = 24; +static unsigned long get_frame_pointer(struct task_struct *task, + struct pt_regs *segv_regs) +{ + if (!task || task == current) + return segv_regs ? PT_REGS_BP(segv_regs) : current_bp(); + else + return KSTK_EBP(task); +} -/* This recently started being used in arch-independent code too, as in - * kernel/sched/core.c.*/ -void show_stack(struct task_struct *task, unsigned long *esp) +static unsigned long *get_stack_pointer(struct task_struct *task, + struct pt_regs *segv_regs) { - unsigned long *stack; + if (!task || task == current) + return segv_regs ? 
(unsigned long *)PT_REGS_SP(segv_regs) : current_sp(); + else + return (unsigned long *)KSTK_ESP(task); +} + +void show_stack(struct task_struct *task, unsigned long *stack) +{ + unsigned long *sp = stack, bp = 0; + struct pt_regs *segv_regs = current->thread.segv_regs; int i; - if (esp == NULL) { - if (task != current && task != NULL) { - esp = (unsigned long *) KSTK_ESP(task); - } else { - esp = (unsigned long *) &esp; - } + if (!segv_regs && os_is_signal_stack()) { + printk(KERN_ERR "Received SIGSEGV in SIGSEGV handler," + " aborting stack trace!\n"); + return; } - stack = esp; - for (i = 0; i < kstack_depth_to_print; i++) { +#ifdef CONFIG_FRAME_POINTER + bp = get_frame_pointer(task, segv_regs); +#endif + + if (!stack) + sp = get_stack_pointer(task, segv_regs); + + printk(KERN_INFO "Stack:\n"); + stack = sp; + for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) { if (kstack_end(stack)) break; - if (i && ((i % 8) == 0)) - printk(KERN_INFO " "); - printk(KERN_CONT "%08lx ", *stack++); + if (i && ((i % STACKSLOTS_PER_LINE) == 0)) + printk(KERN_CONT "\n"); + printk(KERN_CONT " %08lx", *stack++); } + printk(KERN_CONT "\n"); - show_trace(task, esp); + print_stack_trace(sp, bp); } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 5c3aef74237..974b87474a9 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -206,9 +206,12 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, int is_write = FAULT_WRITE(fi); unsigned long address = FAULT_ADDRESS(fi); + if (regs) + current->thread.segv_regs = container_of(regs, struct pt_regs, regs); + if (!is_user && (address >= start_vm) && (address < end_vm)) { flush_tlb_kernel_vm(); - return 0; + goto out; } else if (current->mm == NULL) { show_regs(container_of(regs, struct pt_regs, regs)); @@ -230,7 +233,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, catcher = current->thread.fault_catcher; if (!err) - return 0; + goto out; else if (catcher != NULL) { current->thread.fault_addr = (void *) address; UML_LONGJMP(catcher, 1); @@ -238,7 +241,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, else if (current->thread.fault_addr != NULL) panic("fault_addr set but no fault catcher"); else if (!is_user && arch_fixup(ip, regs)) - return 0; + goto out; if (!is_user) { show_regs(container_of(regs, struct pt_regs, regs)); @@ -262,6 +265,11 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, current->thread.arch.faultinfo = fi; force_sig_info(SIGSEGV, &si, current); } + +out: + if (regs) + current->thread.segv_regs = NULL; + return 0; } diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 87df5e3acc2..016adf0985d 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -13,6 +13,7 @@ #include <linux/sched.h> #include <asm/pgtable.h> #include <asm/processor.h> +#include <asm/sections.h> #include <asm/setup.h> #include <as-layout.h> #include <arch.h> @@ -234,7 +235,6 @@ static int panic_exit(struct notifier_block *self, unsigned long unused1, void *unused2) { bust_spinlocks(1); - show_regs(&(current->thread.regs)); bust_spinlocks(0); uml_exitcode = 1; os_dump_core(); diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index 905924b773d..7b605e4dfff 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -304,3 +304,11 @@ int set_signals(int enable) return ret; } + +int os_is_signal_stack(void) +{ + stack_t ss; + sigaltstack(NULL, &ss); + + return ss.ss_flags & SS_ONSTACK; +} diff --git 
a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h index 818b4a1edb5..af36d8eabdf 100644 --- a/arch/unicore32/include/asm/thread_info.h +++ b/arch/unicore32/include/asm/thread_info.h @@ -118,12 +118,6 @@ static inline struct thread_info *current_thread_info(void) #endif /* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is occurring. See <asm/hardirq.h>. - */ -#define PREEMPT_ACTIVE 0x40000000 - -/* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active * TIF_SIGPENDING - signal pending diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 7d7443283a9..947b5c417e8 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -15,7 +15,7 @@ struct pci_sysdata { int domain; /* PCI domain */ int node; /* NUMA node */ #ifdef CONFIG_ACPI - void *acpi; /* ACPI-specific data */ + struct acpi_device *companion; /* ACPI companion device */ #endif #ifdef CONFIG_X86_64 void *iommu; /* IOMMU private data */ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index c46a46be1ec..3ba3de457d0 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -153,8 +153,6 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) -#define PREEMPT_ACTIVE 0x10000000 - #ifdef CONFIG_X86_32 #define STACK_WARN (THREAD_SIZE/8) diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index b93e09a0fa2..37813b5ddc3 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -147,6 +147,8 @@ #define MSR_PP1_ENERGY_STATUS 0x00000641 #define MSR_PP1_POLICY 0x00000642 +#define MSR_CORE_C1_RES 0x00000660 + #define MSR_AMD64_MC0_MASK 0xc0010044 #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ed165d65738..d278736bf77 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -62,6 +62,7 @@ unsigned disabled_cpus; /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid = -1U; +EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid); /* * The highest APIC ID seen during enumeration. diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index a7cccb6d7fe..c96314abd14 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -61,6 +61,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) #if PAGETABLE_LEVELS > 2 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { + struct page *page = virt_to_page(pmd); paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); /* * NOTE! 
For PAE, any changes to the top page-directory-pointer-table @@ -69,7 +70,8 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) #ifdef CONFIG_X86_PAE tlb->need_flush_all = 1; #endif - tlb_remove_page(tlb, virt_to_page(pmd)); + pgtable_pmd_page_dtor(page); + tlb_remove_page(tlb, page); } #if PAGETABLE_LEVELS > 3 @@ -209,7 +211,7 @@ static int preallocate_pmds(pmd_t *pmds[]) if (!pmd) failed = true; if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { - free_page((unsigned long)pmds[i]); + free_page((unsigned long)pmd); pmd = NULL; failed = true; } diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 7fb24e53d4c..4f25ec07755 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -518,7 +518,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) sd = &info->sd; sd->domain = domain; sd->node = node; - sd->acpi = device->handle; + sd->companion = device; /* * Maybe the desired pci bus has been already scanned. In such case * it is unnecessary to scan the pci bus with the given domain,busnum. @@ -589,7 +589,7 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { struct pci_sysdata *sd = bridge->bus->sysdata; - ACPI_HANDLE_SET(&bridge->dev, sd->acpi); + ACPI_COMPANION_SET(&bridge->dev, sd->companion); return 0; } diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index 14ef8d1dbc3..ed56a1c4ae7 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -31,6 +31,11 @@ config X86_64 def_bool 64BIT select MODULES_USE_ELF_RELA +config ARCH_DEFCONFIG + string + default "arch/um/configs/i386_defconfig" if X86_32 + default "arch/um/configs/x86_64_defconfig" if X86_64 + config RWSEM_XCHGADD_ALGORITHM def_bool 64BIT diff --git a/arch/x86/um/asm/processor_32.h b/arch/x86/um/asm/processor_32.h index 6c6689e574c..c112de81c9e 100644 --- a/arch/x86/um/asm/processor_32.h +++ b/arch/x86/um/asm/processor_32.h @@ -33,6 +33,8 @@ struct arch_thread { .faultinfo = { 0, 0, 0 } \ } +#define STACKSLOTS_PER_LINE 8 + static inline void arch_flush_thread(struct arch_thread *thread) { /* Clear any TLS still hanging */ @@ -53,4 +55,7 @@ static inline void arch_copy_thread(struct arch_thread *from, #define current_text_addr() \ ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) +#define current_sp() ({ void *sp; __asm__("movl %%esp, %0" : "=r" (sp) : ); sp; }) +#define current_bp() ({ unsigned long bp; __asm__("movl %%ebp, %0" : "=r" (bp) : ); bp; }) + #endif diff --git a/arch/x86/um/asm/processor_64.h b/arch/x86/um/asm/processor_64.h index 4b02a8455bd..c3be85205a6 100644 --- a/arch/x86/um/asm/processor_64.h +++ b/arch/x86/um/asm/processor_64.h @@ -19,6 +19,8 @@ struct arch_thread { .fs = 0, \ .faultinfo = { 0, 0, 0 } } +#define STACKSLOTS_PER_LINE 4 + static inline void arch_flush_thread(struct arch_thread *thread) { } @@ -32,4 +34,7 @@ static inline void arch_copy_thread(struct arch_thread *from, #define current_text_addr() \ ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; }) +#define current_sp() ({ void *sp; __asm__("movq %%rsp, %0" : "=r" (sp) : ); sp; }) +#define current_bp() ({ unsigned long bp; __asm__("movq %%rbp, %0" : "=r" (bp) : ); bp; }) + #endif diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c index c9bee5b8c0d..16ee0e450e3 100644 --- a/arch/x86/um/sysrq_32.c +++ b/arch/x86/um/sysrq_32.c @@ -30,70 +30,4 @@ void show_regs(struct pt_regs *regs) printk(" DS: %04lx ES: %04lx\n", 0xffff & PT_REGS_DS(regs), 0xffff & PT_REGS_ES(regs)); - - show_trace(NULL, (unsigned long *) ®s); } - -/* Copied from i386. 
*/ -static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) -{ - return p > (void *)tinfo && - p < (void *)tinfo + THREAD_SIZE - 3; -} - -/* Adapted from i386 (we also print the address we read from). */ -static inline unsigned long print_context_stack(struct thread_info *tinfo, - unsigned long *stack, unsigned long ebp) -{ - unsigned long addr; - -#ifdef CONFIG_FRAME_POINTER - while (valid_stack_ptr(tinfo, (void *)ebp)) { - addr = *(unsigned long *)(ebp + 4); - printk("%08lx: [<%08lx>]", ebp + 4, addr); - print_symbol(" %s", addr); - printk("\n"); - ebp = *(unsigned long *)ebp; - } -#else - while (valid_stack_ptr(tinfo, stack)) { - addr = *stack; - if (__kernel_text_address(addr)) { - printk("%08lx: [<%08lx>]", (unsigned long) stack, addr); - print_symbol(" %s", addr); - printk("\n"); - } - stack++; - } -#endif - return ebp; -} - -void show_trace(struct task_struct* task, unsigned long * stack) -{ - unsigned long ebp; - struct thread_info *context; - - /* Turn this into BUG_ON if possible. */ - if (!stack) { - stack = (unsigned long*) &stack; - printk("show_trace: got NULL stack, implicit assumption task == current"); - WARN_ON(1); - } - - if (!task) - task = current; - - if (task != current) { - ebp = (unsigned long) KSTK_EBP(task); - } else { - asm ("movl %%ebp, %0" : "=r" (ebp) : ); - } - - context = (struct thread_info *) - ((unsigned long)stack & (~(THREAD_SIZE - 1))); - print_context_stack(context, stack, ebp); - - printk("\n"); -} - diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c index a0e7fb1134a..38b4e4abd0f 100644 --- a/arch/x86/um/sysrq_64.c +++ b/arch/x86/um/sysrq_64.c @@ -12,7 +12,7 @@ #include <asm/ptrace.h> #include <asm/sysrq.h> -void __show_regs(struct pt_regs *regs) +void show_regs(struct pt_regs *regs) { printk("\n"); print_modules(); @@ -33,9 +33,3 @@ void __show_regs(struct pt_regs *regs) printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs)); } - -void show_regs(struct pt_regs *regs) -{ - __show_regs(regs); - show_trace(current, (unsigned long *) ®s); -} diff --git a/arch/x86/um/vdso/.gitignore b/arch/x86/um/vdso/.gitignore new file mode 100644 index 00000000000..9cac6d07219 --- /dev/null +++ b/arch/x86/um/vdso/.gitignore @@ -0,0 +1,2 @@ +vdso-syms.lds +vdso.lds diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 9481004ac11..470153e8547 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -76,8 +76,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/block/blk-mq.c b/block/blk-mq.c index 862f458d476..cdc629cf075 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) } EXPORT_SYMBOL(blk_mq_can_queue); -static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq, - unsigned int rw_flags) +static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, + struct request *rq, unsigned int rw_flags) { + if (blk_queue_io_stat(q)) + rw_flags |= REQ_IO_STAT; + rq->mq_ctx = ctx; rq->cmd_flags = rw_flags; ctx->rq_dispatched[rw_is_sync(rw_flags)]++; @@ -197,7 +200,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved); if (rq) { - blk_mq_rq_ctx_init(ctx, rq, rw); + blk_mq_rq_ctx_init(q, ctx, rq, rw); break; } else 
if (!(gfp & __GFP_WAIT)) break; @@ -718,6 +721,8 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, { struct blk_mq_ctx *ctx = rq->mq_ctx; + trace_block_rq_insert(hctx->queue, rq); + list_add_tail(&rq->queuelist, &ctx->rq_list); blk_mq_hctx_mark_pending(hctx, ctx); @@ -921,7 +926,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) trace_block_getrq(q, bio, rw); rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); if (likely(rq)) - blk_mq_rq_ctx_init(ctx, rq, rw); + blk_mq_rq_ctx_init(q, ctx, rq, rw); else { blk_mq_put_ctx(ctx); trace_block_sleeprq(q, bio, rw); @@ -1377,6 +1382,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, q->queue_hw_ctx = hctxs; q->mq_ops = reg->ops; + q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; blk_queue_make_request(q, blk_mq_make_request); blk_queue_rq_timed_out(q, reg->ops->timeout); diff --git a/block/partitions/efi.c b/block/partitions/efi.c index a8287b49d06..dc51f467a56 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c @@ -96,6 +96,7 @@ * - Code works, detects all the partitions. * ************************************************************/ +#include <linux/kernel.h> #include <linux/crc32.h> #include <linux/ctype.h> #include <linux/math64.h> @@ -715,8 +716,8 @@ int efi_partition(struct parsed_partitions *state) efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid); /* Naively convert UTF16-LE to 7 bits. */ - label_max = min(sizeof(info->volname) - 1, - sizeof(ptes[i].partition_name)); + label_max = min(ARRAY_SIZE(info->volname) - 1, + ARRAY_SIZE(ptes[i].partition_name)); info->volname[label_max] = 0; while (label_count < label_max) { u8 c = ptes[i].partition_name[label_count] & 0xff; diff --git a/crypto/Kconfig b/crypto/Kconfig index 71f337aefa3..4ae5734fb47 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1402,6 +1402,9 @@ config CRYPTO_USER_API_SKCIPHER This option enables the user-spaces interface for symmetric key cipher algorithms. +config CRYPTO_HASH_INFO + bool + source "drivers/crypto/Kconfig" source crypto/asymmetric_keys/Kconfig diff --git a/crypto/Makefile b/crypto/Makefile index 80019ba8da3..b3a7e807e08 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -104,3 +104,4 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o obj-$(CONFIG_XOR_BLOCKS) += xor.o obj-$(CONFIG_ASYNC_CORE) += async_tx/ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ +obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 6d2c2ea1255..03a6eb95ab5 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -12,6 +12,8 @@ if ASYMMETRIC_KEY_TYPE config ASYMMETRIC_PUBLIC_KEY_SUBTYPE tristate "Asymmetric public-key crypto algorithm subtype" select MPILIB + select PUBLIC_KEY_ALGO_RSA + select CRYPTO_HASH_INFO help This option provides support for asymmetric public key type handling. If signature generation and/or verification are to be used, @@ -20,8 +22,8 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE config PUBLIC_KEY_ALGO_RSA tristate "RSA public-key algorithm" - depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE select MPILIB_EXTRA + select MPILIB help This option enables support for the RSA algorithm (PKCS#1, RFC3447). 
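The efi_partition() hunk in block/partitions/efi.c above switches the label-copy bound from sizeof() to ARRAY_SIZE(). Because partition_name holds 16-bit UTF-16LE code units, sizeof() counts bytes and so reports twice as many characters as the array actually holds, while ARRAY_SIZE() counts elements. Below is a minimal, self-contained sketch of the corrected bounding logic; the structures and sizes are illustrative stand-ins, not the kernel's real gpt_entry and partition_meta_info definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Illustrative stand-ins only; field sizes are assumptions for the
     * sake of the example, not the kernel's actual layouts. */
    struct fake_gpt_entry {
            uint16_t partition_name[36];    /* UTF-16LE code units */
    };

    struct fake_part_info {
            char volname[64];
    };

    int main(void)
    {
            struct fake_gpt_entry pte = { { 'E', 'F', 'I', ' ', 's', 'y', 's' } };
            struct fake_part_info info;
            size_t label_max, label_count = 0;

            /* sizeof(pte.partition_name) is 72 (bytes); ARRAY_SIZE() is 36
             * (elements).  The pre-patch bound counted bytes, letting the
             * per-character loop index past the 36 available code units. */
            label_max = ARRAY_SIZE(info.volname) - 1;
            if (label_max > ARRAY_SIZE(pte.partition_name))
                    label_max = ARRAY_SIZE(pte.partition_name);

            /* Naive UTF-16LE to 7-bit conversion, loosely mirroring
             * efi_partition(): keep only the low byte of each code unit. */
            while (label_count < label_max) {
                    uint8_t c = (uint8_t)(pte.partition_name[label_count] & 0xff);

                    if (!c)
                            break;
                    info.volname[label_count++] = (char)c;
            }
            info.volname[label_count] = 0;

            printf("volname = \"%s\"\n", info.volname);
            return 0;
    }

Bounding the copy by element count keeps the conversion inside the name field; a byte-count bound would walk twice as many code units as exist in a 16-bit array of that length.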
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index cf807654d22..b77eb530478 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -209,6 +209,7 @@ struct key_type key_type_asymmetric = { .match = asymmetric_key_match, .destroy = asymmetric_key_destroy, .describe = asymmetric_key_describe, + .def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE, }; EXPORT_SYMBOL_GPL(key_type_asymmetric); diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index cb2e29180a8..97eb001960b 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -22,29 +22,25 @@ MODULE_LICENSE("GPL"); -const char *const pkey_algo[PKEY_ALGO__LAST] = { +const char *const pkey_algo_name[PKEY_ALGO__LAST] = { [PKEY_ALGO_DSA] = "DSA", [PKEY_ALGO_RSA] = "RSA", }; -EXPORT_SYMBOL_GPL(pkey_algo); +EXPORT_SYMBOL_GPL(pkey_algo_name); -const char *const pkey_hash_algo[PKEY_HASH__LAST] = { - [PKEY_HASH_MD4] = "md4", - [PKEY_HASH_MD5] = "md5", - [PKEY_HASH_SHA1] = "sha1", - [PKEY_HASH_RIPE_MD_160] = "rmd160", - [PKEY_HASH_SHA256] = "sha256", - [PKEY_HASH_SHA384] = "sha384", - [PKEY_HASH_SHA512] = "sha512", - [PKEY_HASH_SHA224] = "sha224", +const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = { +#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \ + defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE) + [PKEY_ALGO_RSA] = &RSA_public_key_algorithm, +#endif }; -EXPORT_SYMBOL_GPL(pkey_hash_algo); +EXPORT_SYMBOL_GPL(pkey_algo); -const char *const pkey_id_type[PKEY_ID_TYPE__LAST] = { +const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = { [PKEY_ID_PGP] = "PGP", [PKEY_ID_X509] = "X509", }; -EXPORT_SYMBOL_GPL(pkey_id_type); +EXPORT_SYMBOL_GPL(pkey_id_type_name); /* * Provide a part of a description of the key for /proc/keys. @@ -56,7 +52,7 @@ static void public_key_describe(const struct key *asymmetric_key, if (key) seq_printf(m, "%s.%s", - pkey_id_type[key->id_type], key->algo->name); + pkey_id_type_name[key->id_type], key->algo->name); } /* @@ -78,21 +74,45 @@ EXPORT_SYMBOL_GPL(public_key_destroy); /* * Verify a signature using a public key. 
*/ -static int public_key_verify_signature(const struct key *key, - const struct public_key_signature *sig) +int public_key_verify_signature(const struct public_key *pk, + const struct public_key_signature *sig) { - const struct public_key *pk = key->payload.data; + const struct public_key_algorithm *algo; + + BUG_ON(!pk); + BUG_ON(!pk->mpi[0]); + BUG_ON(!pk->mpi[1]); + BUG_ON(!sig); + BUG_ON(!sig->digest); + BUG_ON(!sig->mpi[0]); + + algo = pk->algo; + if (!algo) { + if (pk->pkey_algo >= PKEY_ALGO__LAST) + return -ENOPKG; + algo = pkey_algo[pk->pkey_algo]; + if (!algo) + return -ENOPKG; + } - if (!pk->algo->verify_signature) + if (!algo->verify_signature) return -ENOTSUPP; - if (sig->nr_mpi != pk->algo->n_sig_mpi) { + if (sig->nr_mpi != algo->n_sig_mpi) { pr_debug("Signature has %u MPI not %u\n", - sig->nr_mpi, pk->algo->n_sig_mpi); + sig->nr_mpi, algo->n_sig_mpi); return -EINVAL; } - return pk->algo->verify_signature(pk, sig); + return algo->verify_signature(pk, sig); +} +EXPORT_SYMBOL_GPL(public_key_verify_signature); + +static int public_key_verify_signature_2(const struct key *key, + const struct public_key_signature *sig) +{ + const struct public_key *pk = key->payload.data; + return public_key_verify_signature(pk, sig); } /* @@ -103,6 +123,6 @@ struct asymmetric_key_subtype public_key_subtype = { .name = "public_key", .describe = public_key_describe, .destroy = public_key_destroy, - .verify_signature = public_key_verify_signature, + .verify_signature = public_key_verify_signature_2, }; EXPORT_SYMBOL_GPL(public_key_subtype); diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h index 5e5e3562689..5c37a22a063 100644 --- a/crypto/asymmetric_keys/public_key.h +++ b/crypto/asymmetric_keys/public_key.h @@ -28,3 +28,9 @@ struct public_key_algorithm { }; extern const struct public_key_algorithm RSA_public_key_algorithm; + +/* + * public_key.c + */ +extern int public_key_verify_signature(const struct public_key *pk, + const struct public_key_signature *sig); diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c index 4a6a0696f8a..90a17f59ba2 100644 --- a/crypto/asymmetric_keys/rsa.c +++ b/crypto/asymmetric_keys/rsa.c @@ -73,13 +73,13 @@ static const struct { size_t size; } RSA_ASN1_templates[PKEY_HASH__LAST] = { #define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) } - [PKEY_HASH_MD5] = _(MD5), - [PKEY_HASH_SHA1] = _(SHA1), - [PKEY_HASH_RIPE_MD_160] = _(RIPE_MD_160), - [PKEY_HASH_SHA256] = _(SHA256), - [PKEY_HASH_SHA384] = _(SHA384), - [PKEY_HASH_SHA512] = _(SHA512), - [PKEY_HASH_SHA224] = _(SHA224), + [HASH_ALGO_MD5] = _(MD5), + [HASH_ALGO_SHA1] = _(SHA1), + [HASH_ALGO_RIPE_MD_160] = _(RIPE_MD_160), + [HASH_ALGO_SHA256] = _(SHA256), + [HASH_ALGO_SHA384] = _(SHA384), + [HASH_ALGO_SHA512] = _(SHA512), + [HASH_ALGO_SHA224] = _(SHA224), #undef _ }; diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index facbf26bc6b..29893162497 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -47,6 +47,8 @@ void x509_free_certificate(struct x509_certificate *cert) kfree(cert->subject); kfree(cert->fingerprint); kfree(cert->authority); + kfree(cert->sig.digest); + mpi_free(cert->sig.rsa.s); kfree(cert); } } @@ -152,33 +154,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, return -ENOPKG; /* Unsupported combination */ case OID_md4WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_MD5; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + 
ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha1WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA1; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha256WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA256; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha384WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA384; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha512WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA512; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha224WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA224; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; } @@ -203,8 +205,8 @@ int x509_note_signature(void *context, size_t hdrlen, return -EINVAL; } - ctx->cert->sig = value; - ctx->cert->sig_size = vlen; + ctx->cert->raw_sig = value; + ctx->cert->raw_sig_size = vlen; return 0; } @@ -343,8 +345,9 @@ int x509_extract_key_data(void *context, size_t hdrlen, if (ctx->last_oid != OID_rsaEncryption) return -ENOPKG; - /* There seems to be an extraneous 0 byte on the front of the data */ - ctx->cert->pkey_algo = PKEY_ALGO_RSA; + ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA; + + /* Discard the BIT STRING metadata */ ctx->key = value + 1; ctx->key_size = vlen - 1; return 0; diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index f86dc5fcc4a..87d9cc26f63 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -9,6 +9,7 @@ * 2 of the Licence, or (at your option) any later version. 
*/ +#include <linux/time.h> #include <crypto/public_key.h> struct x509_certificate { @@ -20,13 +21,11 @@ struct x509_certificate { char *authority; /* Authority key fingerprint as hex */ struct tm valid_from; struct tm valid_to; - enum pkey_algo pkey_algo : 8; /* Public key algorithm */ - enum pkey_algo sig_pkey_algo : 8; /* Signature public key algorithm */ - enum pkey_hash_algo sig_hash_algo : 8; /* Signature hash algorithm */ const void *tbs; /* Signed data */ - size_t tbs_size; /* Size of signed data */ - const void *sig; /* Signature data */ - size_t sig_size; /* Size of sigature */ + unsigned tbs_size; /* Size of signed data */ + unsigned raw_sig_size; /* Size of sigature */ + const void *raw_sig; /* Signature data */ + struct public_key_signature sig; /* Signature parameters */ }; /* @@ -34,3 +33,10 @@ struct x509_certificate { */ extern void x509_free_certificate(struct x509_certificate *cert); extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); + +/* + * x509_public_key.c + */ +extern int x509_get_sig_params(struct x509_certificate *cert); +extern int x509_check_signature(const struct public_key *pub, + struct x509_certificate *cert); diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 06007f0e880..f83300b6e8c 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -18,85 +18,162 @@ #include <linux/asn1_decoder.h> #include <keys/asymmetric-subtype.h> #include <keys/asymmetric-parser.h> +#include <keys/system_keyring.h> #include <crypto/hash.h> #include "asymmetric_keys.h" #include "public_key.h" #include "x509_parser.h" -static const -struct public_key_algorithm *x509_public_key_algorithms[PKEY_ALGO__LAST] = { - [PKEY_ALGO_DSA] = NULL, -#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \ - defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE) - [PKEY_ALGO_RSA] = &RSA_public_key_algorithm, -#endif -}; +/* + * Find a key in the given keyring by issuer and authority. + */ +static struct key *x509_request_asymmetric_key( + struct key *keyring, + const char *signer, size_t signer_len, + const char *authority, size_t auth_len) +{ + key_ref_t key; + char *id; + + /* Construct an identifier. */ + id = kmalloc(signer_len + 2 + auth_len + 1, GFP_KERNEL); + if (!id) + return ERR_PTR(-ENOMEM); + + memcpy(id, signer, signer_len); + id[signer_len + 0] = ':'; + id[signer_len + 1] = ' '; + memcpy(id + signer_len + 2, authority, auth_len); + id[signer_len + 2 + auth_len] = 0; + + pr_debug("Look up: \"%s\"\n", id); + + key = keyring_search(make_key_ref(keyring, 1), + &key_type_asymmetric, id); + if (IS_ERR(key)) + pr_debug("Request for module key '%s' err %ld\n", + id, PTR_ERR(key)); + kfree(id); + + if (IS_ERR(key)) { + switch (PTR_ERR(key)) { + /* Hide some search errors */ + case -EACCES: + case -ENOTDIR: + case -EAGAIN: + return ERR_PTR(-ENOKEY); + default: + return ERR_CAST(key); + } + } + + pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key_ref_to_ptr(key))); + return key_ref_to_ptr(key); +} /* - * Check the signature on a certificate using the provided public key + * Set up the signature parameters in an X.509 certificate. This involves + * digesting the signed data and extracting the signature. 
*/ -static int x509_check_signature(const struct public_key *pub, - const struct x509_certificate *cert) +int x509_get_sig_params(struct x509_certificate *cert) { - struct public_key_signature *sig; struct crypto_shash *tfm; struct shash_desc *desc; size_t digest_size, desc_size; + void *digest; int ret; pr_devel("==>%s()\n", __func__); - + + if (cert->sig.rsa.s) + return 0; + + cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size); + if (!cert->sig.rsa.s) + return -ENOMEM; + cert->sig.nr_mpi = 1; + /* Allocate the hashing algorithm we're going to need and find out how * big the hash operational data will be. */ - tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0); + tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0); if (IS_ERR(tfm)) return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm); desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); digest_size = crypto_shash_digestsize(tfm); - /* We allocate the hash operational data storage on the end of our - * context data. + /* We allocate the hash operational data storage on the end of the + * digest storage space. */ ret = -ENOMEM; - sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL); - if (!sig) - goto error_no_sig; + digest = kzalloc(digest_size + desc_size, GFP_KERNEL); + if (!digest) + goto error; - sig->pkey_hash_algo = cert->sig_hash_algo; - sig->digest = (u8 *)sig + sizeof(*sig) + desc_size; - sig->digest_size = digest_size; + cert->sig.digest = digest; + cert->sig.digest_size = digest_size; - desc = (void *)sig + sizeof(*sig); - desc->tfm = tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc = digest + digest_size; + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; ret = crypto_shash_init(desc); if (ret < 0) goto error; + might_sleep(); + ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest); +error: + crypto_free_shash(tfm); + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} +EXPORT_SYMBOL_GPL(x509_get_sig_params); - ret = -ENOMEM; - sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size); - if (!sig->rsa.s) - goto error; +/* + * Check the signature on a certificate using the provided public key + */ +int x509_check_signature(const struct public_key *pub, + struct x509_certificate *cert) +{ + int ret; - ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest); - if (ret < 0) - goto error_mpi; + pr_devel("==>%s()\n", __func__); - ret = pub->algo->verify_signature(pub, sig); + ret = x509_get_sig_params(cert); + if (ret < 0) + return ret; + ret = public_key_verify_signature(pub, &cert->sig); pr_debug("Cert Verification: %d\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(x509_check_signature); -error_mpi: - mpi_free(sig->rsa.s); -error: - kfree(sig); -error_no_sig: - crypto_free_shash(tfm); +/* + * Check the new certificate against the ones in the trust keyring. If one of + * those is the signing key and validates the new certificate, then mark the + * new certificate as being trusted. + * + * Return 0 if the new certificate was successfully validated, 1 if we couldn't + * find a matching parent certificate in the trusted list and an error if there + * is a matching certificate but the signature check fails. 
+ */ +static int x509_validate_trust(struct x509_certificate *cert, + struct key *trust_keyring) +{ + const struct public_key *pk; + struct key *key; + int ret = 1; - pr_devel("<==%s() = %d\n", __func__, ret); + key = x509_request_asymmetric_key(trust_keyring, + cert->issuer, strlen(cert->issuer), + cert->authority, + strlen(cert->authority)); + if (!IS_ERR(key)) { + pk = key->payload.data; + ret = x509_check_signature(pk, cert); + } return ret; } @@ -106,7 +183,6 @@ error_no_sig: static int x509_key_preparse(struct key_preparsed_payload *prep) { struct x509_certificate *cert; - struct tm now; size_t srlen, sulen; char *desc = NULL; int ret; @@ -117,7 +193,18 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) pr_devel("Cert Issuer: %s\n", cert->issuer); pr_devel("Cert Subject: %s\n", cert->subject); - pr_devel("Cert Key Algo: %s\n", pkey_algo[cert->pkey_algo]); + + if (cert->pub->pkey_algo >= PKEY_ALGO__LAST || + cert->sig.pkey_algo >= PKEY_ALGO__LAST || + cert->sig.pkey_hash_algo >= PKEY_HASH__LAST || + !pkey_algo[cert->pub->pkey_algo] || + !pkey_algo[cert->sig.pkey_algo] || + !hash_algo_name[cert->sig.pkey_hash_algo]) { + ret = -ENOPKG; + goto error_free_cert; + } + + pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]); pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n", cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1, cert->valid_from.tm_mday, cert->valid_from.tm_hour, @@ -127,61 +214,29 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) cert->valid_to.tm_mday, cert->valid_to.tm_hour, cert->valid_to.tm_min, cert->valid_to.tm_sec); pr_devel("Cert Signature: %s + %s\n", - pkey_algo[cert->sig_pkey_algo], - pkey_hash_algo[cert->sig_hash_algo]); + pkey_algo_name[cert->sig.pkey_algo], + hash_algo_name[cert->sig.pkey_hash_algo]); - if (!cert->fingerprint || !cert->authority) { - pr_warn("Cert for '%s' must have SubjKeyId and AuthKeyId extensions\n", + if (!cert->fingerprint) { + pr_warn("Cert for '%s' must have a SubjKeyId extension\n", cert->subject); ret = -EKEYREJECTED; goto error_free_cert; } - time_to_tm(CURRENT_TIME.tv_sec, 0, &now); - pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n", - now.tm_year + 1900, now.tm_mon + 1, now.tm_mday, - now.tm_hour, now.tm_min, now.tm_sec); - if (now.tm_year < cert->valid_from.tm_year || - (now.tm_year == cert->valid_from.tm_year && - (now.tm_mon < cert->valid_from.tm_mon || - (now.tm_mon == cert->valid_from.tm_mon && - (now.tm_mday < cert->valid_from.tm_mday || - (now.tm_mday == cert->valid_from.tm_mday && - (now.tm_hour < cert->valid_from.tm_hour || - (now.tm_hour == cert->valid_from.tm_hour && - (now.tm_min < cert->valid_from.tm_min || - (now.tm_min == cert->valid_from.tm_min && - (now.tm_sec < cert->valid_from.tm_sec - ))))))))))) { - pr_warn("Cert %s is not yet valid\n", cert->fingerprint); - ret = -EKEYREJECTED; - goto error_free_cert; - } - if (now.tm_year > cert->valid_to.tm_year || - (now.tm_year == cert->valid_to.tm_year && - (now.tm_mon > cert->valid_to.tm_mon || - (now.tm_mon == cert->valid_to.tm_mon && - (now.tm_mday > cert->valid_to.tm_mday || - (now.tm_mday == cert->valid_to.tm_mday && - (now.tm_hour > cert->valid_to.tm_hour || - (now.tm_hour == cert->valid_to.tm_hour && - (now.tm_min > cert->valid_to.tm_min || - (now.tm_min == cert->valid_to.tm_min && - (now.tm_sec > cert->valid_to.tm_sec - ))))))))))) { - pr_warn("Cert %s has expired\n", cert->fingerprint); - ret = -EKEYEXPIRED; - goto error_free_cert; - } - - cert->pub->algo = 
x509_public_key_algorithms[cert->pkey_algo]; + cert->pub->algo = pkey_algo[cert->pub->pkey_algo]; cert->pub->id_type = PKEY_ID_X509; - /* Check the signature on the key */ - if (strcmp(cert->fingerprint, cert->authority) == 0) { - ret = x509_check_signature(cert->pub, cert); + /* Check the signature on the key if it appears to be self-signed */ + if (!cert->authority || + strcmp(cert->fingerprint, cert->authority) == 0) { + ret = x509_check_signature(cert->pub, cert); /* self-signed */ if (ret < 0) goto error_free_cert; + } else { + ret = x509_validate_trust(cert, system_trusted_keyring); + if (!ret) + prep->trusted = 1; } /* Propose a description */ @@ -237,3 +292,6 @@ static void __exit x509_key_exit(void) module_init(x509_key_init); module_exit(x509_key_exit); + +MODULE_DESCRIPTION("X.509 certificate parser"); +MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 9e62feffb37..f8c0b8dbeb7 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, &dest, 1, &src, 1, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; + struct dmaengine_unmap_data *unmap = NULL; - if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { - dma_addr_t dma_dest, dma_src; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); + + if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { unsigned long dma_prep_flags = 0; if (submit->cb_fn) dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; - dma_dest = dma_map_page(device->dev, dest, dest_offset, len, - DMA_FROM_DEVICE); - - dma_src = dma_map_page(device->dev, src, src_offset, len, - DMA_TO_DEVICE); - - tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, - len, dma_prep_flags); - if (!tx) { - dma_unmap_page(device->dev, dma_dest, len, - DMA_FROM_DEVICE); - dma_unmap_page(device->dev, dma_src, len, - DMA_TO_DEVICE); - } + + unmap->to_cnt = 1; + unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len, + DMA_TO_DEVICE); + unmap->from_cnt = 1; + unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len, + DMA_FROM_DEVICE); + unmap->len = len; + + tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, + dma_prep_flags); } if (tx) { pr_debug("%s: (async) len: %zu\n", __func__, len); + + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); } else { void *dest_buf, *src_buf; @@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, async_tx_sync_epilog(submit); } + dmaengine_unmap_put(unmap); + return tx; } EXPORT_SYMBOL_GPL(async_memcpy); diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 91d5d385899..d05327caf69 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -46,49 +46,24 @@ static struct page *pq_scribble_page; * do_async_gen_syndrome - asynchronously calculate P and/or Q */ static __async_inline struct dma_async_tx_descriptor * -do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, - const unsigned char *scfs, unsigned int offset, int disks, - size_t len, dma_addr_t *dma_src, +do_async_gen_syndrome(struct dma_chan *chan, + const unsigned char *scfs, int disks, + struct dmaengine_unmap_data *unmap, + enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) { struct 
dma_async_tx_descriptor *tx = NULL; struct dma_device *dma = chan->device; - enum dma_ctrl_flags dma_flags = 0; enum async_tx_flags flags_orig = submit->flags; dma_async_tx_callback cb_fn_orig = submit->cb_fn; dma_async_tx_callback cb_param_orig = submit->cb_param; int src_cnt = disks - 2; - unsigned char coefs[src_cnt]; unsigned short pq_src_cnt; dma_addr_t dma_dest[2]; int src_off = 0; - int idx; - int i; - /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ - if (P(blocks, disks)) - dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, - len, DMA_BIDIRECTIONAL); - else - dma_flags |= DMA_PREP_PQ_DISABLE_P; - if (Q(blocks, disks)) - dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, - len, DMA_BIDIRECTIONAL); - else - dma_flags |= DMA_PREP_PQ_DISABLE_Q; - - /* convert source addresses being careful to collapse 'empty' - * sources and update the coefficients accordingly - */ - for (i = 0, idx = 0; i < src_cnt; i++) { - if (blocks[i] == NULL) - continue; - dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, - DMA_TO_DEVICE); - coefs[idx] = scfs[i]; - idx++; - } - src_cnt = idx; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; while (src_cnt > 0) { submit->flags = flags_orig; @@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, if (src_cnt > pq_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; - dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; } else { - dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = cb_fn_orig; submit->cb_param = cb_param_orig; if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } - if (submit->flags & ASYNC_TX_FENCE) - dma_flags |= DMA_PREP_FENCE; - /* Since we have clobbered the src_list we are committed - * to doing this asynchronously. Drivers force forward - * progress in case they can not provide a descriptor + /* Drivers force forward progress in case they can not provide + * a descriptor */ for (;;) { + dma_dest[0] = unmap->addr[disks - 2]; + dma_dest[1] = unmap->addr[disks - 1]; tx = dma->device_prep_dma_pq(chan, dma_dest, - &dma_src[src_off], + &unmap->addr[src_off], pq_src_cnt, - &coefs[src_off], len, + &scfs[src_off], unmap->len, dma_flags); if (likely(tx)) break; @@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, dma_async_issue_pending(chan); } + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); submit->depend_tx = tx; @@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, * set to NULL those buffers will be replaced with the raid6_zero_page * in the synchronous path and omitted in the hardware-asynchronous * path. - * - * 'blocks' note: if submit->scribble is NULL then the contents of - * 'blocks' may be overwritten to perform address conversions - * (dma_map_page() or page_address()). */ struct dma_async_tx_descriptor * async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, @@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, &P(blocks, disks), 2, blocks, src_cnt, len); struct dma_device *device = chan ? 
chan->device : NULL; - dma_addr_t *dma_src = NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) blocks; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); - if (dma_src && device && + if (unmap && (src_cnt <= dma_maxpq(device, 0) || dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && is_dma_pq_aligned(device, offset, 0, len)) { + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = 0; + unsigned char coefs[src_cnt]; + int i, j; + /* run the p+q asynchronously */ pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); - return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, - disks, len, dma_src, submit); + + /* convert source addresses being careful to collapse 'empty' + * sources and update the coefficients accordingly + */ + unmap->len = len; + for (i = 0, j = 0; i < src_cnt; i++) { + if (blocks[i] == NULL) + continue; + unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, + len, DMA_TO_DEVICE); + coefs[j] = raid6_gfexp[i]; + unmap->to_cnt++; + j++; + } + + /* + * DMAs use destinations as sources, + * so use BIDIRECTIONAL mapping + */ + unmap->bidi_cnt++; + if (P(blocks, disks)) + unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), + offset, len, DMA_BIDIRECTIONAL); + else { + unmap->addr[j++] = 0; + dma_flags |= DMA_PREP_PQ_DISABLE_P; + } + + unmap->bidi_cnt++; + if (Q(blocks, disks)) + unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), + offset, len, DMA_BIDIRECTIONAL); + else { + unmap->addr[j++] = 0; + dma_flags |= DMA_PREP_PQ_DISABLE_Q; + } + + tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); + dmaengine_unmap_put(unmap); + return tx; } + dmaengine_unmap_put(unmap); + /* run the pq synchronously */ pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); @@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, struct dma_async_tx_descriptor *tx; unsigned char coefs[disks-2]; enum dma_ctrl_flags dma_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; - dma_addr_t *dma_src = NULL; - int src_cnt = 0; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks < 4); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) blocks; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); - if (dma_src && device && disks <= dma_maxpq(device, 0) && + if (unmap && disks <= dma_maxpq(device, 0) && is_dma_pq_aligned(device, offset, 0, len)) { struct device *dev = device->dev; - dma_addr_t *pq = &dma_src[disks-2]; - int i; + dma_addr_t pq[2]; + int i, j = 0, src_cnt = 0; pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); - if (!P(blocks, disks)) + + unmap->len = len; + for (i = 0; i < disks-2; i++) + if (likely(blocks[i])) { + unmap->addr[j] = dma_map_page(dev, blocks[i], + offset, len, + DMA_TO_DEVICE); + coefs[j] = raid6_gfexp[i]; + unmap->to_cnt++; + src_cnt++; + j++; + } + + if (!P(blocks, disks)) { + pq[0] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_P; - else + } else { pq[0] = dma_map_page(dev, P(blocks, disks), offset, len, DMA_TO_DEVICE); - if (!Q(blocks, disks)) + unmap->addr[j++] = pq[0]; + unmap->to_cnt++; + } + if (!Q(blocks, disks)) { + pq[1] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_Q; - else + } else { pq[1] = dma_map_page(dev, Q(blocks, disks), offset, len, DMA_TO_DEVICE); + unmap->addr[j++] = pq[1]; + unmap->to_cnt++; + } if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - for (i = 0; i < disks-2; i++) - if (likely(blocks[i])) { - dma_src[src_cnt] = dma_map_page(dev, blocks[i], - offset, len, - DMA_TO_DEVICE); - coefs[src_cnt] = raid6_gfexp[i]; - src_cnt++; - } - for (;;) { - tx = device->device_prep_dma_pq_val(chan, pq, dma_src, + tx = device->device_prep_dma_pq_val(chan, pq, + unmap->addr, src_cnt, coefs, len, pqres, @@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, async_tx_quiesce(&submit->depend_tx); dma_async_issue_pending(chan); } + + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); return tx; diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index a9f08a6a582..934a8498149 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -26,6 +26,7 @@ #include <linux/dma-mapping.h> #include <linux/raid/pq.h> #include <linux/async_tx.h> +#include <linux/dmaengine.h> static struct dma_async_tx_descriptor * async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, @@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, srcs, 2, len); struct dma_device *dma = chan ? 
chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; const u8 *amul, *bmul; u8 ax, bx; u8 *a, *b, *c; - if (dma) { - dma_addr_t dma_dest[2]; - dma_addr_t dma_src[2]; + if (dma) + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + + if (unmap) { struct device *dev = dma->dev; + dma_addr_t pq[2]; struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); - dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); - dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); - tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, + unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); + unmap->to_cnt = 2; + + unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + unmap->bidi_cnt = 1; + /* engine only looks at Q, but expects it to follow P */ + pq[1] = unmap->addr[2]; + + unmap->len = len; + tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, len, dma_flags); if (tx) { + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); + dmaengine_unmap_put(unmap); return tx; } /* could not get a descriptor, unmap and fall through to * the synchronous path */ - dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); - dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); + dmaengine_unmap_put(unmap); } /* run the operation synchronously */ @@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, &src, 1, len); struct dma_device *dma = chan ? 
chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; const u8 *qmul; /* Q multiplier table */ u8 *d, *s; - if (dma) { + if (dma) + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + + if (unmap) { dma_addr_t dma_dest[2]; - dma_addr_t dma_src[1]; struct device *dev = dma->dev; struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); - dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); - tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, - len, dma_flags); + unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); + unmap->to_cnt++; + unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + dma_dest[1] = unmap->addr[1]; + unmap->bidi_cnt++; + unmap->len = len; + + /* this looks funny, but the engine looks for Q at + * dma_dest[1] and ignores dma_dest[0] as a dest + * due to DMA_PREP_PQ_DISABLE_P + */ + tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, + 1, &coef, len, dma_flags); + if (tx) { + dma_set_unmap(tx, unmap); + dmaengine_unmap_put(unmap); async_tx_submit(chan, tx, submit); return tx; } @@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, /* could not get a descriptor, unmap and fall through to * the synchronous path */ - dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); + dmaengine_unmap_put(unmap); } /* no channel available, or failed to allocate a descriptor, so diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 7be34248b45..39ea4791a3c 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, } device->device_issue_pending(chan); } else { - if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) + if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE) panic("%s: DMA error waiting for depend_tx\n", __func__); tx->tx_submit(tx); @@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) * we are referring to the correct operation */ BUG_ON(async_tx_test_ack(*tx)); - if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) + if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE) panic("%s: DMA error waiting for transaction\n", __func__); async_tx_ack(*tx); diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 8ade0a0481c..3c562f5a60b 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -33,48 +33,31 @@ /* do_async_xor - dma map the pages and perform the xor with an engine */ static __async_inline struct dma_async_tx_descriptor * -do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, - unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, +do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, struct async_submit_ctl *submit) { struct dma_device *dma = chan->device; struct dma_async_tx_descriptor *tx = NULL; - int src_off = 0; - int i; dma_async_tx_callback cb_fn_orig = submit->cb_fn; void *cb_param_orig = submit->cb_param; enum async_tx_flags flags_orig = submit->flags; - enum dma_ctrl_flags dma_flags; - int xor_src_cnt = 0; - dma_addr_t dma_dest; - - /* map the dest bidrectional in case it is re-used as a source */ - dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); - for (i = 0; i < src_cnt; i++) { - 
/* only map the dest once */ - if (!src_list[i]) - continue; - if (unlikely(src_list[i] == dest)) { - dma_src[xor_src_cnt++] = dma_dest; - continue; - } - dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, - len, DMA_TO_DEVICE); - } - src_cnt = xor_src_cnt; + enum dma_ctrl_flags dma_flags = 0; + int src_cnt = unmap->to_cnt; + int xor_src_cnt; + dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; + dma_addr_t *src_list = unmap->addr; while (src_cnt) { + dma_addr_t tmp; + submit->flags = flags_orig; - dma_flags = 0; xor_src_cnt = min(src_cnt, (int)dma->max_xor); - /* if we are submitting additional xors, leave the chain open, - * clear the callback parameters, and leave the destination - * buffer mapped + /* if we are submitting additional xors, leave the chain open + * and clear the callback parameters */ if (src_cnt > xor_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; - dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; } else { @@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, dma_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - /* Since we have clobbered the src_list we are committed - * to doing this asynchronously. Drivers force forward progress - * in case they can not provide a descriptor + + /* Drivers force forward progress in case they can not provide a + * descriptor */ - tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], - xor_src_cnt, len, dma_flags); + tmp = src_list[0]; + if (src_list > unmap->addr) + src_list[0] = dma_dest; + tx = dma->device_prep_dma_xor(chan, dma_dest, src_list, + xor_src_cnt, unmap->len, + dma_flags); + src_list[0] = tmp; + if (unlikely(!tx)) async_tx_quiesce(&submit->depend_tx); @@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, while (unlikely(!tx)) { dma_async_issue_pending(chan); tx = dma->device_prep_dma_xor(chan, dma_dest, - &dma_src[src_off], - xor_src_cnt, len, + src_list, + xor_src_cnt, unmap->len, dma_flags); } + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); submit->depend_tx = tx; if (src_cnt > xor_src_cnt) { /* drop completed sources */ src_cnt -= xor_src_cnt; - src_off += xor_src_cnt; - /* use the intermediate result a source */ - dma_src[--src_off] = dma_dest; src_cnt++; + src_list += xor_src_cnt - 1; } else break; } @@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, &dest, 1, src_list, src_cnt, len); - dma_addr_t *dma_src = NULL; + struct dma_device *device = chan ? 
chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(src_cnt <= 1); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) src_list; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); + + if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { + struct dma_async_tx_descriptor *tx; + int i, j; - if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { /* run the xor asynchronously */ pr_debug("%s (async): len: %zu\n", __func__, len); - return do_async_xor(chan, dest, src_list, offset, src_cnt, len, - dma_src, submit); + unmap->len = len; + for (i = 0, j = 0; i < src_cnt; i++) { + if (!src_list[i]) + continue; + unmap->to_cnt++; + unmap->addr[j++] = dma_map_page(device->dev, src_list[i], + offset, len, DMA_TO_DEVICE); + } + + /* map it bidirectional as it may be re-used as a source */ + unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, + DMA_BIDIRECTIONAL); + unmap->bidi_cnt = 1; + + tx = do_async_xor(chan, unmap, submit); + dmaengine_unmap_put(unmap); + return tx; } else { + dmaengine_unmap_put(unmap); /* run the xor synchronously */ pr_debug("%s (sync): len: %zu\n", __func__, len); WARN_ONCE(chan, "%s: no space for dma address conversion\n", @@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t *dma_src = NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(src_cnt <= 1); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) src_list; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); - if (dma_src && device && src_cnt <= device->max_xor && + if (unmap && src_cnt <= device->max_xor && is_dma_xor_aligned(device, offset, 0, len)) { unsigned long dma_prep_flags = 0; int i; @@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; - for (i = 0; i < src_cnt; i++) - dma_src[i] = dma_map_page(device->dev, src_list[i], - offset, len, DMA_TO_DEVICE); - tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, + for (i = 0; i < src_cnt; i++) { + unmap->addr[i] = dma_map_page(device->dev, src_list[i], + offset, len, DMA_TO_DEVICE); + unmap->to_cnt++; + } + unmap->len = len; + + tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt, len, result, dma_prep_flags); if (unlikely(!tx)) { @@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, while (!tx) { dma_async_issue_pending(chan); tx = device->device_prep_dma_xor_val(chan, - dma_src, src_cnt, len, result, + unmap->addr, src_cnt, len, result, dma_prep_flags); } } - + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); } else { enum async_tx_flags flags_orig = submit->flags; @@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, async_tx_sync_epilog(submit); submit->flags = flags_orig; } + dmaengine_unmap_put(unmap); return tx; } diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c index 4a92bac744d..dad95f45b88 100644 --- a/crypto/async_tx/raid6test.c +++ 
b/crypto/async_tx/raid6test.c @@ -28,7 +28,7 @@ #undef pr #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) -#define NDISKS 16 /* Including P and Q */ +#define NDISKS 64 /* Including P and Q */ static struct page *dataptrs[NDISKS]; static addr_conv_t addr_conv[NDISKS]; @@ -219,6 +219,14 @@ static int raid6_test(void) err += test(11, &tests); err += test(12, &tests); } + + /* the 24 disk case is special for ioatdma as it is the boudary point + * at which it needs to switch from 8-source ops to 16-source + * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set) + */ + if (NDISKS > 24) + err += test(24, &tests); + err += test(NDISKS, &tests); pr("\n"); diff --git a/crypto/hash_info.c b/crypto/hash_info.c new file mode 100644 index 00000000000..3e7ff46f26e --- /dev/null +++ b/crypto/hash_info.c @@ -0,0 +1,56 @@ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include <linux/export.h> +#include <crypto/hash_info.h> + +const char *const hash_algo_name[HASH_ALGO__LAST] = { + [HASH_ALGO_MD4] = "md4", + [HASH_ALGO_MD5] = "md5", + [HASH_ALGO_SHA1] = "sha1", + [HASH_ALGO_RIPE_MD_160] = "rmd160", + [HASH_ALGO_SHA256] = "sha256", + [HASH_ALGO_SHA384] = "sha384", + [HASH_ALGO_SHA512] = "sha512", + [HASH_ALGO_SHA224] = "sha224", + [HASH_ALGO_RIPE_MD_128] = "rmd128", + [HASH_ALGO_RIPE_MD_256] = "rmd256", + [HASH_ALGO_RIPE_MD_320] = "rmd320", + [HASH_ALGO_WP_256] = "wp256", + [HASH_ALGO_WP_384] = "wp384", + [HASH_ALGO_WP_512] = "wp512", + [HASH_ALGO_TGR_128] = "tgr128", + [HASH_ALGO_TGR_160] = "tgr160", + [HASH_ALGO_TGR_192] = "tgr192", +}; +EXPORT_SYMBOL_GPL(hash_algo_name); + +const int hash_digest_size[HASH_ALGO__LAST] = { + [HASH_ALGO_MD4] = MD5_DIGEST_SIZE, + [HASH_ALGO_MD5] = MD5_DIGEST_SIZE, + [HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE, + [HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE, + [HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE, + [HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE, + [HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE, + [HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE, + [HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE, + [HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE, + [HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE, + [HASH_ALGO_WP_256] = WP256_DIGEST_SIZE, + [HASH_ALGO_WP_384] = WP384_DIGEST_SIZE, + [HASH_ALGO_WP_512] = WP512_DIGEST_SIZE, + [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE, + [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE, + [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE, +}; +EXPORT_SYMBOL_GPL(hash_digest_size); diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index c95df0b8c88..5d9248526d7 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -235,17 +235,6 @@ config ACPI_INITRD_TABLE_OVERRIDE initrd, therefore it's safe to say Y. See Documentation/acpi/initrd_table_override.txt for details -config ACPI_BLACKLIST_YEAR - int "Disable ACPI for systems before Jan 1st this year" if X86_32 - default 0 - help - Enter a 4-digit year, e.g., 2001, to disable ACPI by default - on platforms with DMI BIOS date before January 1st that year. - "acpi=force" can be used to override this mechanism. - - Enter 0 to disable this mechanism and allow ACPI to - run by default no matter what the year. 
(default) - config ACPI_DEBUG bool "Debug Statements" default n diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index b9f0d5f4bba..8711e379716 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -56,7 +56,6 @@ static int ac_sleep_before_get_state_ms; struct acpi_ac { struct power_supply charger; - struct acpi_device *adev; struct platform_device *pdev; unsigned long long state; }; @@ -70,8 +69,9 @@ struct acpi_ac { static int acpi_ac_get_state(struct acpi_ac *ac) { acpi_status status; + acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); - status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL, + status = acpi_evaluate_integer(handle, "_PSR", NULL, &ac->state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, @@ -119,6 +119,7 @@ static enum power_supply_property ac_props[] = { static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) { struct acpi_ac *ac = data; + struct acpi_device *adev; if (!ac) return; @@ -141,10 +142,11 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) msleep(ac_sleep_before_get_state_ms); acpi_ac_get_state(ac); - acpi_bus_generate_netlink_event(ac->adev->pnp.device_class, + adev = ACPI_COMPANION(&ac->pdev->dev); + acpi_bus_generate_netlink_event(adev->pnp.device_class, dev_name(&ac->pdev->dev), event, (u32) ac->state); - acpi_notifier_call_chain(ac->adev, event, (u32) ac->state); + acpi_notifier_call_chain(adev, event, (u32) ac->state); kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); } @@ -178,8 +180,8 @@ static int acpi_ac_probe(struct platform_device *pdev) if (!pdev) return -EINVAL; - result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev); - if (result) + adev = ACPI_COMPANION(&pdev->dev); + if (!adev) return -ENODEV; ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); @@ -188,7 +190,6 @@ static int acpi_ac_probe(struct platform_device *pdev) strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); strcpy(acpi_device_class(adev), ACPI_AC_CLASS); - ac->adev = adev; ac->pdev = pdev; platform_set_drvdata(pdev, ac); diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index d3961014aad..6745fe137b9 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -163,6 +163,15 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "80860F41", (unsigned long)&byt_i2c_dev_desc }, { "INT33B2", }, + { "INT3430", (unsigned long)&lpt_dev_desc }, + { "INT3431", (unsigned long)&lpt_dev_desc }, + { "INT3432", (unsigned long)&lpt_dev_desc }, + { "INT3433", (unsigned long)&lpt_dev_desc }, + { "INT3434", (unsigned long)&lpt_uart_dev_desc }, + { "INT3435", (unsigned long)&lpt_uart_dev_desc }, + { "INT3436", (unsigned long)&lpt_sdio_dev_desc }, + { "INT3437", }, + { } }; diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 8a4cfc7e71f..dbfe49e5fd6 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -111,7 +111,7 @@ int acpi_create_platform_device(struct acpi_device *adev, pdevinfo.id = -1; pdevinfo.res = resources; pdevinfo.num_res = count; - pdevinfo.acpi_node.handle = adev->handle; + pdevinfo.acpi_node.companion = adev; pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) { dev_err(&adev->dev, "platform device creation failed: %ld\n", diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index fb848378d58..078c4f7fe2d 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -75,39 +75,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = { {""} }; -#if 
CONFIG_ACPI_BLACKLIST_YEAR - -static int __init blacklist_by_year(void) -{ - int year; - - /* Doesn't exist? Likely an old system */ - if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) { - printk(KERN_ERR PREFIX "no DMI BIOS year, " - "acpi=force is required to enable ACPI\n" ); - return 1; - } - /* 0? Likely a buggy new BIOS */ - if (year == 0) { - printk(KERN_ERR PREFIX "DMI BIOS year==0, " - "assuming ACPI-capable machine\n" ); - return 0; - } - if (year < CONFIG_ACPI_BLACKLIST_YEAR) { - printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), " - "acpi=force is required to enable ACPI\n", - year, CONFIG_ACPI_BLACKLIST_YEAR); - return 1; - } - return 0; -} -#else -static inline int blacklist_by_year(void) -{ - return 0; -} -#endif - int __init acpi_blacklisted(void) { int i = 0; @@ -166,8 +133,6 @@ int __init acpi_blacklisted(void) } } - blacklisted += blacklist_by_year(); - dmi_check_system(acpi_osi_dmi_table); return blacklisted; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index d42b2fb5a7e..b3480cf7db1 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -22,16 +22,12 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include <linux/device.h> +#include <linux/acpi.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/pm_qos.h> #include <linux/pm_runtime.h> -#include <acpi/acpi.h> -#include <acpi/acpi_bus.h> -#include <acpi/acpi_drivers.h> - #include "internal.h" #define _COMPONENT ACPI_POWER_COMPONENT @@ -548,7 +544,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev, */ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) { - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; int ret, d_min, d_max; @@ -656,7 +652,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) if (!device_run_wake(phys_dev)) return -EINVAL; - handle = DEVICE_ACPI_HANDLE(phys_dev); + handle = ACPI_HANDLE(phys_dev); if (!handle || acpi_bus_get_device(handle, &adev)) { dev_dbg(phys_dev, "ACPI handle without context in %s!\n", __func__); @@ -700,7 +696,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) if (!device_can_wakeup(dev)) return -EINVAL; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle || acpi_bus_get_device(handle, &adev)) { dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); return -ENODEV; @@ -722,7 +718,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) */ struct acpi_device *acpi_dev_pm_get_node(struct device *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; return handle && !acpi_bus_get_device(handle, &adev) ? 
adev : NULL; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d5309fd4945..ba5b56db9d2 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -173,9 +173,10 @@ static void start_transaction(struct acpi_ec *ec) static void advance_transaction(struct acpi_ec *ec, u8 status) { unsigned long flags; - struct transaction *t = ec->curr; + struct transaction *t; spin_lock_irqsave(&ec->lock, flags); + t = ec->curr; if (!t) goto unlock; if (t->wlen > t->wi) { diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index fdef416c0ff..cae3b387b86 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -78,15 +78,17 @@ enum { #define ACPI_GENL_VERSION 0x01 #define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group" +static const struct genl_multicast_group acpi_event_mcgrps[] = { + { .name = ACPI_GENL_MCAST_GROUP_NAME, }, +}; + static struct genl_family acpi_event_genl_family = { .id = GENL_ID_GENERATE, .name = ACPI_GENL_FAMILY_NAME, .version = ACPI_GENL_VERSION, .maxattr = ACPI_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group acpi_event_mcgrp = { - .name = ACPI_GENL_MCAST_GROUP_NAME, + .mcgrps = acpi_event_mcgrps, + .n_mcgrps = ARRAY_SIZE(acpi_event_mcgrps), }; int acpi_bus_generate_netlink_event(const char *device_class, @@ -141,7 +143,7 @@ int acpi_bus_generate_netlink_event(const char *device_class, return result; } - genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC); + genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC); return 0; } @@ -149,18 +151,7 @@ EXPORT_SYMBOL(acpi_bus_generate_netlink_event); static int acpi_event_genetlink_init(void) { - int result; - - result = genl_register_family(&acpi_event_genl_family); - if (result) - return result; - - result = genl_register_mc_group(&acpi_event_genl_family, - &acpi_event_mcgrp); - if (result) - genl_unregister_family(&acpi_event_genl_family); - - return result; + return genl_register_family(&acpi_event_genl_family); } #else diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 10f0f40587b..a22a295edb6 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -197,30 +197,28 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id) int acpi_bind_one(struct device *dev, acpi_handle handle) { - struct acpi_device *acpi_dev; - acpi_status status; + struct acpi_device *acpi_dev = NULL; struct acpi_device_physical_node *physical_node, *pn; char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; - if (ACPI_HANDLE(dev)) { + if (ACPI_COMPANION(dev)) { if (handle) { - dev_warn(dev, "ACPI handle is already set\n"); + dev_warn(dev, "ACPI companion already set\n"); return -EINVAL; } else { - handle = ACPI_HANDLE(dev); + acpi_dev = ACPI_COMPANION(dev); } + } else { + acpi_bus_get_device(handle, &acpi_dev); } - if (!handle) + if (!acpi_dev) return -EINVAL; + get_device(&acpi_dev->dev); get_device(dev); - status = acpi_bus_get_device(handle, &acpi_dev); - if (ACPI_FAILURE(status)) - goto err; - physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); if (!physical_node) { retval = -ENOMEM; @@ -242,10 +240,11 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) dev_warn(dev, "Already associated with ACPI node\n"); kfree(physical_node); - if (ACPI_HANDLE(dev) != handle) + if (ACPI_COMPANION(dev) != acpi_dev) goto err; put_device(dev); + put_device(&acpi_dev->dev); return 0; } if (pn->node_id == node_id) { @@ -259,8 +258,8 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) list_add(&physical_node->node, 
physnode_list); acpi_dev->physical_node_count++; - if (!ACPI_HANDLE(dev)) - ACPI_HANDLE_SET(dev, acpi_dev->handle); + if (!ACPI_COMPANION(dev)) + ACPI_COMPANION_SET(dev, acpi_dev); acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, @@ -283,27 +282,21 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) return 0; err: - ACPI_HANDLE_SET(dev, NULL); + ACPI_COMPANION_SET(dev, NULL); put_device(dev); + put_device(&acpi_dev->dev); return retval; } EXPORT_SYMBOL_GPL(acpi_bind_one); int acpi_unbind_one(struct device *dev) { + struct acpi_device *acpi_dev = ACPI_COMPANION(dev); struct acpi_device_physical_node *entry; - struct acpi_device *acpi_dev; - acpi_status status; - if (!ACPI_HANDLE(dev)) + if (!acpi_dev) return 0; - status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev); - if (ACPI_FAILURE(status)) { - dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__); - return -EINVAL; - } - mutex_lock(&acpi_dev->physical_node_lock); list_for_each_entry(entry, &acpi_dev->physical_node_list, node) @@ -316,9 +309,10 @@ int acpi_unbind_one(struct device *dev) acpi_physnode_link_name(physnode_name, entry->node_id); sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name); sysfs_remove_link(&dev->kobj, "firmware_node"); - ACPI_HANDLE_SET(dev, NULL); - /* acpi_bind_one() increase refcnt by one. */ + ACPI_COMPANION_SET(dev, NULL); + /* Drop references taken by acpi_bind_one(). */ put_device(dev); + put_device(&acpi_dev->dev); kfree(entry); break; } @@ -328,6 +322,15 @@ int acpi_unbind_one(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_unbind_one); +void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr) +{ + struct acpi_device *adev; + + if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev)) + ACPI_COMPANION_SET(dev, adev); +} +EXPORT_SYMBOL_GPL(acpi_preset_companion); + static int acpi_platform_notify(struct device *dev) { struct acpi_bus_type *type = acpi_get_bus_type(dev); diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 56f05869b08..0703bff5e60 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -575,6 +575,7 @@ static int acpi_pci_root_add(struct acpi_device *device, dev_err(&device->dev, "Bus %04x:%02x not present in PCI namespace\n", root->segment, (unsigned int)root->secondary.start); + device->driver_data = NULL; result = -ENODEV; goto end; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 55f9dedbbf9..15daa21fcd0 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -289,24 +289,17 @@ void acpi_bus_device_eject(void *data, u32 ost_src) { struct acpi_device *device = data; acpi_handle handle = device->handle; - struct acpi_scan_handler *handler; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; int error; lock_device_hotplug(); mutex_lock(&acpi_scan_lock); - handler = device->handler; - if (!handler || !handler->hotplug.enabled) { - put_device(&device->dev); - goto err_support; - } - if (ost_src == ACPI_NOTIFY_EJECT_REQUEST) acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); - if (handler->hotplug.mode == AHM_CONTAINER) + if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER) kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); error = acpi_scan_hot_remove(device); @@ -411,8 +404,7 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) break; case ACPI_NOTIFY_EJECT_REQUEST: acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); - status = 
acpi_bus_get_device(handle, &adev); - if (ACPI_FAILURE(status)) + if (acpi_bus_get_device(handle, &adev)) goto err_out; get_device(&adev->dev); @@ -1997,6 +1989,7 @@ static int acpi_bus_scan_fixed(void) if (result) return result; + device->flags.match_driver = true; result = device_attach(&device->dev); if (result < 0) return result; @@ -2013,6 +2006,7 @@ static int acpi_bus_scan_fixed(void) if (result) return result; + device->flags.match_driver = true; result = device_attach(&device->dev); } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 18dbdff4656..995e91bcb97 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -82,13 +82,6 @@ static bool allow_duplicates; module_param(allow_duplicates, bool, 0644); /* - * Some BIOSes claim they use minimum backlight at boot, - * and this may bring dimming screen after boot - */ -static bool use_bios_initial_backlight = 1; -module_param(use_bios_initial_backlight, bool, 0644); - -/* * For Windows 8 systems: if set ture and the GPU driver has * registered a backlight interface, skip registering ACPI video's. */ @@ -406,12 +399,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } -static int video_ignore_initial_backlight(const struct dmi_system_id *d) -{ - use_bios_initial_backlight = 0; - return 0; -} - static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -456,54 +443,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Folio 13-2000", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "Fujitsu E753", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion dm4", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion g6 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP 1000 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion m4", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"), - }, - }, {} }; @@ -839,20 +778,18 @@ acpi_video_init_brightness(struct acpi_video_device *device) if (!device->cap._BQC) goto set_level; - if (use_bios_initial_backlight) { - level = acpi_video_bqc_value_to_level(device, level_old); - /* - * On some buggy laptops, _BQC returns an uninitialized - * value when invoked for the first time, i.e. - * level_old is invalid (no matter whether it's a level - * or an index). Set the backlight to max_level in this case. 
- */ - for (i = 2; i < br->count; i++) - if (level == br->levels[i]) - break; - if (i == br->count || !level) - level = max_level; - } + level = acpi_video_bqc_value_to_level(device, level_old); + /* + * On some buggy laptops, _BQC returns an uninitialized + * value when invoked for the first time, i.e. + * level_old is invalid (no matter whether it's a level + * or an index). Set the backlight to max_level in this case. + */ + for (i = 2; i < br->count; i++) + if (level == br->levels[i]) + break; + if (i == br->count || !level) + level = max_level; set_level: result = acpi_video_device_lcd_set_level(device, level); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index ab714d2ad97..4372cfa883c 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -185,7 +185,7 @@ void ata_acpi_bind_port(struct ata_port *ap) if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle) return; - ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no)); + acpi_preset_companion(&ap->tdev, host_handle, ap->port_no); if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; @@ -222,7 +222,7 @@ void ata_acpi_bind_dev(struct ata_device *dev) parent_handle = port_handle; } - ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr)); + acpi_preset_companion(&dev->tdev, parent_handle, adr); register_hotplug_dock_device(ata_dev_acpi_handle(dev), &ata_acpi_dev_dock_ops, dev, NULL, NULL); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 853f610af28..e88690ebfd8 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len) struct dma_async_tx_descriptor *tx; struct dma_chan *chan = acdev->dma_chan; dma_cookie_t cookie; - unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + unsigned long flags = DMA_PREP_INTERRUPT; int ret = 0; tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 272f0092776..1bdf104e90b 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev) tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */ if (tmp) { memcpy(card->atmdev->esi, tmp->dev_addr, 6); - + dev_put(tmp); printk("%s: ESI %pM\n", card->name, card->atmdev->esi); } /* diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 47051cd2511..3a94b799f16 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full( goto err_alloc; pdev->dev.parent = pdevinfo->parent; - ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); + ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion); if (pdevinfo->dma_mask) { /* @@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full( ret = platform_device_add(pdev); if (ret) { err: - ACPI_HANDLE_SET(&pdev->dev, NULL); + ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); err_alloc: diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c12e9b9556b..1b41fca3d65 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1350,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state) device_unlock(dev); + if (error) + pm_runtime_put(dev); + return error; } diff --git 
a/drivers/block/null_blk.c b/drivers/block/null_blk.c index b5d842370cc..ea192ec029c 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -223,7 +223,7 @@ static void null_softirq_done_fn(struct request *rq) blk_end_request_all(rq, 0); } -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#ifdef CONFIG_SMP static void null_ipi_cmd_end_io(void *data) { @@ -260,7 +260,7 @@ static void null_cmd_end_ipi(struct nullb_cmd *cmd) put_cpu(); } -#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */ +#endif /* CONFIG_SMP */ static inline void null_handle_cmd(struct nullb_cmd *cmd) { @@ -270,7 +270,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) end_cmd(cmd); break; case NULL_IRQ_SOFTIRQ: -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#ifdef CONFIG_SMP null_cmd_end_ipi(cmd); #else end_cmd(cmd); @@ -571,7 +571,7 @@ static int __init null_init(void) { unsigned int i; -#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#if !defined(CONFIG_SMP) if (irqmode == NULL_IRQ_SOFTIRQ) { pr_warn("null_blk: softirq completions not available.\n"); pr_warn("null_blk: using direct completions.\n"); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 588479d58f5..6a680d4de7f 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) spin_lock_irqsave(&vblk->vq_lock, flags); if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) { + virtqueue_kick(vblk->vq); spin_unlock_irqrestore(&vblk->vq_lock, flags); blk_mq_stop_hw_queue(hctx); - virtqueue_kick(vblk->vq); return BLK_MQ_RQ_QUEUE_BUSY; } - spin_unlock_irqrestore(&vblk->vq_lock, flags); if (last) virtqueue_kick(vblk->vq); + + spin_unlock_irqrestore(&vblk->vq_lock, flags); return BLK_MQ_RQ_QUEUE_OK; } diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 94c0c74434e..1a65838888c 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -33,6 +33,15 @@ config TCG_TIS from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_tis. +config TCG_TIS_I2C_ATMEL + tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)" + depends on I2C + ---help--- + If you have an Atmel I2C TPM security chip say Yes and it will be + accessible from within Linux. + To compile this driver as a module, choose M here; the module will + be called tpm_tis_i2c_atmel. + config TCG_TIS_I2C_INFINEON tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)" depends on I2C @@ -42,7 +51,17 @@ config TCG_TIS_I2C_INFINEON Specification 0.20 say Yes and it will be accessible from within Linux. To compile this driver as a module, choose M here; the module - will be called tpm_tis_i2c_infineon. + will be called tpm_i2c_infineon. + +config TCG_TIS_I2C_NUVOTON + tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)" + depends on I2C + ---help--- + If you have a TPM security chip with an I2C interface from + Nuvoton Technology Corp. say Yes and it will be accessible + from within Linux. + To compile this driver as a module, choose M here; the module + will be called tpm_i2c_nuvoton. config TCG_NSC tristate "National Semiconductor TPM Interface" @@ -82,14 +101,14 @@ config TCG_IBMVTPM as a module, choose M here; the module will be called tpm_ibmvtpm. 
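The virtio_blk hunk above is easy to misread as a pure reordering, so here is a consolidated sketch of the submission path as it looks after the change: both virtqueue_kick() calls are now issued while vq_lock is still held, and the ring-full path notifies the host before the hardware queue is stopped, so a concurrent completion can never observe a stopped queue whose pending requests were never kicked. The helper name and its argument list are illustrative; the body follows the hunk, with request setup and SG mapping elided.

/* Sketch only: virtblk_submit_prepared() is a made-up name; "vbr", "num"
 * and "last" are assumed to have been prepared as in virtio_queue_rq(). */
static int virtblk_submit_prepared(struct blk_mq_hw_ctx *hctx,
				   struct virtio_blk *vblk,
				   struct virtblk_req *vbr,
				   unsigned int num, bool last)
{
	unsigned long flags;

	spin_lock_irqsave(&vblk->vq_lock, flags);
	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
		/* Ring full: notify the host of what is already queued
		 * before dropping the lock and stopping the hw queue. */
		virtqueue_kick(vblk->vq);
		spin_unlock_irqrestore(&vblk->vq_lock, flags);
		blk_mq_stop_hw_queue(hctx);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	if (last)
		virtqueue_kick(vblk->vq);	/* still under vq_lock */

	spin_unlock_irqrestore(&vblk->vq_lock, flags);
	return BLK_MQ_RQ_QUEUE_OK;
}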
config TCG_ST33_I2C - tristate "STMicroelectronics ST33 I2C TPM" - depends on I2C - depends on GPIOLIB - ---help--- - If you have a TPM security chip from STMicroelectronics working with - an I2C bus say Yes and it will be accessible from within Linux. - To compile this driver as a module, choose M here; the module will be - called tpm_stm_st33_i2c. + tristate "STMicroelectronics ST33 I2C TPM" + depends on I2C + depends on GPIOLIB + ---help--- + If you have a TPM security chip from STMicroelectronics working with + an I2C bus say Yes and it will be accessible from within Linux. + To compile this driver as a module, choose M here; the module will be + called tpm_stm_st33_i2c. config TCG_XEN tristate "XEN TPM Interface" diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index eb41ff97d0a..b80a4000dae 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -2,17 +2,20 @@ # Makefile for the kernel tpm device drivers. # obj-$(CONFIG_TCG_TPM) += tpm.o +tpm-y := tpm-interface.o +tpm-$(CONFIG_ACPI) += tpm_ppi.o + ifdef CONFIG_ACPI - obj-$(CONFIG_TCG_TPM) += tpm_bios.o - tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o + tpm-y += tpm_eventlog.o tpm_acpi.o else ifdef CONFIG_TCG_IBMVTPM - obj-$(CONFIG_TCG_TPM) += tpm_bios.o - tpm_bios-objs += tpm_eventlog.o tpm_of.o + tpm-y += tpm_eventlog.o tpm_of.o endif endif obj-$(CONFIG_TCG_TIS) += tpm_tis.o +obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o +obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o obj-$(CONFIG_TCG_NSC) += tpm_nsc.o obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm-interface.c index e3c974a6c52..6ae41d33763 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm-interface.c @@ -10,13 +10,13 @@ * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). - * Specifications at www.trustedcomputinggroup.org + * Specifications at www.trustedcomputinggroup.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. - * + * * Note, the TPM chip is not interrupt driven (only polling) * and can have very long timeouts (minutes!). Hence the unusual * calls to msleep. 
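Beyond the rename to tpm-interface.c, the hunks further down also change how the character device locates its chip: tpm_open() stops scanning the global tpm_chip_list by minor number and instead recovers the chip from the miscdevice that the misc core has already stored in file->private_data. A minimal sketch of that lookup follows, assuming the struct layout used by this driver; the function name is illustrative, and the data-buffer allocation done by the real tpm_open() is elided.

/* Illustrative only: relies on misc_open() having set file->private_data
 * to the registered struct miscdevice before calling this handler. */
static int example_tpm_open(struct inode *inode, struct file *file)
{
	struct miscdevice *misc = file->private_data;
	struct tpm_chip *chip = container_of(misc, struct tpm_chip,
					     vendor.miscdev);

	if (test_and_set_bit(0, &chip->is_open)) {
		dev_dbg(chip->dev, "Another process owns this TPM\n");
		return -EBUSY;
	}

	file->private_data = chip;	/* subsequent read()/write() get the chip */
	get_device(chip->dev);
	return 0;
}

Keeping everything reachable from struct tpm_chip is the same embedding that lets the later tpm_register_hardware() hunk drop the separately kmalloc'd device name in favour of the chip->devname[] array.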
@@ -371,13 +371,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, return -ENODATA; if (count > bufsiz) { dev_err(chip->dev, - "invalid count value %x %zx \n", count, bufsiz); + "invalid count value %x %zx\n", count, bufsiz); return -E2BIG; } mutex_lock(&chip->tpm_mutex); - if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) { + rc = chip->vendor.send(chip, (u8 *) buf, count); + if (rc < 0) { dev_err(chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; @@ -444,7 +445,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd, { int err; - len = tpm_transmit(chip,(u8 *) cmd, len); + len = tpm_transmit(chip, (u8 *) cmd, len); if (len < 0) return len; else if (len < TPM_HEADER_SIZE) @@ -658,7 +659,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip) return rc; } -ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -674,7 +675,7 @@ ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_enabled); -ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -690,7 +691,7 @@ ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_active); -ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -706,8 +707,8 @@ ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_owned); -ssize_t tpm_show_temp_deactivated(struct device * dev, - struct device_attribute * attr, char *buf) +ssize_t tpm_show_temp_deactivated(struct device *dev, + struct device_attribute *attr, char *buf) { cap_t cap; ssize_t rc; @@ -769,10 +770,10 @@ static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) /** * tpm_pcr_read - read a pcr value - * @chip_num: tpm idx # or ANY + * @chip_num: tpm idx # or ANY * @pcr_idx: pcr idx to retrieve - * @res_buf: TPM_PCR value - * size of res_buf is 20 bytes (or NULL if you don't care) + * @res_buf: TPM_PCR value + * size of res_buf is 20 bytes (or NULL if you don't care) * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing @@ -794,9 +795,9 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read); /** * tpm_pcr_extend - extend pcr value with hash - * @chip_num: tpm idx # or AN& + * @chip_num: tpm idx # or AN& * @pcr_idx: pcr idx to extend - * @hash: hash value used to extend pcr value + * @hash: hash value used to extend pcr value * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing @@ -847,8 +848,7 @@ int tpm_do_selftest(struct tpm_chip *chip) unsigned long duration; struct tpm_cmd_t cmd; - duration = tpm_calc_ordinal_duration(chip, - TPM_ORD_CONTINUE_SELFTEST); + duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST); loops = jiffies_to_msecs(duration) / delay_msec; @@ -965,12 +965,12 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, if (err) goto out; - /* + /* ignore header 10 bytes algorithm 32 bits (1 == RSA ) encscheme 16 bits sigscheme 16 bits - parameters (RSA 12->bytes: keybit, #primes, expbit) + parameters (RSA 
12->bytes: keybit, #primes, expbit) keylenbytes 32 bits 256 byte modulus ignore checksum 20 bytes @@ -1020,43 +1020,33 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); - rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, - "attempting to determine the 1.1 version"); - if (rc) - return 0; - str += sprintf(str, - "TCG version: %d.%d\nFirmware version: %d.%d\n", - cap.tpm_version.Major, cap.tpm_version.Minor, - cap.tpm_version.revMajor, cap.tpm_version.revMinor); - return str - buf; -} -EXPORT_SYMBOL_GPL(tpm_show_caps); - -ssize_t tpm_show_caps_1_2(struct device * dev, - struct device_attribute * attr, char *buf) -{ - cap_t cap; - ssize_t rc; - char *str = buf; - - rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, - "attempting to determine the manufacturer"); - if (rc) - return 0; - str += sprintf(str, "Manufacturer: 0x%x\n", - be32_to_cpu(cap.manufacturer_id)); + /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, "attempting to determine the 1.2 version"); - if (rc) - return 0; - str += sprintf(str, - "TCG version: %d.%d\nFirmware version: %d.%d\n", - cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor, - cap.tpm_version_1_2.revMajor, - cap.tpm_version_1_2.revMinor); + if (!rc) { + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: %d.%d\n", + cap.tpm_version_1_2.Major, + cap.tpm_version_1_2.Minor, + cap.tpm_version_1_2.revMajor, + cap.tpm_version_1_2.revMinor); + } else { + /* Otherwise just use TPM_STRUCT_VER */ + rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, + "attempting to determine the 1.1 version"); + if (rc) + return 0; + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: %d.%d\n", + cap.tpm_version.Major, + cap.tpm_version.Minor, + cap.tpm_version.revMajor, + cap.tpm_version.revMinor); + } + return str - buf; } -EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); +EXPORT_SYMBOL_GPL(tpm_show_caps); ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr, char *buf) @@ -1102,8 +1092,8 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, } EXPORT_SYMBOL_GPL(tpm_store_cancel); -static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel, - bool *canceled) +static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, + bool check_cancel, bool *canceled) { u8 status = chip->vendor.status(chip); @@ -1170,38 +1160,25 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat); */ int tpm_open(struct inode *inode, struct file *file) { - int minor = iminor(inode); - struct tpm_chip *chip = NULL, *pos; - - rcu_read_lock(); - list_for_each_entry_rcu(pos, &tpm_chip_list, list) { - if (pos->vendor.miscdev.minor == minor) { - chip = pos; - get_device(chip->dev); - break; - } - } - rcu_read_unlock(); - - if (!chip) - return -ENODEV; + struct miscdevice *misc = file->private_data; + struct tpm_chip *chip = container_of(misc, struct tpm_chip, + vendor.miscdev); if (test_and_set_bit(0, &chip->is_open)) { dev_dbg(chip->dev, "Another process owns this TPM\n"); - put_device(chip->dev); return -EBUSY; } chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); if (chip->data_buffer == NULL) { clear_bit(0, &chip->is_open); - put_device(chip->dev); return -ENOMEM; } atomic_set(&chip->data_pending, 0); file->private_data = chip; + get_device(chip->dev); return 0; } EXPORT_SYMBOL_GPL(tpm_open); @@ -1463,7 +1440,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip) chip->vendor.release(chip->dev); 
clear_bit(chip->dev_num, dev_mask); - kfree(chip->vendor.miscdev.name); } EXPORT_SYMBOL_GPL(tpm_dev_vendor_release); @@ -1487,7 +1463,7 @@ void tpm_dev_release(struct device *dev) EXPORT_SYMBOL_GPL(tpm_dev_release); /* - * Called from tpm_<specific>.c probe function only for devices + * Called from tpm_<specific>.c probe function only for devices * the driver has determined it should claim. Prior to calling * this function the specific probe function has called pci_enable_device * upon errant exit from this function specific probe function should call @@ -1496,17 +1472,13 @@ EXPORT_SYMBOL_GPL(tpm_dev_release); struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vendor_specific *entry) { -#define DEVNAME_SIZE 7 - - char *devname; struct tpm_chip *chip; /* Driver specific per-device data */ chip = kzalloc(sizeof(*chip), GFP_KERNEL); - devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL); - if (chip == NULL || devname == NULL) - goto out_free; + if (chip == NULL) + return NULL; mutex_init(&chip->buffer_mutex); mutex_init(&chip->tpm_mutex); @@ -1531,8 +1503,9 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, set_bit(chip->dev_num, dev_mask); - scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num); - chip->vendor.miscdev.name = devname; + scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm", + chip->dev_num); + chip->vendor.miscdev.name = chip->devname; chip->vendor.miscdev.parent = dev; chip->dev = get_device(dev); @@ -1558,7 +1531,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, goto put_device; } - chip->bios_dir = tpm_bios_log_setup(devname); + chip->bios_dir = tpm_bios_log_setup(chip->devname); /* Make chip available */ spin_lock(&driver_lock); @@ -1571,7 +1544,6 @@ put_device: put_device(chip->dev); out_free: kfree(chip); - kfree(devname); return NULL; } EXPORT_SYMBOL_GPL(tpm_register_hardware); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index a7bfc176ed4..f3284787219 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -59,8 +59,6 @@ extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr, char *); extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr, char *); -extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr, - char *); extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr, const char *, size_t); extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr, @@ -122,6 +120,7 @@ struct tpm_chip { struct device *dev; /* Device stuff */ int dev_num; /* /dev/tpm# */ + char devname[7]; unsigned long is_open; /* only one allowed */ int time_expired; diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 99d6820c611..c9a528d25d2 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -202,7 +202,7 @@ static int __init init_atmel(void) have_region = (atmel_request_region - (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1; + (base, region_size, "tpm_atmel0") == NULL) ? 
0 : 1; pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0); if (IS_ERR(pdev)) { diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c index 84ddc557b8f..59f7cb28260 100644 --- a/drivers/char/tpm/tpm_eventlog.c +++ b/drivers/char/tpm/tpm_eventlog.c @@ -406,7 +406,6 @@ out_tpm: out: return NULL; } -EXPORT_SYMBOL_GPL(tpm_bios_log_setup); void tpm_bios_log_teardown(struct dentry **lst) { @@ -415,5 +414,3 @@ void tpm_bios_log_teardown(struct dentry **lst) for (i = 0; i < 3; i++) securityfs_remove(lst[i]); } -EXPORT_SYMBOL_GPL(tpm_bios_log_teardown); -MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c new file mode 100644 index 00000000000..c3cd7fe481a --- /dev/null +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -0,0 +1,284 @@ +/* + * ATMEL I2C TPM AT97SC3204T + * + * Copyright (C) 2012 V Lab Technologies + * Teddy Reed <teddy@prosauce.org> + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe <jgunthorpe@obsidianresearch.com> + * Device driver for ATMEL I2C TPMs. + * + * Teddy Reed determined the basic I2C command flow, unlike other I2C TPM + * devices the raw TCG formatted TPM command data is written via I2C and then + * raw TCG formatted TPM command data is returned via I2C. + * + * TGC status/locality/etc functions seen in the LPC implementation do not + * seem to be present. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include "tpm.h" + +#define I2C_DRIVER_NAME "tpm_i2c_atmel" + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +#define ATMEL_STS_OK 1 + +struct priv_data { + size_t len; + /* This is the amount we read on the first try. 25 was chosen to fit a + * fair number of read responses in the buffer so a 2nd retry can be + * avoided in small message cases. */ + u8 buffer[sizeof(struct tpm_output_header) + 25]; +}; + +static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + + priv->len = 0; + + if (len <= 2) + return -EIO; + + status = i2c_master_send(client, buf, len); + + dev_dbg(chip->dev, + "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, + (int)min_t(size_t, 64, len), buf, len, status); + return status; +} + +static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + struct tpm_output_header *hdr = + (struct tpm_output_header *)priv->buffer; + u32 expected_len; + int rc; + + if (priv->len == 0) + return -EIO; + + /* Get the message size from the message header, if we didn't get the + * whole message in read_status then we need to re-read the + * message. 
*/ + expected_len = be32_to_cpu(hdr->length); + if (expected_len > count) + return -ENOMEM; + + if (priv->len >= expected_len) { + dev_dbg(chip->dev, + "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + memcpy(buf, priv->buffer, expected_len); + return expected_len; + } + + rc = i2c_master_recv(client, buf, expected_len); + dev_dbg(chip->dev, + "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + return rc; +} + +static void i2c_atmel_cancel(struct tpm_chip *chip) +{ + dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported"); +} + +static u8 i2c_atmel_read_status(struct tpm_chip *chip) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + int rc; + + /* The TPM fails the I2C read until it is ready, so we do the entire + * transfer here and buffer it locally. This way the common code can + * properly handle the timeouts. */ + priv->len = 0; + memset(priv->buffer, 0, sizeof(priv->buffer)); + + + /* Once the TPM has completed the command the command remains readable + * until another command is issued. */ + rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); + dev_dbg(chip->dev, + "%s: sts=%d", __func__, rc); + if (rc <= 0) + return 0; + + priv->len = rc; + + return ATMEL_STS_OK; +} + +static const struct file_operations i2c_atmel_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); + +static struct attribute *i2c_atmel_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static struct attribute_group i2c_atmel_attr_grp = { + .attrs = i2c_atmel_attrs +}; + +static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status) +{ + return 0; +} + +static const struct tpm_vendor_specific i2c_atmel = { + .status = i2c_atmel_read_status, + .recv = i2c_atmel_recv, + .send = i2c_atmel_send, + .cancel = i2c_atmel_cancel, + .req_complete_mask = ATMEL_STS_OK, + .req_complete_val = ATMEL_STS_OK, + .req_canceled = i2c_atmel_req_canceled, + .attr_group = &i2c_atmel_attr_grp, + .miscdev.fops = &i2c_atmel_ops, +}; + +static int i2c_atmel_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct tpm_chip *chip; + struct device *dev = &client->dev; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + chip = tpm_register_hardware(dev, &i2c_atmel); + if (!chip) { + dev_err(dev, "%s() error in tpm_register_hardware\n", __func__); + return -ENODEV; + } + + 
chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.irq = 0; + + /* There is no known way to probe for this device, and all version + * information seems to be read via TPM commands. Thus we rely on the + * TPM startup process in the common code to detect the device. */ + if (tpm_get_timeouts(chip)) { + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + rc = -ENODEV; + goto out_err; + } + + return 0; + +out_err: + tpm_dev_vendor_release(chip); + tpm_remove_hardware(chip->dev); + return rc; +} + +static int i2c_atmel_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip) + tpm_dev_vendor_release(chip); + tpm_remove_hardware(dev); + kfree(chip); + return 0; +} + +static const struct i2c_device_id i2c_atmel_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_atmel_of_match[] = { + {.compatible = "atmel,at97sc3204t"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_atmel_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_atmel_driver = { + .id_table = i2c_atmel_id, + .probe = i2c_atmel_probe, + .remove = i2c_atmel_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_atmel_pm_ops, + .of_match_table = of_match_ptr(i2c_atmel_of_match), + }, +}; + +module_i2c_driver(i2c_atmel_driver); + +MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>"); +MODULE_DESCRIPTION("Atmel TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index b8735de8ce9..fefd2aa5c81 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -581,7 +581,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); @@ -685,7 +685,6 @@ out_vendor: chip->dev->release = NULL; chip->release = NULL; tpm_dev.client = NULL; - dev_set_drvdata(chip->dev, chip); out_err: return rc; } @@ -766,7 +765,6 @@ static int tpm_tis_i2c_remove(struct i2c_client *client) chip->dev->release = NULL; chip->release = NULL; tpm_dev.client = NULL; - dev_set_drvdata(chip->dev, chip); return 0; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c new file mode 100644 index 00000000000..6276fea01ff --- /dev/null +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -0,0 +1,710 @@ +/****************************************************************************** + * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501, + * based on the TCG TPM Interface Spec version 1.2. 
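
A note on the Atmel flow above: because the chip refuses reads until the response is ready, i2c_atmel_read_status() pulls whatever it can into a local buffer, and i2c_atmel_recv() serves the reply from that buffer when it already holds enough bytes, re-reading from the bus only for larger responses. A rough, stand-alone sketch of that serve-from-buffer step, with illustrative names and sizes:

    #include <string.h>

    struct resp_buf {
        unsigned int len;           /* bytes captured by the status poll */
        unsigned char data[64];
    };

    /* Returns the number of bytes copied, 0 if the caller must re-read the
     * response from the device, or -1 if the destination is too small. */
    static int serve_response(const struct resp_buf *b, unsigned char *out,
                              unsigned int out_size, unsigned int expected)
    {
        if (expected > out_size)
            return -1;
        if (b->len < expected)
            return 0;               /* only a partial response was buffered */
        memcpy(out, b->data, expected);
        return (int)expected;
    }
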
+ * Specifications at www.trustedcomputinggroup.org + * + * Copyright (C) 2011, Nuvoton Technology Corporation. + * Dan Morav <dan.morav@nuvoton.com> + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe <jgunthorpe@obsidianresearch.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + * + * Nuvoton contact information: APC.Support@nuvoton.com + *****************************************************************************/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/i2c.h> +#include "tpm.h" + +/* I2C interface offsets */ +#define TPM_STS 0x00 +#define TPM_BURST_COUNT 0x01 +#define TPM_DATA_FIFO_W 0x20 +#define TPM_DATA_FIFO_R 0x40 +#define TPM_VID_DID_RID 0x60 +/* TPM command header size */ +#define TPM_HEADER_SIZE 10 +#define TPM_RETRY 5 +/* + * I2C bus device maximum buffer size w/o counting I2C address or command + * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data + */ +#define TPM_I2C_MAX_BUF_SIZE 32 +#define TPM_I2C_RETRY_COUNT 32 +#define TPM_I2C_BUS_DELAY 1 /* msec */ +#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ +#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ + +#define I2C_DRIVER_NAME "tpm_i2c_nuvoton" + +struct priv_data { + unsigned int intrs; +}; + +static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_read_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_write_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +#define TPM_STS_VALID 0x80 +#define TPM_STS_COMMAND_READY 0x40 +#define TPM_STS_GO 0x20 +#define TPM_STS_DATA_AVAIL 0x10 +#define TPM_STS_EXPECT 0x08 +#define TPM_STS_RESPONSE_RETRY 0x02 +#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */ + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +/* read TPM_STS register */ +static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + u8 data; + + status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); + if (status <= 0) { + dev_err(chip->dev, "%s() error return %d\n", __func__, + status); + data = TPM_STS_ERR_VAL; + } + + return data; +} + +/* write byte to TPM_STS register */ +static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) +{ + s32 status; + int i; + + /* this causes the current command to be aborted */ + for (i = 
0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) { + status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data); + msleep(TPM_I2C_BUS_DELAY); + } + return status; +} + +/* write commandReady to TPM_STS register */ +static void i2c_nuvoton_ready(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + + /* this causes the current command to be aborted */ + status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); + if (status < 0) + dev_err(chip->dev, + "%s() fail to write TPM_STS.commandReady\n", __func__); +} + +/* read burstCount field from TPM_STS register + * return -1 on fail to read */ +static int i2c_nuvoton_get_burstcount(struct i2c_client *client, + struct tpm_chip *chip) +{ + unsigned long stop = jiffies + chip->vendor.timeout_d; + s32 status; + int burst_count = -1; + u8 data; + + /* wait for burstcount to be non-zero */ + do { + /* in I2C burstCount is 1 byte */ + status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1, + &data); + if (status > 0 && data > 0) { + burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data); + break; + } + msleep(TPM_I2C_BUS_DELAY); + } while (time_before(jiffies, stop)); + + return burst_count; +} + +/* + * WPCT301/NPCT501 SINT# supports only dataAvail + * any call to this function which is not waiting for dataAvail will + * set queue to NULL to avoid waiting for interrupt + */ +static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value) +{ + u8 status = i2c_nuvoton_read_status(chip); + return (status != TPM_STS_ERR_VAL) && ((status & mask) == value); +} + +static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, + u32 timeout, wait_queue_head_t *queue) +{ + if (chip->vendor.irq && queue) { + s32 rc; + DEFINE_WAIT(wait); + struct priv_data *priv = chip->vendor.priv; + unsigned int cur_intrs = priv->intrs; + + enable_irq(chip->vendor.irq); + rc = wait_event_interruptible_timeout(*queue, + cur_intrs != priv->intrs, + timeout); + if (rc > 0) + return 0; + /* At this point we know that the SINT pin is asserted, so we + * do not need to do i2c_nuvoton_check_status */ + } else { + unsigned long ten_msec, stop; + bool status_valid; + + /* check current status */ + status_valid = i2c_nuvoton_check_status(chip, mask, value); + if (status_valid) + return 0; + + /* use polling to wait for the event */ + ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG); + stop = jiffies + timeout; + do { + if (time_before(jiffies, ten_msec)) + msleep(TPM_I2C_RETRY_DELAY_SHORT); + else + msleep(TPM_I2C_RETRY_DELAY_LONG); + status_valid = i2c_nuvoton_check_status(chip, mask, + value); + if (status_valid) + return 0; + } while (time_before(jiffies, stop)); + } + dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + value); + return -ETIMEDOUT; +} + +/* wait for dataAvail field to be set in the TPM_STS register */ +static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout, + wait_queue_head_t *queue) +{ + return i2c_nuvoton_wait_for_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + timeout, queue); +} + +/* Read @count bytes into @buf from TPM_RD_FIFO register */ +static int i2c_nuvoton_recv_data(struct i2c_client *client, + struct tpm_chip *chip, u8 *buf, size_t count) +{ + s32 rc; + int burst_count, bytes2read, size = 0; + + while (size < count && + i2c_nuvoton_wait_for_data_avail(chip, + chip->vendor.timeout_c, + &chip->vendor.read_queue) == 0) { + burst_count = i2c_nuvoton_get_burstcount(client, 
chip); + if (burst_count < 0) { + dev_err(chip->dev, + "%s() fail to read burstCount=%d\n", __func__, + burst_count); + return -EIO; + } + bytes2read = min_t(size_t, burst_count, count - size); + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, + bytes2read, &buf[size]); + if (rc < 0) { + dev_err(chip->dev, + "%s() fail on i2c_nuvoton_read_buf()=%d\n", + __func__, rc); + return -EIO; + } + dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read); + size += bytes2read; + } + + return size; +} + +/* Read TPM command results */ +static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct device *dev = chip->dev; + struct i2c_client *client = to_i2c_client(dev); + s32 rc; + int expected, status, burst_count, retries, size = 0; + + if (count < TPM_HEADER_SIZE) { + i2c_nuvoton_ready(chip); /* return to idle */ + dev_err(dev, "%s() count < header size\n", __func__); + return -EIO; + } + for (retries = 0; retries < TPM_RETRY; retries++) { + if (retries > 0) { + /* if this is not the first trial, set responseRetry */ + i2c_nuvoton_write_status(client, + TPM_STS_RESPONSE_RETRY); + } + /* + * read first available (> 10 bytes), including: + * tag, paramsize, and result + */ + status = i2c_nuvoton_wait_for_data_avail( + chip, chip->vendor.timeout_c, &chip->vendor.read_queue); + if (status != 0) { + dev_err(dev, "%s() timeout on dataAvail\n", __func__); + size = -ETIMEDOUT; + continue; + } + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail to get burstCount\n", __func__); + size = -EIO; + continue; + } + size = i2c_nuvoton_recv_data(client, chip, buf, + burst_count); + if (size < TPM_HEADER_SIZE) { + dev_err(dev, "%s() fail to read header\n", __func__); + size = -EIO; + continue; + } + /* + * convert number of expected bytes field from big endian 32 bit + * to machine native + */ + expected = be32_to_cpu(*(__be32 *) (buf + 2)); + if (expected > count) { + dev_err(dev, "%s() expected > count\n", __func__); + size = -EIO; + continue; + } + rc = i2c_nuvoton_recv_data(client, chip, &buf[size], + expected - size); + size += rc; + if (rc < 0 || size < expected) { + dev_err(dev, "%s() fail to read remainder of result\n", + __func__); + size = -EIO; + continue; + } + if (i2c_nuvoton_wait_for_stat( + chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, + TPM_STS_VALID, chip->vendor.timeout_c, + NULL)) { + dev_err(dev, "%s() error left over data\n", __func__); + size = -ETIMEDOUT; + continue; + } + break; + } + i2c_nuvoton_ready(chip); + dev_dbg(chip->dev, "%s() -> %d\n", __func__, size); + return size; +} + +/* + * Send TPM command. 
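
The polling branch of i2c_nuvoton_wait_for_stat() above sleeps in short steps for roughly the first 10 ms and in longer steps afterwards, up to the caller's timeout. The shape of that loop, reduced to plain C with hypothetical now_ms()/sleep_ms()/read_status() helpers, looks like this:

    /* Hypothetical helpers standing in for jiffies, msleep() and the
     * driver's status read. */
    extern unsigned long now_ms(void);
    extern void sleep_ms(unsigned int ms);
    extern unsigned char read_status(void);

    /* Poll until (status & mask) == value or the timeout expires: short
     * sleeps at first, longer ones once the quick path has clearly failed. */
    static int wait_for_status(unsigned char mask, unsigned char value,
                               unsigned int timeout_ms)
    {
        unsigned long start = now_ms();

        do {
            if ((read_status() & mask) == value)
                return 0;
            sleep_ms(now_ms() - start < 10 ? 2 : 10);
        } while (now_ms() - start < timeout_ms);

        return -1;                  /* timed out */
    }
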
+ * + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct device *dev = chip->dev; + struct i2c_client *client = to_i2c_client(dev); + u32 ordinal; + size_t count = 0; + int burst_count, bytes2write, retries, rc = -EIO; + + for (retries = 0; retries < TPM_RETRY; retries++) { + i2c_nuvoton_ready(chip); + if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, NULL)) { + dev_err(dev, "%s() timeout on commandReady\n", + __func__); + rc = -EIO; + continue; + } + rc = 0; + while (count < len - 1) { + burst_count = i2c_nuvoton_get_burstcount(client, + chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail get burstCount\n", + __func__); + rc = -EIO; + break; + } + bytes2write = min_t(size_t, burst_count, + len - 1 - count); + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, + bytes2write, &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail i2cWriteBuf\n", + __func__); + break; + } + dev_dbg(dev, "%s(%d):", __func__, bytes2write); + count += bytes2write; + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | + TPM_STS_EXPECT, + TPM_STS_VALID | + TPM_STS_EXPECT, + chip->vendor.timeout_c, + NULL); + if (rc < 0) { + dev_err(dev, "%s() timeout on Expect\n", + __func__); + rc = -ETIMEDOUT; + break; + } + } + if (rc < 0) + continue; + + /* write last byte */ + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1, + &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail to write last byte\n", + __func__); + rc = -EIO; + continue; + } + dev_dbg(dev, "%s(last): %02x", __func__, buf[count]); + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | TPM_STS_EXPECT, + TPM_STS_VALID, + chip->vendor.timeout_c, NULL); + if (rc) { + dev_err(dev, "%s() timeout on Expect to clear\n", + __func__); + rc = -ETIMEDOUT; + continue; + } + break; + } + if (rc < 0) { + /* retries == TPM_RETRY */ + i2c_nuvoton_ready(chip); + return rc; + } + /* execute the TPM command */ + rc = i2c_nuvoton_write_status(client, TPM_STS_GO); + if (rc < 0) { + dev_err(dev, "%s() fail to write Go\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + rc = i2c_nuvoton_wait_for_data_avail(chip, + tpm_calc_ordinal_duration(chip, + ordinal), + &chip->vendor.read_queue); + if (rc) { + dev_err(dev, "%s() timeout command duration\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + + dev_dbg(dev, "%s() -> %zd\n", __func__, len); + return len; +} + +static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == TPM_STS_COMMAND_READY); +} + +static const struct file_operations i2c_nuvoton_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, 
tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); + +static struct attribute *i2c_nuvoton_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static struct attribute_group i2c_nuvoton_attr_grp = { + .attrs = i2c_nuvoton_attrs +}; + +static const struct tpm_vendor_specific tpm_i2c = { + .status = i2c_nuvoton_read_status, + .recv = i2c_nuvoton_recv, + .send = i2c_nuvoton_send, + .cancel = i2c_nuvoton_ready, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = i2c_nuvoton_req_canceled, + .attr_group = &i2c_nuvoton_attr_grp, + .miscdev.fops = &i2c_nuvoton_ops, +}; + +/* The only purpose for the handler is to signal to any waiting threads that + * the interrupt is currently being asserted. The driver does not do any + * processing triggered by interrupts, and the chip provides no way to mask at + * the source (plus that would be slow over I2C). Run the IRQ as a one-shot, + * this means it cannot be shared. */ +static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + struct priv_data *priv = chip->vendor.priv; + + priv->intrs++; + wake_up(&chip->vendor.read_queue); + disable_irq_nosync(chip->vendor.irq); + return IRQ_HANDLED; +} + +static int get_vid(struct i2c_client *client, u32 *res) +{ + static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe }; + u32 temp; + s32 rc; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) { + /* + * f/w rev 2.81 has an issue where the VID_DID_RID is not + * reporting the right value. so give it another chance at + * offset 0x20 (FIFO_W). 
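
The interrupt handling described above is deliberately minimal: the handler bumps a counter, wakes the waiter and disables its own (one-shot) line; the waiter snapshots the counter, re-enables the line and sleeps until the count changes, so a wake-up racing with the snapshot cannot be lost. A condensed sketch of the two halves side by side, with a hypothetical foo_chip in place of the driver's private data (the kernel APIs are the real ones):

    #include <linux/interrupt.h>
    #include <linux/wait.h>
    #include <linux/errno.h>

    struct foo_chip {
        int irq;
        unsigned int intrs;             /* incremented by the handler */
        wait_queue_head_t read_queue;
    };

    static irqreturn_t foo_int_handler(int irq, void *dev_id)
    {
        struct foo_chip *chip = dev_id;

        chip->intrs++;                  /* publish progress ... */
        wake_up(&chip->read_queue);     /* ... and wake any waiter */
        disable_irq_nosync(irq);        /* one-shot: re-armed by the waiter */
        return IRQ_HANDLED;
    }

    static int foo_wait_for_int(struct foo_chip *chip, unsigned long timeout)
    {
        unsigned int seen = chip->intrs;    /* snapshot before re-enabling */

        enable_irq(chip->irq);
        if (wait_event_interruptible_timeout(chip->read_queue,
                                             chip->intrs != seen,
                                             timeout) > 0)
            return 0;
        return -ETIMEDOUT;              /* timed out or interrupted */
    }
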
+ */ + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4, + (u8 *) (&temp)); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, + sizeof(vid_did_rid_value))) + return -ENODEV; + } + + *res = temp; + return 0; +} + +static int i2c_nuvoton_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct tpm_chip *chip; + struct device *dev = &client->dev; + u32 vid = 0; + + rc = get_vid(client, &vid); + if (rc) + return rc; + + dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid, + (u8) (vid >> 16), (u8) (vid >> 24)); + + chip = tpm_register_hardware(dev, &tpm_i2c); + if (!chip) { + dev_err(dev, "%s() error in tpm_register_hardware\n", __func__); + return -ENODEV; + } + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + + /* + * I2C intfcaps (interrupt capabilitieis) in the chip are hard coded to: + * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT + * The IRQ should be set in the i2c_board_info (which is done + * automatically in of_i2c_register_devices, for device tree users */ + chip->vendor.irq = client->irq; + + if (chip->vendor.irq) { + dev_dbg(dev, "%s() chip-vendor.irq\n", __func__); + rc = devm_request_irq(dev, chip->vendor.irq, + i2c_nuvoton_int_handler, + IRQF_TRIGGER_LOW, + chip->vendor.miscdev.name, + chip); + if (rc) { + dev_err(dev, "%s() Unable to request irq: %d for use\n", + __func__, chip->vendor.irq); + chip->vendor.irq = 0; + } else { + /* Clear any pending interrupt */ + i2c_nuvoton_ready(chip); + /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, + NULL); + if (rc == 0) { + /* + * TIS is in ready state + * write dummy byte to enter reception state + * TPM_DATA_FIFO_W <- rc (0) + */ + rc = i2c_nuvoton_write_buf(client, + TPM_DATA_FIFO_W, + 1, (u8 *) (&rc)); + if (rc < 0) + goto out_err; + /* TPM_STS <- 0x40 (commandReady) */ + i2c_nuvoton_ready(chip); + } else { + /* + * timeout_b reached - command was + * aborted. 
TIS should now be in idle state - + * only TPM_STS_VALID should be set + */ + if (i2c_nuvoton_read_status(chip) != + TPM_STS_VALID) { + rc = -EIO; + goto out_err; + } + } + } + } + + if (tpm_get_timeouts(chip)) { + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + rc = -ENODEV; + goto out_err; + } + + return 0; + +out_err: + tpm_dev_vendor_release(chip); + tpm_remove_hardware(chip->dev); + return rc; +} + +static int i2c_nuvoton_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip) + tpm_dev_vendor_release(chip); + tpm_remove_hardware(dev); + kfree(chip); + return 0; +} + + +static const struct i2c_device_id i2c_nuvoton_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_nuvoton_of_match[] = { + {.compatible = "nuvoton,npct501"}, + {.compatible = "winbond,wpct301"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_nuvoton_driver = { + .id_table = i2c_nuvoton_id, + .probe = i2c_nuvoton_probe, + .remove = i2c_nuvoton_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_nuvoton_pm_ops, + .of_match_table = of_match_ptr(i2c_nuvoton_of_match), + }, +}; + +module_i2c_driver(i2c_nuvoton_driver); + +MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)"); +MODULE_DESCRIPTION("Nuvoton TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c index 5bb8e2ddd3b..a0d6ceb5d00 100644 --- a/drivers/char/tpm/tpm_i2c_stm_st33.c +++ b/drivers/char/tpm/tpm_i2c_stm_st33.c @@ -584,7 +584,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static struct attribute *stm_tpm_attrs[] = { @@ -746,8 +746,6 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) tpm_get_timeouts(chip); - i2c_set_clientdata(client, chip); - dev_info(chip->dev, "TPM I2C Initialized\n"); return 0; _irq_set: @@ -807,24 +805,18 @@ static int tpm_st33_i2c_remove(struct i2c_client *client) #ifdef CONFIG_PM_SLEEP /* * tpm_st33_i2c_pm_suspend suspend the TPM device - * Added: Work around when suspend and no tpm application is running, suspend - * may fail because chip->data_buffer is not set (only set in tpm_open in Linux - * TPM core) * @param: client, the i2c_client drescription (TPM I2C description). * @param: mesg, the power management message. * @return: 0 in case of success. 
*/ static int tpm_st33_i2c_pm_suspend(struct device *dev) { - struct tpm_chip *chip = dev_get_drvdata(dev); struct st33zp24_platform_data *pin_infos = dev->platform_data; int ret = 0; if (power_mgt) { gpio_set_value(pin_infos->io_lpcpd, 0); } else { - if (chip->data_buffer == NULL) - chip->data_buffer = pin_infos->tpm_i2c_buffer[0]; ret = tpm_pm_suspend(dev); } return ret; @@ -849,8 +841,6 @@ static int tpm_st33_i2c_pm_resume(struct device *dev) TPM_STS_VALID) == TPM_STS_VALID, chip->vendor.timeout_b); } else { - if (chip->data_buffer == NULL) - chip->data_buffer = pin_infos->tpm_i2c_buffer[0]; ret = tpm_pm_resume(dev); if (!ret) tpm_do_selftest(chip); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 56b07c35a13..2783a42aa73 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -98,7 +98,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) if (count < len) { dev_err(ibmvtpm->dev, - "Invalid size in recv: count=%ld, crq_size=%d\n", + "Invalid size in recv: count=%zd, crq_size=%d\n", count, len); return -EIO; } @@ -136,7 +136,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) if (count > ibmvtpm->rtce_size) { dev_err(ibmvtpm->dev, - "Invalid size in send: count=%ld, rtce_size=%d\n", + "Invalid size in send: count=%zd, rtce_size=%d\n", count, ibmvtpm->rtce_size); return -EIO; } @@ -419,7 +419,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index 2168d15bc72..8e562dc6560 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -452,12 +452,8 @@ int tpm_add_ppi(struct kobject *parent) { return sysfs_create_group(parent, &ppi_attr_grp); } -EXPORT_SYMBOL_GPL(tpm_add_ppi); void tpm_remove_ppi(struct kobject *parent) { sysfs_remove_group(parent, &ppi_attr_grp); } -EXPORT_SYMBOL_GPL(tpm_remove_ppi); - -MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 5796d0157ce..1b74459c072 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -448,7 +448,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 94c280d36e8..c8ff4df8177 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -351,8 +351,6 @@ static int tpmfront_probe(struct xenbus_device *dev, tpm_get_timeouts(priv->chip); - dev_set_drvdata(&dev->dev, priv->chip); - return rv; } diff --git a/drivers/connector/cn_proc.c 
b/drivers/connector/cn_proc.c index c73fc2b74de..18c5b9b1664 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -32,11 +32,23 @@ #include <linux/atomic.h> #include <linux/pid_namespace.h> -#include <asm/unaligned.h> - #include <linux/cn_proc.h> -#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event)) +/* + * Size of a cn_msg followed by a proc_event structure. Since the + * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we + * add one 4-byte word to the size here, and then start the actual + * cn_msg structure 4 bytes into the stack buffer. The result is that + * the immediately following proc_event structure is aligned to 8 bytes. + */ +#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4) + +/* See comment above; we test our assumption about sizeof struct cn_msg here. */ +static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer) +{ + BUILD_BUG_ON(sizeof(struct cn_msg) != 20); + return (struct cn_msg *)(buffer + 4); +} static atomic_t proc_event_num_listeners = ATOMIC_INIT(0); static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; @@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; struct task_struct *parent; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_FORK; rcu_read_lock(); parent = rcu_dereference(task->real_parent); @@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_EXEC; ev->event_data.exec.process_pid = task->pid; ev->event_data.exec.process_tgid = task->tgid; @@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; const struct cred *cred; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); ev->what = which_id; @@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id) rcu_read_unlock(); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ @@ -159,17 +171,17 @@ void 
proc_sid_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_SID; ev->event_data.sid.process_pid = task->pid; ev->event_data.sid.process_tgid = task->tgid; @@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_PTRACE; ev->event_data.ptrace.process_pid = task->pid; ev->event_data.ptrace.process_tgid = task->tgid; @@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_COMM; ev->event_data.comm.process_pid = task->pid; ev->event_data.comm.process_tgid = task->tgid; @@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_COREDUMP; ev->event_data.coredump.process_pid = task->pid; ev->event_data.coredump.process_tgid = task->tgid; @@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 
*)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_EXIT; ev->event_data.exit.process_pid = task->pid; ev->event_data.exit.process_tgid = task->tgid; @@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); msg->seq = rcvd_seq; ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->cpu = -1; ev->what = PROC_EVENT_NONE; ev->event_data.ack.err = err; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 218460fcd2e..25a70d06c5b 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load) dbs_info->requested_freq += get_freq_target(cs_tuners, policy); + if (dbs_info->requested_freq > policy->max) + dbs_info->requested_freq = policy->max; + __cpufreq_driver_target(policy, dbs_info->requested_freq, CPUFREQ_RELATION_H); return; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 0806c31e576..e6be63561fa 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -328,10 +328,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_data->cdata->gov_dbs_timer); } - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_dbs_info->down_skip = 0; cs_dbs_info->enable = 1; diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index be6d14307aa..a0acd0bfba4 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -53,6 +53,7 @@ static unsigned int omap_getspeed(unsigned int cpu) static int omap_target(struct cpufreq_policy *policy, unsigned int index) { + int r, ret; struct dev_pm_opp *opp; unsigned long freq, volt = 0, volt_old = 0, tol = 0; unsigned int old_freq, new_freq; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index dd2874ec192..446687cc233 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -89,14 +89,15 @@ config AT_HDMAC Support the Atmel AHB DMA controller. config FSL_DMA - tristate "Freescale Elo and Elo Plus DMA support" + tristate "Freescale Elo series DMA support" depends on FSL_SOC select DMA_ENGINE select ASYNC_TX_ENABLE_CHANNEL_SWITCH ---help--- - Enable support for the Freescale Elo and Elo Plus DMA controllers. - The Elo is the DMA controller on some 82xx and 83xx parts, and the - Elo Plus is the DMA controller on 85xx and 86xx parts. + Enable support for the Freescale Elo series DMA controllers. + The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the + EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on + some Txxx and Bxxx parts. 
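
The connector change above (CN_PROC_MSG_SIZE and buffer_to_cn_msg() in cn_proc.c) removes the put_unaligned() calls by construction: the on-stack buffer is 8-byte aligned and the 20-byte header is started 4 bytes into it, so the event payload that follows lands on an 8-byte boundary. A tiny stand-alone sketch of the same arithmetic, using hypothetical header and payload types:

    #include <stdint.h>

    /* Stand-ins for struct cn_msg (20 bytes) and struct proc_event (which
     * begins with a 64-bit timestamp). Illustrative types only. */
    struct hdr { uint8_t bytes[20]; };
    struct payload { uint64_t timestamp_ns; };

    /* One spare 4-byte word, as in CN_PROC_MSG_SIZE above. */
    #define MSG_SIZE (sizeof(struct hdr) + sizeof(struct payload) + 4)

    static inline struct hdr *buffer_to_hdr(uint8_t *buffer)
    {
        /* With the buffer declared 8-byte aligned, starting the 20-byte
         * header at offset 4 puts its end at offset 24, so the payload
         * that follows is naturally 8-byte aligned. */
        return (struct hdr *)(buffer + 4);
    }

    /* Typical use:
     *   uint8_t buffer[MSG_SIZE] __attribute__((aligned(8)));
     *   struct hdr *h = buffer_to_hdr(buffer);
     *   struct payload *p = (struct payload *)(h + 1);
     */
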
config MPC512X_DMA tristate "Freescale MPC512x built-in DMA engine support" diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e51a9832ef0..16a2aa28f85 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->vd.tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - static void pl08x_desc_free(struct virt_dma_desc *vd) { struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); - if (!plchan->slave) - pl08x_unmap_buffers(txd); - + dma_descriptor_unmap(txd); if (!txd->done) pl08x_release_mux(plchan); @@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* @@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&plchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { vd = vchan_find_desc(&plchan->vc, cookie); if (vd) { /* On the issued list, so hasn't been processed yet */ @@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); - ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, - DRIVER_NAME, pl08x); + ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); if (ret) { dev_err(&adev->dev, "%s failed to request interrupt %d\n", __func__, adev->irq[0]); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c787f38a186..e2c04dc81e2 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) /* move myself to free_list */ list_move(&desc->desc_node, &atchan->free_list); - /* unmap dma addresses (not on slave channels) */ - if (!atchan->chan_common.private) { - struct device *parent = chan2parent(&atchan->chan_common); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); /* for cyclic transfers, * no 
need to replay callback function while stopping */ if (!atc_chan_is_cyclic(atchan)) { @@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan, int bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* * There's no point calculating the residue if there's diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 31011d2a26f..3c6716e0b78 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dma_set_residue(txstate, coh901318_get_bytes_left(chan)); @@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev) if (irq < 0) return irq; - err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, + err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, "coh901318", base); if (err) return err; diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 7c82b92f9b1..c29dacff66f 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -141,6 +141,9 @@ struct cppi41_dd { const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues td_queue; + + /* context for suspend/resume */ + unsigned int dma_tdfdq; }; #define FIST_COMPLETION_QUEUE 93 @@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val) return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); } +static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) +{ + u32 desc; + + desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); + desc &= ~0x1f; + return desc; +} + static irqreturn_t cppi41_irq(int irq, void *data) { struct cppi41_dd *cdd = data; @@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) q_num = __fls(val); val &= ~(1 << q_num); q_num += 32 * i; - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); - desc &= ~0x1f; + desc = cppi41_pop_desc(cdd, q_num); c = desc_to_chan(cdd, desc); if (WARN_ON(!c)) { pr_err("%s() q %d desc %08x\n", __func__, @@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, /* lock */ ret = dma_cookie_status(chan, cookie, txstate); - if (txstate && ret == DMA_SUCCESS) + if (txstate && ret == DMA_COMPLETE) txstate->residue = c->residue; /* unlock */ @@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d) d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; } -static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) -{ - u32 desc; - - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); - desc &= ~0x1f; - return desc; -} - static int cppi41_tear_down_chan(struct cppi41_channel *c) { struct cppi41_dd *cdd = c->cdd; @@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) c->td_retry = 100; } - if (!c->td_seen) { - unsigned td_comp_queue; + if (!c->td_seen || !c->td_desc_seen) { - if (c->is_tx) - td_comp_queue = cdd->td_queue.complete; - else - td_comp_queue = c->q_comp_num; + desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); + if (!desc_phys) + desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - desc_phys = cppi41_pop_desc(cdd, td_comp_queue); - if (desc_phys) { - __iormb(); + if (desc_phys == c->desc_phys) { + c->td_desc_seen = 1; + + } else if (desc_phys == td_desc_phys) { + u32 pd0; - if (desc_phys == td_desc_phys) { - u32 pd0; - pd0 = td->pd0; - WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); - 
WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); - WARN_ON((pd0 & 0x1f) != c->port_num); - } else { - WARN_ON_ONCE(1); - } - c->td_seen = 1; - } - } - if (!c->td_desc_seen) { - desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - if (desc_phys) { __iormb(); - WARN_ON(c->desc_phys != desc_phys); - c->td_desc_seen = 1; + pd0 = td->pd0; + WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); + WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); + WARN_ON((pd0 & 0x1f) != c->port_num); + c->td_seen = 1; + } else if (desc_phys) { + WARN_ON_ONCE(1); } } c->td_retry--; @@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) WARN_ON(!c->td_retry); if (!c->td_desc_seen) { - desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); + desc_phys = cppi41_pop_desc(cdd, c->q_num); WARN_ON(!desc_phys); } @@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd) } } -static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) +static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) { struct cppi41_channel *cchan; int i; int ret; u32 n_chans; - ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", + ret = of_property_read_u32(dev->of_node, "#dma-channels", &n_chans); if (ret) return ret; @@ -719,7 +711,7 @@ err: return -ENOMEM; } -static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) +static void purge_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int mem_decs; int i; @@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); - dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, + dma_free_coherent(dev, mem_decs, cdd->cd, cdd->descs_phys); } } @@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd) cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); } -static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) +static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) { disable_sched(cdd); - purge_descs(pdev, cdd); + purge_descs(dev, cdd); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); - dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, + dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, cdd->scratch_phys); } -static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) +static int init_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int desc_size; unsigned int mem_decs; @@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) reg |= ilog2(ALLOC_DECS_NUM) - 5; BUILD_BUG_ON(DESCS_AREAS != 1); - cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, + cdd->cd = dma_alloc_coherent(dev, mem_decs, &cdd->descs_phys, GFP_KERNEL); if (!cdd->cd) return -ENOMEM; @@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd) cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); } -static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) +static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) { int ret; BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); - cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, + cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, &cdd->scratch_phys, GFP_KERNEL); if (!cdd->qmgr_scratch) return -ENOMEM; @@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) 
cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); - ret = init_descs(pdev, cdd); + ret = init_descs(dev, cdd); if (ret) goto err_td; @@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) init_sched(cdd); return 0; err_td: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); return ret; } @@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = { }; MODULE_DEVICE_TABLE(of, cppi41_dma_ids); -static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) +static const struct cppi_glue_infos *get_glue_info(struct device *dev) { const struct of_device_id *of_id; - of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); + of_id = of_match_node(cppi41_dma_ids, dev->of_node); if (!of_id) return NULL; return of_id->data; @@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) static int cppi41_dma_probe(struct platform_device *pdev) { struct cppi41_dd *cdd; + struct device *dev = &pdev->dev; const struct cppi_glue_infos *glue_info; int irq; int ret; - glue_info = get_glue_info(pdev); + glue_info = get_glue_info(dev); if (!glue_info) return -EINVAL; @@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; cdd->ddev.device_control = cppi41_dma_control; - cdd->ddev.dev = &pdev->dev; + cdd->ddev.dev = dev; INIT_LIST_HEAD(&cdd->ddev.channels); cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; - cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); - cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); - cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); - cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); + cdd->usbss_mem = of_iomap(dev->of_node, 0); + cdd->ctrl_mem = of_iomap(dev->of_node, 1); + cdd->sched_mem = of_iomap(dev->of_node, 2); + cdd->qmgr_mem = of_iomap(dev->of_node, 3); if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || !cdd->qmgr_mem) { @@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) goto err_remap; } - pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret) + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) goto err_get_sync; cdd->queues_rx = glue_info->queues_rx; cdd->queues_tx = glue_info->queues_tx; cdd->td_queue = glue_info->td_queue; - ret = init_cppi41(pdev, cdd); + ret = init_cppi41(dev, cdd); if (ret) goto err_init_cppi; - ret = cppi41_add_chans(pdev, cdd); + ret = cppi41_add_chans(dev, cdd); if (ret) goto err_chans; - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) goto err_irq; cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); ret = request_irq(irq, glue_info->isr, IRQF_SHARED, - dev_name(&pdev->dev), cdd); + dev_name(dev), cdd); if (ret) goto err_irq; cdd->irq = irq; @@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) if (ret) goto err_dma_reg; - ret = of_dma_controller_register(pdev->dev.of_node, + ret = of_dma_controller_register(dev->of_node, cppi41_dma_xlate, &cpp41_dma_info); if (ret) goto err_of; @@ -1009,11 +1002,11 @@ err_irq: cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); cleanup_chans(cdd); err_chans: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); err_init_cppi: - pm_runtime_put(&pdev->dev); + pm_runtime_put(dev); err_get_sync: - 
pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); free_irq(cdd->irq, cdd); cleanup_chans(cdd); - deinit_cpii41(pdev, cdd); + deinit_cppi41(&pdev->dev, cdd); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int cppi41_suspend(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + + cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); + disable_sched(cdd); + + return 0; +} + +static int cppi41_resume(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + struct cppi41_channel *c; + int i; + + for (i = 0; i < DESCS_AREAS; i++) + cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); + + list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) + if (!c->is_tx) + cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); + + init_sched(cdd); + + cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); + cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); + + cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); + static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", .owner = THIS_MODULE, + .pm = &cppi41_pm_ops, .of_match_table = of_match_ptr(cppi41_dma_ids), }, }; diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index b0c0c8268d4..94c380f0753 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, unsigned long flags; status = dma_cookie_status(c, cookie, state); - if (status == DMA_SUCCESS || !state) + if (status == DMA_COMPLETE || !state) return status; spin_lock_irqsave(&chan->vchan.lock, flags); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 9162ac80c18..ea806bdc12e 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -65,6 +65,7 @@ #include <linux/acpi.h> #include <linux/acpi_dma.h> #include <linux/of_dma.h> +#include <linux/mempool.h> static DEFINE_MUTEX(dma_list_mutex); static DEFINE_IDR(dma_idr); @@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device) } EXPORT_SYMBOL(dma_async_device_unregister); -/** - * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses - * @chan: DMA channel to offload copy to - * @dest: destination address (virtual) - * @src: source address (virtual) - * @len: length - * - * Both @dest and @src must be mappable to a bus address according to the - * DMA mapping API rules for streaming mappings. - * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages). 
- */ -dma_cookie_t -dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, - void *src, size_t len) -{ - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; - dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); - dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | - DMA_COMPL_SRC_UNMAP_SINGLE | - DMA_COMPL_DEST_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); +#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } +static struct dmaengine_unmap_pool unmap_pool[] = { + __UNMAP_POOL(2), + #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) + __UNMAP_POOL(16), + __UNMAP_POOL(128), + __UNMAP_POOL(256), + #endif +}; - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; +static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) +{ + int order = get_count_order(nr); + + switch (order) { + case 0 ... 1: + return &unmap_pool[0]; + case 2 ... 4: + return &unmap_pool[1]; + case 5 ... 7: + return &unmap_pool[2]; + case 8: + return &unmap_pool[3]; + default: + BUG(); + return NULL; } +} - tx->callback = NULL; - cookie = tx->tx_submit(tx); +static void dmaengine_unmap(struct kref *kref) +{ + struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); + struct device *dev = unmap->dev; + int cnt, i; + + cnt = unmap->to_cnt; + for (i = 0; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_TO_DEVICE); + cnt += unmap->from_cnt; + for (; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_FROM_DEVICE); + cnt += unmap->bidi_cnt; + for (; i < cnt; i++) { + if (unmap->addr[i] == 0) + continue; + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_BIDIRECTIONAL); + } + mempool_free(unmap, __get_unmap_pool(cnt)->pool); +} - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ + if (unmap) + kref_put(&unmap->kref, dmaengine_unmap); +} +EXPORT_SYMBOL_GPL(dmaengine_unmap_put); - return cookie; +static void dmaengine_destroy_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + + if (p->pool) + mempool_destroy(p->pool); + p->pool = NULL; + if (p->cache) + kmem_cache_destroy(p->cache); + p->cache = NULL; + } } -EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); -/** - * dma_async_memcpy_buf_to_pg - offloaded copy from address to page - * @chan: DMA channel to offload copy to - * @page: destination page - * @offset: offset in page to copy to - * @kdata: source address (virtual) - * @len: length - * - * Both @page/@offset and @kdata must be mappable to a bus address according - * to the DMA mapping API rules for streaming mappings. 
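The unmap pools introduced above come in 2/16/128/256-entry sizes, and __get_unmap_pool() picks one from get_count_order() of the requested address count. A minimal userspace sketch of that bucketing, with a local ceil-log2 helper standing in for get_count_order() and the BUG() on oversized requests left out:

#include <stdio.h>

/* Stand-in for the kernel's get_count_order(): ceil(log2(n)) for n > 0. */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

/* Mirror of the order -> pool-size buckets used by __get_unmap_pool(). */
static unsigned int pool_size_for(unsigned int nr)
{
	int order = count_order(nr);

	if (order <= 1)
		return 2;
	if (order <= 4)
		return 16;
	if (order <= 7)
		return 128;
	return 256;	/* order 8; anything larger is a bug in the kernel code */
}

int main(void)
{
	unsigned int nr;

	for (nr = 1; nr <= 256; nr *= 2)
		printf("nr=%3u -> pool of %u dma_addr_t slots\n",
		       nr, pool_size_for(nr));
	return 0;
}

As the #if in the pool table shows, only the two-entry pool is built without CONFIG_ASYNC_TX_DMA; the larger buckets back the async_tx xor/pq cases.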
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or - * locked user space pages) - */ -dma_cookie_t -dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, - unsigned int offset, void *kdata, size_t len) +static int __init dmaengine_init_unmap_pool(void) { - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; + int i; - dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + size_t size; - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; + size = sizeof(struct dmaengine_unmap_data) + + sizeof(dma_addr_t) * p->size; + + p->cache = kmem_cache_create(p->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!p->cache) + break; + p->pool = mempool_create_slab_pool(1, p->cache); + if (!p->pool) + break; } - tx->callback = NULL; - cookie = tx->tx_submit(tx); + if (i == ARRAY_SIZE(unmap_pool)) + return 0; - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); + dmaengine_destroy_unmap_pool(); + return -ENOMEM; +} - return cookie; +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + struct dmaengine_unmap_data *unmap; + + unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); + if (!unmap) + return NULL; + + memset(unmap, 0, sizeof(*unmap)); + kref_init(&unmap->kref); + unmap->dev = dev; + + return unmap; } -EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); +EXPORT_SYMBOL(dmaengine_get_unmap_data); /** * dma_async_memcpy_pg_to_pg - offloaded copy from page to page @@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; unsigned long flags; - dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, - DMA_FROM_DEVICE); + unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); + if (!unmap) + return -ENOMEM; + + unmap->to_cnt = 1; + unmap->from_cnt = 1; + unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, + DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, + DMA_FROM_DEVICE); + unmap->len = len; flags = DMA_CTRL_ACK; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); + tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], + len, flags); if (!tx) { - dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); + dmaengine_unmap_put(unmap); return -ENOMEM; } - tx->callback = NULL; + dma_set_unmap(tx, unmap); cookie = tx->tx_submit(tx); + dmaengine_unmap_put(unmap); preempt_disable(); __this_cpu_add(chan->local->bytes_transferred, len); @@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, } EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); +/** + * dma_async_memcpy_buf_to_buf - offloaded copy 
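The new dmaengine_unmap_data is reference counted: dma_async_memcpy_pg_to_pg() above hands it to the descriptor with dma_set_unmap() and immediately drops its own reference, so the mappings survive until the completion path (see the dma_descriptor_unmap() call in the dw/core.c hunk further down) releases the last one. A toy userspace model of that lifetime, with a plain integer in place of the kref, printf standing in for dma_unmap_page(), and illustrative counts and addresses:

#include <stdio.h>

struct toy_unmap {
	int refcount;
	unsigned int to_cnt, from_cnt, bidi_cnt;
	unsigned long addr[4];
};

/* Release in the same order dmaengine_unmap() walks unmap->addr[]:
 * TO_DEVICE entries first, then FROM_DEVICE, then BIDIRECTIONAL. */
static void toy_release(struct toy_unmap *u)
{
	unsigned int i, cnt;

	cnt = u->to_cnt;
	for (i = 0; i < cnt; i++)
		printf("unmap addr[%u]=%#lx (to device)\n", i, u->addr[i]);
	cnt += u->from_cnt;
	for (; i < cnt; i++)
		printf("unmap addr[%u]=%#lx (from device)\n", i, u->addr[i]);
	cnt += u->bidi_cnt;
	for (; i < cnt; i++)
		printf("unmap addr[%u]=%#lx (bidirectional)\n", i, u->addr[i]);
}

static void toy_put(struct toy_unmap *u)
{
	if (--u->refcount == 0)		/* last put drops the mappings */
		toy_release(u);
}

int main(void)
{
	struct toy_unmap u = {
		.refcount = 2,			/* submitter + descriptor */
		.to_cnt = 1, .from_cnt = 1,
		.addr = { 0x1000, 0x2000 },	/* fake bus addresses */
	};

	toy_put(&u);	/* submit-path put: still referenced, nothing freed */
	toy_put(&u);	/* completion put: mappings are released */
	return 0;
}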
between virtual addresses + * @chan: DMA channel to offload copy to + * @dest: destination address (virtual) + * @src: source address (virtual) + * @len: length + * + * Both @dest and @src must be mappable to a bus address according to the + * DMA mapping API rules for streaming mappings. + * Both @dest and @src must stay memory resident (kernel memory or locked + * user space pages). + */ +dma_cookie_t +dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, + void *src, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), + (unsigned long) dest & ~PAGE_MASK, + virt_to_page(src), + (unsigned long) src & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); + +/** + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page + * @chan: DMA channel to offload copy to + * @page: destination page + * @offset: offset in page to copy to + * @kdata: source address (virtual) + * @len: length + * + * Both @page/@offset and @kdata must be mappable to a bus address according + * to the DMA mapping API rules for streaming mappings. + * Both @page/@offset and @kdata must stay memory resident (kernel memory or + * locked user space pages) + */ +dma_cookie_t +dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, + unsigned int offset, void *kdata, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, page, offset, + virt_to_page(kdata), + (unsigned long) kdata & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); + void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan) { @@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); if (!tx) - return DMA_SUCCESS; + return DMA_COMPLETE; while (tx->cookie == -EBUSY) { if (time_after_eq(jiffies, dma_sync_wait_timeout)) { @@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); static int __init dma_bus_init(void) { + int err = dmaengine_init_unmap_pool(); + + if (err) + return err; return class_register(&dma_devclass); } arch_initcall(dma_bus_init); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 92f796cdc6a..20f9a3aaf92 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -8,6 +8,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> @@ -19,10 +21,6 @@ #include <linux/random.h> #include <linux/slab.h> #include <linux/wait.h> -#include <linux/ctype.h> -#include <linux/debugfs.h> -#include <linux/uaccess.h> -#include <linux/seq_file.h> static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); @@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); -/* Maximum amount of mismatched bytes in buffer to print */ -#define MAX_ERROR_COUNT 32 - -/* - * Initialization patterns. All bytes in the source buffer has bit 7 - * set, all bytes in the destination buffer has bit 7 cleared. - * - * Bit 6 is set for all bytes which are to be copied by the DMA - * engine. Bit 5 is set for all bytes which are to be overwritten by - * the DMA engine. - * - * The remaining bits are the inverse of a counter which increments by - * one for each byte address. 
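With the open-coded buf-to-buf and buf-to-pg copies gone, both wrappers above reduce a kernel virtual address to the (page, offset) pair that dma_async_memcpy_pg_to_pg() expects; the offset part is just addr & ~PAGE_MASK. A small sketch of that arithmetic with a hard-coded 4 KiB page size and a made-up address:

#include <stdio.h>

#define TOY_PAGE_SHIFT	12			/* assume 4 KiB pages */
#define TOY_PAGE_SIZE	(1UL << TOY_PAGE_SHIFT)
#define TOY_PAGE_MASK	(~(TOY_PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xc0123abcUL;	/* sample kernel virtual address */
	unsigned long page_base = addr & TOY_PAGE_MASK;	/* what virt_to_page() identifies */
	unsigned long offset = addr & ~TOY_PAGE_MASK;	/* passed as the page offset */

	printf("addr   %#lx\n", addr);
	printf("page   %#lx\n", page_base);
	printf("offset %#lx\n", offset);
	return 0;
}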
- */ -#define PATTERN_SRC 0x80 -#define PATTERN_DST 0x00 -#define PATTERN_COPY 0x40 -#define PATTERN_OVERWRITE 0x20 -#define PATTERN_COUNT_MASK 0x1f - -enum dmatest_error_type { - DMATEST_ET_OK, - DMATEST_ET_MAP_SRC, - DMATEST_ET_MAP_DST, - DMATEST_ET_PREP, - DMATEST_ET_SUBMIT, - DMATEST_ET_TIMEOUT, - DMATEST_ET_DMA_ERROR, - DMATEST_ET_DMA_IN_PROGRESS, - DMATEST_ET_VERIFY, - DMATEST_ET_VERIFY_BUF, -}; - -struct dmatest_verify_buffer { - unsigned int index; - u8 expected; - u8 actual; -}; - -struct dmatest_verify_result { - unsigned int error_count; - struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; - u8 pattern; - bool is_srcbuf; -}; - -struct dmatest_thread_result { - struct list_head node; - unsigned int n; - unsigned int src_off; - unsigned int dst_off; - unsigned int len; - enum dmatest_error_type type; - union { - unsigned long data; - dma_cookie_t cookie; - enum dma_status status; - int error; - struct dmatest_verify_result *vr; - }; -}; - -struct dmatest_result { - struct list_head node; - char *name; - struct list_head results; -}; - -struct dmatest_info; - -struct dmatest_thread { - struct list_head node; - struct dmatest_info *info; - struct task_struct *task; - struct dma_chan *chan; - u8 **srcs; - u8 **dsts; - enum dma_transaction_type type; - bool done; -}; +static bool noverify; +module_param(noverify, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); -struct dmatest_chan { - struct list_head node; - struct dma_chan *chan; - struct list_head threads; -}; +static bool verbose; +module_param(verbose, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); /** * struct dmatest_params - test parameters. @@ -177,6 +96,7 @@ struct dmatest_params { unsigned int xor_sources; unsigned int pq_sources; int timeout; + bool noverify; }; /** @@ -184,7 +104,7 @@ struct dmatest_params { * @params: test parameters * @lock: access protection to the fields of this structure */ -struct dmatest_info { +static struct dmatest_info { /* Test parameters */ struct dmatest_params params; @@ -192,16 +112,95 @@ struct dmatest_info { struct list_head channels; unsigned int nr_channels; struct mutex lock; + bool did_init; +} test_info = { + .channels = LIST_HEAD_INIT(test_info.channels), + .lock = __MUTEX_INITIALIZER(test_info.lock), +}; + +static int dmatest_run_set(const char *val, const struct kernel_param *kp); +static int dmatest_run_get(char *val, const struct kernel_param *kp); +static struct kernel_param_ops run_ops = { + .set = dmatest_run_set, + .get = dmatest_run_get, +}; +static bool dmatest_run; +module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(run, "Run the test (default: false)"); + +/* Maximum amount of mismatched bytes in buffer to print */ +#define MAX_ERROR_COUNT 32 + +/* + * Initialization patterns. All bytes in the source buffer has bit 7 + * set, all bytes in the destination buffer has bit 7 cleared. + * + * Bit 6 is set for all bytes which are to be copied by the DMA + * engine. Bit 5 is set for all bytes which are to be overwritten by + * the DMA engine. + * + * The remaining bits are the inverse of a counter which increments by + * one for each byte address. 
+ */ +#define PATTERN_SRC 0x80 +#define PATTERN_DST 0x00 +#define PATTERN_COPY 0x40 +#define PATTERN_OVERWRITE 0x20 +#define PATTERN_COUNT_MASK 0x1f - /* debugfs related stuff */ - struct dentry *root; +struct dmatest_thread { + struct list_head node; + struct dmatest_info *info; + struct task_struct *task; + struct dma_chan *chan; + u8 **srcs; + u8 **dsts; + enum dma_transaction_type type; + bool done; +}; - /* Test results */ - struct list_head results; - struct mutex results_lock; +struct dmatest_chan { + struct list_head node; + struct dma_chan *chan; + struct list_head threads; }; -static struct dmatest_info test_info; +static DECLARE_WAIT_QUEUE_HEAD(thread_wait); +static bool wait; + +static bool is_threaded_test_run(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) + return true; + } + } + + return false; +} + +static int dmatest_wait_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + struct dmatest_params *params = &info->params; + + if (params->iterations) + wait_event(thread_wait, !is_threaded_test_run(info)); + wait = true; + return param_get_bool(val, kp); +} + +static struct kernel_param_ops wait_ops = { + .get = dmatest_wait_get, + .set = param_set_bool, +}; +module_param_cb(wait, &wait_ops, &wait, S_IRUGO); +MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)"); static bool dmatest_match_channel(struct dmatest_params *params, struct dma_chan *chan) @@ -223,7 +222,7 @@ static unsigned long dmatest_random(void) { unsigned long buf; - get_random_bytes(&buf, sizeof(buf)); + prandom_bytes(&buf, sizeof(buf)); return buf; } @@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, } } -static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, - unsigned int start, unsigned int end, unsigned int counter, - u8 pattern, bool is_srcbuf) +static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, + unsigned int counter, bool is_srcbuf) +{ + u8 diff = actual ^ pattern; + u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); + const char *thread_name = current->comm; + + if (is_srcbuf) + pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if ((pattern & PATTERN_COPY) + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) + pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if (diff & PATTERN_SRC) + pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else + pr_warn("%s: dstbuf[0x%x] mismatch! 
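The pattern scheme restated above is easy to see in a standalone program: the sketch below fills a toy source and destination buffer the same way dmatest_init_srcs()/dmatest_init_dsts() do, using a made-up copy window of 8 bytes at offset 4:

#include <stdio.h>

#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f

int main(void)
{
	unsigned char src[16], dst[16];
	unsigned int i, off = 4, len = 8;	/* toy copy window */

	/* Bit 7 marks a source byte, bit 6 the bytes the DMA engine is
	 * expected to copy, bit 5 the destination bytes it may overwrite,
	 * and the low five bits carry an inverted per-byte counter. */
	for (i = 0; i < sizeof(src); i++) {
		unsigned char cnt = ~i & PATTERN_COUNT_MASK;

		src[i] = PATTERN_SRC | cnt;
		if (i >= off && i < off + len)
			src[i] |= PATTERN_COPY;

		dst[i] = PATTERN_DST | cnt;
		if (i >= off && i < off + len)
			dst[i] |= PATTERN_OVERWRITE;
	}

	for (i = 0; i < sizeof(src); i++)
		printf("%2u: src=%02x dst=%02x\n", i, src[i], dst[i]);
	return 0;
}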
Expected %02x, got %02x\n", + thread_name, index, expected, actual); +} + +static unsigned int dmatest_verify(u8 **bufs, unsigned int start, + unsigned int end, unsigned int counter, u8 pattern, + bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; @@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, u8 expected; u8 *buf; unsigned int counter_orig = counter; - struct dmatest_verify_buffer *vb; for (; (buf = *bufs); bufs++) { counter = counter_orig; @@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { - if (error_count < MAX_ERROR_COUNT && vr) { - vb = &vr->data[error_count]; - vb->index = i; - vb->expected = expected; - vb->actual = actual; - } + if (error_count < MAX_ERROR_COUNT) + dmatest_mismatch(actual, pattern, i, + counter, is_srcbuf); error_count++; } counter++; @@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, } if (error_count > MAX_ERROR_COUNT) - pr_warning("%s: %u errors suppressed\n", + pr_warn("%s: %u errors suppressed\n", current->comm, error_count - MAX_ERROR_COUNT); return error_count; @@ -313,20 +330,6 @@ static void dmatest_callback(void *arg) wake_up_all(done->wait); } -static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); -} - -static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); -} - static unsigned int min_odd(unsigned int x, unsigned int y) { unsigned int val = min(x, y); @@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y) return val % 2 ? 
val : val - 1; } -static char *verify_result_get_one(struct dmatest_verify_result *vr, - unsigned int i) +static void result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, unsigned long data) { - struct dmatest_verify_buffer *vb = &vr->data[i]; - u8 diff = vb->actual ^ vr->pattern; - static char buf[512]; - char *msg; - - if (vr->is_srcbuf) - msg = "srcbuf overwritten!"; - else if ((vr->pattern & PATTERN_COPY) - && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) - msg = "dstbuf not copied!"; - else if (diff & PATTERN_SRC) - msg = "dstbuf was copied!"; - else - msg = "dstbuf mismatch!"; - - snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, - vb->index, vb->expected, vb->actual); - - return buf; + pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } -static char *thread_result_get(const char *name, - struct dmatest_thread_result *tr) +static void dbg_result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, + unsigned long data) { - static const char * const messages[] = { - [DMATEST_ET_OK] = "No errors", - [DMATEST_ET_MAP_SRC] = "src mapping error", - [DMATEST_ET_MAP_DST] = "dst mapping error", - [DMATEST_ET_PREP] = "prep error", - [DMATEST_ET_SUBMIT] = "submit error", - [DMATEST_ET_TIMEOUT] = "test timed out", - [DMATEST_ET_DMA_ERROR] = - "got completion callback (DMA_ERROR)", - [DMATEST_ET_DMA_IN_PROGRESS] = - "got completion callback (DMA_IN_PROGRESS)", - [DMATEST_ET_VERIFY] = "errors", - [DMATEST_ET_VERIFY_BUF] = "verify errors", - }; - static char buf[512]; - - snprintf(buf, sizeof(buf) - 1, - "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", - name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, - tr->len, tr->data); - - return buf; + pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } -static int thread_result_add(struct dmatest_info *info, - struct dmatest_result *r, enum dmatest_error_type type, - unsigned int n, unsigned int src_off, unsigned int dst_off, - unsigned int len, unsigned long data) -{ - struct dmatest_thread_result *tr; - - tr = kzalloc(sizeof(*tr), GFP_KERNEL); - if (!tr) - return -ENOMEM; - - tr->type = type; - tr->n = n; - tr->src_off = src_off; - tr->dst_off = dst_off; - tr->len = len; - tr->data = data; +#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ + if (verbose) \ + result(err, n, src_off, dst_off, len, data); \ + else \ + dbg_result(err, n, src_off, dst_off, len, data); \ +}) - mutex_lock(&info->results_lock); - list_add_tail(&tr->node, &r->results); - mutex_unlock(&info->results_lock); - - if (tr->type == DMATEST_ET_OK) - pr_debug("%s\n", thread_result_get(r->name, tr)); - else - pr_warn("%s\n", thread_result_get(r->name, tr)); - - return 0; -} - -static unsigned int verify_result_add(struct dmatest_info *info, - struct dmatest_result *r, unsigned int n, - unsigned int src_off, unsigned int dst_off, unsigned int len, - u8 **bufs, int whence, unsigned int counter, u8 pattern, - bool is_srcbuf) +static unsigned long long dmatest_persec(s64 runtime, unsigned int val) { - struct dmatest_verify_result *vr; - unsigned int error_count; - unsigned int buf_off = is_srcbuf ? 
src_off : dst_off; - unsigned int start, end; - - if (whence < 0) { - start = 0; - end = buf_off; - } else if (whence > 0) { - start = buf_off + len; - end = info->params.buf_size; - } else { - start = buf_off; - end = buf_off + len; - } + unsigned long long per_sec = 1000000; - vr = kmalloc(sizeof(*vr), GFP_KERNEL); - if (!vr) { - pr_warn("dmatest: No memory to store verify result\n"); - return dmatest_verify(NULL, bufs, start, end, counter, pattern, - is_srcbuf); - } - - vr->pattern = pattern; - vr->is_srcbuf = is_srcbuf; - - error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, - is_srcbuf); - if (error_count) { - vr->error_count = error_count; - thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, - dst_off, len, (unsigned long)vr); - return error_count; - } - - kfree(vr); - return 0; -} - -static void result_free(struct dmatest_info *info, const char *name) -{ - struct dmatest_result *r, *_r; - - mutex_lock(&info->results_lock); - list_for_each_entry_safe(r, _r, &info->results, node) { - struct dmatest_thread_result *tr, *_tr; - - if (name && strcmp(r->name, name)) - continue; - - list_for_each_entry_safe(tr, _tr, &r->results, node) { - if (tr->type == DMATEST_ET_VERIFY_BUF) - kfree(tr->vr); - list_del(&tr->node); - kfree(tr); - } + if (runtime <= 0) + return 0; - kfree(r->name); - list_del(&r->node); - kfree(r); + /* drop precision until runtime is 32-bits */ + while (runtime > UINT_MAX) { + runtime >>= 1; + per_sec <<= 1; } - mutex_unlock(&info->results_lock); + per_sec *= val; + do_div(per_sec, runtime); + return per_sec; } -static struct dmatest_result *result_init(struct dmatest_info *info, - const char *name) +static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) { - struct dmatest_result *r; - - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (r) { - r->name = kstrdup(name, GFP_KERNEL); - INIT_LIST_HEAD(&r->results); - mutex_lock(&info->results_lock); - list_add_tail(&r->node, &info->results); - mutex_unlock(&info->results_lock); - } - return r; + return dmatest_persec(runtime, len >> 10); } /* @@ -525,7 +405,6 @@ static int dmatest_func(void *data) struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; - const char *thread_name; unsigned int src_off, dst_off, len; unsigned int error_count; unsigned int failed_tests = 0; @@ -538,9 +417,10 @@ static int dmatest_func(void *data) int src_cnt; int dst_cnt; int i; - struct dmatest_result *result; + ktime_t ktime; + s64 runtime = 0; + unsigned long long total_len = 0; - thread_name = current->comm; set_freezable(); ret = -ENOMEM; @@ -570,10 +450,6 @@ static int dmatest_func(void *data) } else goto err_thread_type; - result = result_init(info, thread_name); - if (!result) - goto err_srcs; - thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; @@ -597,17 +473,17 @@ static int dmatest_func(void *data) set_user_nice(current, 10); /* - * src buffers are freed by the DMAEngine code with dma_unmap_single() - * dst buffers are freed by ourselves below + * src and dst buffers are freed by ourselves below */ - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT - | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + ktime = ktime_get(); while (!kthread_should_stop() && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t dma_srcs[src_cnt]; - dma_addr_t dma_dsts[dst_cnt]; + struct dmaengine_unmap_data *um; + dma_addr_t srcs[src_cnt]; + 
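The new summary line derives iops and KB/s from the measured runtime via dmatest_persec(), which sheds precision from the 64-bit microsecond count until it fits the 32-bit divisor that do_div() expects. A userspace rendition with sample numbers (1000 iterations moving 64 MiB in 2.5 s):

#include <stdio.h>
#include <limits.h>

static unsigned long long persec(long long runtime_us, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime_us <= 0)
		return 0;

	/* Shift runtime down and per_sec up together so their ratio stays
	 * roughly constant while runtime shrinks to 32 bits. */
	while ((unsigned long long)runtime_us > UINT_MAX) {
		runtime_us >>= 1;
		per_sec <<= 1;
	}
	per_sec *= val;
	return per_sec / (unsigned long long)runtime_us;
}

int main(void)
{
	long long runtime_us = 2500000;		/* 2.5 s of test time */

	printf("iops = %llu\n", persec(runtime_us, 1000));
	printf("KB/s = %llu\n", persec(runtime_us, (64 * 1024 * 1024) >> 10));
	return 0;
}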
dma_addr_t *dsts; u8 align = 0; total_tests++; @@ -626,81 +502,103 @@ static int dmatest_func(void *data) break; } - len = dmatest_random() % params->buf_size + 1; + if (params->noverify) { + len = params->buf_size; + src_off = 0; + dst_off = 0; + } else { + len = dmatest_random() % params->buf_size + 1; + len = (len >> align) << align; + if (!len) + len = 1 << align; + src_off = dmatest_random() % (params->buf_size - len + 1); + dst_off = dmatest_random() % (params->buf_size - len + 1); + + src_off = (src_off >> align) << align; + dst_off = (dst_off >> align) << align; + + dmatest_init_srcs(thread->srcs, src_off, len, + params->buf_size); + dmatest_init_dsts(thread->dsts, dst_off, len, + params->buf_size); + } + len = (len >> align) << align; if (!len) len = 1 << align; - src_off = dmatest_random() % (params->buf_size - len + 1); - dst_off = dmatest_random() % (params->buf_size - len + 1); + total_len += len; - src_off = (src_off >> align) << align; - dst_off = (dst_off >> align) << align; - - dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); - dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); + um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, + GFP_KERNEL); + if (!um) { + failed_tests++; + result("unmap data NULL", total_tests, + src_off, dst_off, len, ret); + continue; + } + um->len = params->buf_size; for (i = 0; i < src_cnt; i++) { - u8 *buf = thread->srcs[i] + src_off; - - dma_srcs[i] = dma_map_single(dev->dev, buf, len, - DMA_TO_DEVICE); - ret = dma_mapping_error(dev->dev, dma_srcs[i]); + unsigned long buf = (unsigned long) thread->srcs[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + um->addr[i] = dma_map_page(dev->dev, pg, pg_off, + um->len, DMA_TO_DEVICE); + srcs[i] = um->addr[i] + src_off; + ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, i); - thread_result_add(info, result, - DMATEST_ET_MAP_SRC, - total_tests, src_off, dst_off, - len, ret); + dmaengine_unmap_put(um); + result("src mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } + um->to_cnt++; } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ + dsts = &um->addr[src_cnt]; for (i = 0; i < dst_cnt; i++) { - dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], - params->buf_size, - DMA_BIDIRECTIONAL); - ret = dma_mapping_error(dev->dev, dma_dsts[i]); + unsigned long buf = (unsigned long) thread->dsts[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - i); - thread_result_add(info, result, - DMATEST_ET_MAP_DST, - total_tests, src_off, dst_off, - len, ret); + dmaengine_unmap_put(um); + result("dst mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } + um->bidi_cnt++; } if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, - dma_dsts[0] + dst_off, - dma_srcs[0], len, - flags); + dsts[0] + dst_off, + srcs[0], len, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, - dma_dsts[0] + dst_off, - dma_srcs, src_cnt, + dsts[0] + dst_off, + srcs, src_cnt, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; for (i = 0; i < dst_cnt; i++) - dma_pq[i] = dma_dsts[i] + dst_off; - tx = 
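In the verifying path above, the random length and offsets are trimmed down to the device's copy alignment (expressed as a shift) and the length is kept non-zero. The same arithmetic in isolation, with made-up buffer size, alignment and "random" picks:

#include <stdio.h>

/* Round a value down to a power-of-two alignment given as a shift. */
static unsigned int align_down(unsigned int v, unsigned int shift)
{
	return (v >> shift) << shift;
}

int main(void)
{
	unsigned int buf_size = 16384, shift = 3;	/* 8-byte alignment */
	unsigned int len = 4097, src_off = 1234, dst_off = 77;

	len = align_down(len, shift);
	if (!len)
		len = 1u << shift;
	src_off = align_down(src_off % (buf_size - len + 1), shift);
	dst_off = align_down(dst_off % (buf_size - len + 1), shift);

	printf("len=%u src_off=%u dst_off=%u\n", len, src_off, dst_off);
	return 0;
}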
dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, + dma_pq[i] = dsts[i] + dst_off; + tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, src_cnt, pq_coefs, len, flags); } if (!tx) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - dst_cnt); - thread_result_add(info, result, DMATEST_ET_PREP, - total_tests, src_off, dst_off, - len, 0); + dmaengine_unmap_put(um); + result("prep error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -712,9 +610,9 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - thread_result_add(info, result, DMATEST_ET_SUBMIT, - total_tests, src_off, dst_off, - len, cookie); + dmaengine_unmap_put(um); + result("submit error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -735,59 +633,59 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ - thread_result_add(info, result, DMATEST_ET_TIMEOUT, - total_tests, src_off, dst_off, - len, 0); + dmaengine_unmap_put(um); + result("test timed out", total_tests, src_off, dst_off, + len, 0); failed_tests++; continue; - } else if (status != DMA_SUCCESS) { - enum dmatest_error_type type = (status == DMA_ERROR) ? - DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; - thread_result_add(info, result, type, - total_tests, src_off, dst_off, - len, status); + } else if (status != DMA_COMPLETE) { + dmaengine_unmap_put(um); + result(status == DMA_ERROR ? + "completion error status" : + "completion busy status", total_tests, src_off, + dst_off, len, ret); failed_tests++; continue; } - /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); + dmaengine_unmap_put(um); - error_count = 0; + if (params->noverify) { + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); + continue; + } - pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, -1, + pr_debug("%s: verifying source buffer...\n", current->comm); + error_count = dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 0, - src_off, PATTERN_SRC | PATTERN_COPY, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 1, - src_off + len, PATTERN_SRC, true); - - pr_debug("%s: verifying dest buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, -1, + error_count += dmatest_verify(thread->srcs, src_off, + src_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, true); + error_count += dmatest_verify(thread->srcs, src_off + len, + params->buf_size, src_off + len, + PATTERN_SRC, true); + + pr_debug("%s: verifying dest buffer...\n", current->comm); + error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 0, - src_off, PATTERN_SRC | PATTERN_COPY, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 1, - dst_off + len, PATTERN_DST, false); + error_count += dmatest_verify(thread->dsts, dst_off, + dst_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, false); + error_count += 
dmatest_verify(thread->dsts, dst_off + len, + params->buf_size, dst_off + len, + PATTERN_DST, false); if (error_count) { - thread_result_add(info, result, DMATEST_ET_VERIFY, - total_tests, src_off, dst_off, - len, error_count); + result("data error", total_tests, src_off, dst_off, + len, error_count); failed_tests++; } else { - thread_result_add(info, result, DMATEST_ET_OK, - total_tests, src_off, dst_off, - len, 0); + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); } } + runtime = ktime_us_delta(ktime_get(), ktime); ret = 0; for (i = 0; thread->dsts[i]; i++) @@ -802,20 +700,17 @@ err_srcbuf: err_srcs: kfree(pq_coefs); err_thread_type: - pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", - thread_name, total_tests, failed_tests, ret); + pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", + current->comm, total_tests, failed_tests, + dmatest_persec(runtime, total_tests), + dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ if (ret) dmaengine_terminate_all(chan); thread->done = true; - - if (params->iterations > 0) - while (!kthread_should_stop()) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); - interruptible_sleep_on(&wait_dmatest_exit); - } + wake_up(&thread_wait); return ret; } @@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { ret = kthread_stop(thread->task); - pr_debug("dmatest: thread %s exited with status %d\n", - thread->task->comm, ret); + pr_debug("thread %s exited with status %d\n", + thread->task->comm, ret); list_del(&thread->node); + put_task_struct(thread->task); kfree(thread); } @@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info, for (i = 0; i < params->threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { - pr_warning("dmatest: No memory for %s-%s%u\n", - dma_chan_name(chan), op, i); - + pr_warn("No memory for %s-%s%u\n", + dma_chan_name(chan), op, i); break; } thread->info = info; thread->chan = dtc->chan; thread->type = type; smp_wmb(); - thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", + thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { - pr_warning("dmatest: Failed to run thread %s-%s%u\n", - dma_chan_name(chan), op, i); + pr_warn("Failed to create thread %s-%s%u\n", + dma_chan_name(chan), op, i); kfree(thread); break; } /* srcbuf and dstbuf are allocated by the thread itself */ - + get_task_struct(thread->task); list_add_tail(&thread->node, &dtc->threads); + wake_up_process(thread->task); } return i; @@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info, dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { - pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); + pr_warn("No memory for %s\n", dma_chan_name(chan)); return -ENOMEM; } @@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info, thread_count += cnt > 0 ? 
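After a successful copy, the thread now calls dmatest_verify() directly for three slices of each buffer: before the copy window, the window itself, and the tail. The sketch below fabricates a source buffer with the init pattern and runs the same three checks, so it reports zero errors; sizes and offsets are arbitrary:

#include <stdio.h>

#define PATTERN_SRC		0x80
#define PATTERN_COPY		0x40
#define PATTERN_COUNT_MASK	0x1f

/* Compare buf[start..end) against "pattern | inverted counter", where the
 * counter restarts from the value passed in for that region. */
static unsigned int verify(const unsigned char *buf, unsigned int start,
			   unsigned int end, unsigned int counter,
			   unsigned char pattern)
{
	unsigned int i, errors = 0;

	for (i = start; i < end; i++, counter++) {
		unsigned char expected = pattern | (~counter & PATTERN_COUNT_MASK);

		if (buf[i] != expected)
			errors++;
	}
	return errors;
}

int main(void)
{
	unsigned char src[32];
	unsigned int i, src_off = 8, len = 16, errors;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (i >= src_off && i < src_off + len ?
			  PATTERN_SRC | PATTERN_COPY : PATTERN_SRC) |
			 (~i & PATTERN_COUNT_MASK);

	errors  = verify(src, 0, src_off, 0, PATTERN_SRC);
	errors += verify(src, src_off, src_off + len, src_off,
			 PATTERN_SRC | PATTERN_COPY);
	errors += verify(src, src_off + len, sizeof(src), src_off + len,
			 PATTERN_SRC);
	printf("source buffer verify: %u errors\n", errors);
	return 0;
}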
cnt : 0; } - pr_info("dmatest: Started %u threads using %s\n", + pr_info("Started %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &info->channels); @@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param) return true; } -static int __run_threaded_test(struct dmatest_info *info) +static void request_channels(struct dmatest_info *info, + enum dma_transaction_type type) { dma_cap_mask_t mask; - struct dma_chan *chan; - struct dmatest_params *params = &info->params; - int err = 0; dma_cap_zero(mask); - dma_cap_set(DMA_MEMCPY, mask); + dma_cap_set(type, mask); for (;;) { + struct dmatest_params *params = &info->params; + struct dma_chan *chan; + chan = dma_request_channel(mask, filter, params); if (chan) { - err = dmatest_add_channel(info, chan); - if (err) { + if (dmatest_add_channel(info, chan)) { dma_release_channel(chan); break; /* add_channel failed, punt */ } @@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info) info->nr_channels >= params->max_channels) break; /* we have all we need */ } - return err; } -#ifndef MODULE -static int run_threaded_test(struct dmatest_info *info) +static void run_threaded_test(struct dmatest_info *info) { - int ret; + struct dmatest_params *params = &info->params; - mutex_lock(&info->lock); - ret = __run_threaded_test(info); - mutex_unlock(&info->lock); - return ret; + /* Copy test parameters */ + params->buf_size = test_buf_size; + strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); + strlcpy(params->device, strim(test_device), sizeof(params->device)); + params->threads_per_chan = threads_per_chan; + params->max_channels = max_channels; + params->iterations = iterations; + params->xor_sources = xor_sources; + params->pq_sources = pq_sources; + params->timeout = timeout; + params->noverify = noverify; + + request_channels(info, DMA_MEMCPY); + request_channels(info, DMA_XOR); + request_channels(info, DMA_PQ); } -#endif -static void __stop_threaded_test(struct dmatest_info *info) +static void stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; @@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info) list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); - pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); + pr_debug("dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } info->nr_channels = 0; } -static void stop_threaded_test(struct dmatest_info *info) +static void restart_threaded_test(struct dmatest_info *info, bool run) { - mutex_lock(&info->lock); - __stop_threaded_test(info); - mutex_unlock(&info->lock); -} - -static int __restart_threaded_test(struct dmatest_info *info, bool run) -{ - struct dmatest_params *params = &info->params; + /* we might be called early to set run=, defer running until all + * parameters have been evaluated + */ + if (!info->did_init) + return; /* Stop any running test first */ - __stop_threaded_test(info); - - if (run == false) - return 0; - - /* Clear results from previous run */ - result_free(info, NULL); - - /* Copy test parameters */ - params->buf_size = test_buf_size; - strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); - strlcpy(params->device, strim(test_device), sizeof(params->device)); - params->threads_per_chan = threads_per_chan; - params->max_channels = max_channels; - params->iterations = iterations; - params->xor_sources = xor_sources; - params->pq_sources = pq_sources; 
- params->timeout = timeout; + stop_threaded_test(info); /* Run test with new parameters */ - return __run_threaded_test(info); -} - -static bool __is_threaded_test_run(struct dmatest_info *info) -{ - struct dmatest_chan *dtc; - - list_for_each_entry(dtc, &info->channels, node) { - struct dmatest_thread *thread; - - list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) - return true; - } - } - - return false; + run_threaded_test(info); } -static ssize_t dtf_read_run(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static int dmatest_run_get(char *val, const struct kernel_param *kp) { - struct dmatest_info *info = file->private_data; - char buf[3]; + struct dmatest_info *info = &test_info; mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) { - buf[0] = 'Y'; + if (is_threaded_test_run(info)) { + dmatest_run = true; } else { - __stop_threaded_test(info); - buf[0] = 'N'; + stop_threaded_test(info); + dmatest_run = false; } - mutex_unlock(&info->lock); - buf[1] = '\n'; - buf[2] = 0x00; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dmatest_info *info = file->private_data; - char buf[16]; - bool bv; - int ret = 0; - if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) - return -EFAULT; - - if (strtobool(buf, &bv) == 0) { - mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) - ret = -EBUSY; - else - ret = __restart_threaded_test(info, bv); - - mutex_unlock(&info->lock); - } - - return ret ? ret : count; + return param_get_bool(val, kp); } -static const struct file_operations dtf_run_fops = { - .read = dtf_read_run, - .write = dtf_write_run, - .open = simple_open, - .llseek = default_llseek, -}; - -static int dtf_results_show(struct seq_file *sf, void *data) +static int dmatest_run_set(const char *val, const struct kernel_param *kp) { - struct dmatest_info *info = sf->private; - struct dmatest_result *result; - struct dmatest_thread_result *tr; - unsigned int i; + struct dmatest_info *info = &test_info; + int ret; - mutex_lock(&info->results_lock); - list_for_each_entry(result, &info->results, node) { - list_for_each_entry(tr, &result->results, node) { - seq_printf(sf, "%s\n", - thread_result_get(result->name, tr)); - if (tr->type == DMATEST_ET_VERIFY_BUF) { - for (i = 0; i < tr->vr->error_count; i++) { - seq_printf(sf, "\t%s\n", - verify_result_get_one(tr->vr, i)); - } - } - } + mutex_lock(&info->lock); + ret = param_set_bool(val, kp); + if (ret) { + mutex_unlock(&info->lock); + return ret; } - mutex_unlock(&info->results_lock); - return 0; -} - -static int dtf_results_open(struct inode *inode, struct file *file) -{ - return single_open(file, dtf_results_show, inode->i_private); -} - -static const struct file_operations dtf_results_fops = { - .open = dtf_results_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int dmatest_register_dbgfs(struct dmatest_info *info) -{ - struct dentry *d; - - d = debugfs_create_dir("dmatest", NULL); - if (IS_ERR(d)) - return PTR_ERR(d); - if (!d) - goto err_root; + if (is_threaded_test_run(info)) + ret = -EBUSY; + else if (dmatest_run) + restart_threaded_test(info, dmatest_run); - info->root = d; - - /* Run or stop threaded test */ - debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, - &dtf_run_fops); - - /* Results of test in progress */ - debugfs_create_file("results", S_IRUGO, info->root, 
info, - &dtf_results_fops); - - return 0; + mutex_unlock(&info->lock); -err_root: - pr_err("dmatest: Failed to initialize debugfs\n"); - return -ENOMEM; + return ret; } static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; - int ret; - - memset(info, 0, sizeof(*info)); + struct dmatest_params *params = &info->params; - mutex_init(&info->lock); - INIT_LIST_HEAD(&info->channels); + if (dmatest_run) { + mutex_lock(&info->lock); + run_threaded_test(info); + mutex_unlock(&info->lock); + } - mutex_init(&info->results_lock); - INIT_LIST_HEAD(&info->results); + if (params->iterations && wait) + wait_event(thread_wait, !is_threaded_test_run(info)); - ret = dmatest_register_dbgfs(info); - if (ret) - return ret; + /* module parameters are stable, inittime tests are started, + * let userspace take over 'run' control + */ + info->did_init = true; -#ifdef MODULE return 0; -#else - return run_threaded_test(info); -#endif } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); @@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void) { struct dmatest_info *info = &test_info; - debugfs_remove_recursive(info->root); + mutex_lock(&info->lock); stop_threaded_test(info); - result_free(info, NULL); + mutex_unlock(&info->lock); } module_exit(dmatest_exit); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 89eb89f2228..7516be4677c 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) { @@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); - if (!is_slave_direction(dwc->direction)) { - struct device *parent = chan2parent(&dwc->chan); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); spin_unlock_irqrestore(&dwc->lock, flags); if (callback) @@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dwc_scan_descriptors(to_dw_dma(chan->device), dwc); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) + if (ret != DMA_COMPLETE) dma_set_residue(txstate, dwc_get_residue(dwc)); if (dwc->paused && ret == DMA_IN_PROGRESS) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index bef8a368c8d..2539ea0cbc6 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -46,14 +46,21 @@ #define EDMA_CHANS 64 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ -/* Max of 16 segments per channel to conserve PaRAM slots */ -#define MAX_NR_SG 16 +/* + * Max of 20 segments per channel to conserve PaRAM slots + * Also note that MAX_NR_SG should be atleast the no.of periods + * that are required for ASoC, otherwise DMA 
prep calls will + * fail. Today davinci-pcm is the only user of this driver and + * requires atleast 17 slots, so we setup the default to 20. + */ +#define MAX_NR_SG 20 #define EDMA_MAX_SLOTS MAX_NR_SG #define EDMA_DESCRIPTORS 16 struct edma_desc { struct virt_dma_desc vdesc; struct list_head node; + int cyclic; int absync; int pset_nr; int processed; @@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan) * then setup a link to the dummy slot, this results in all future * events being absorbed and that's OK because we're done */ - if (edesc->processed == edesc->pset_nr) - edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); + if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) + edma_link(echan->slot[nslots-1], echan->slot[1]); + else + edma_link(echan->slot[nslots-1], + echan->ecc->dummy_slot); + } edma_resume(echan->ch_num); @@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return ret; } +/* + * A PaRAM set configuration abstraction used by other modes + * @chan: Channel who's PaRAM set we're configuring + * @pset: PaRAM set to initialize and setup. + * @src_addr: Source address of the DMA + * @dst_addr: Destination address of the DMA + * @burst: In units of dev_width, how much to send + * @dev_width: How much is the dev_width + * @dma_length: Total length of the DMA transfer + * @direction: Direction of the transfer + */ +static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, + dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, + enum dma_slave_buswidth dev_width, unsigned int dma_length, + enum dma_transfer_direction direction) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + int acnt, bcnt, ccnt, cidx; + int src_bidx, dst_bidx, src_cidx, dst_cidx; + int absync; + + acnt = dev_width; + /* + * If the maxburst is equal to the fifo width, use + * A-synced transfers. This allows for large contiguous + * buffer transfers using only one PaRAM set. + */ + if (burst == 1) { + /* + * For the A-sync case, bcnt and ccnt are the remainder + * and quotient respectively of the division of: + * (dma_length / acnt) by (SZ_64K -1). This is so + * that in case bcnt over flows, we have ccnt to use. + * Note: In A-sync tranfer only, bcntrld is used, but it + * only applies for sg_dma_len(sg) >= SZ_64K. + * In this case, the best way adopted is- bccnt for the + * first frame will be the remainder below. Then for + * every successive frame, bcnt will be SZ_64K-1. This + * is assured as bcntrld = 0xffff in end of function. + */ + absync = false; + ccnt = dma_length / acnt / (SZ_64K - 1); + bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); + /* + * If bcnt is non-zero, we have a remainder and hence an + * extra frame to transfer, so increment ccnt. + */ + if (bcnt) + ccnt++; + else + bcnt = SZ_64K - 1; + cidx = acnt; + } else { + /* + * If maxburst is greater than the fifo address_width, + * use AB-synced transfers where A count is the fifo + * address_width and B count is the maxburst. In this + * case, we are limited to transfers of C count frames + * of (address_width * maxburst) where C count is limited + * to SZ_64K-1. This places an upper bound on the length + * of an SG segment that can be handled. 
+ */ + absync = true; + bcnt = burst; + ccnt = dma_length / (acnt * bcnt); + if (ccnt > (SZ_64K - 1)) { + dev_err(dev, "Exceeded max SG segment size\n"); + return -EINVAL; + } + cidx = acnt * bcnt; + } + + if (direction == DMA_MEM_TO_DEV) { + src_bidx = acnt; + src_cidx = cidx; + dst_bidx = 0; + dst_cidx = 0; + } else if (direction == DMA_DEV_TO_MEM) { + src_bidx = 0; + src_cidx = 0; + dst_bidx = acnt; + dst_cidx = cidx; + } else { + dev_err(dev, "%s: direction not implemented yet\n", __func__); + return -EINVAL; + } + + pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); + /* Configure A or AB synchronized transfers */ + if (absync) + pset->opt |= SYNCDIM; + + pset->src = src_addr; + pset->dst = dst_addr; + + pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; + pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; + + pset->a_b_cnt = bcnt << 16 | acnt; + pset->ccnt = ccnt; + /* + * Only time when (bcntrld) auto reload is required is for + * A-sync case, and in this case, a requirement of reload value + * of SZ_64K-1 only is assured. 'link' is initially set to NULL + * and then later will be populated by edma_execute. + */ + pset->link_bcntrld = 0xffffffff; + return absync; +} + static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edma_desc *edesc; - dma_addr_t dev_addr; + dma_addr_t src_addr = 0, dst_addr = 0; enum dma_slave_buswidth dev_width; u32 burst; struct scatterlist *sg; - int acnt, bcnt, ccnt, src, dst, cidx; - int src_bidx, dst_bidx, src_cidx, dst_cidx; - int i, nslots; + int i, nslots, ret; if (unlikely(!echan || !sgl || !sg_len)) return NULL; if (direction == DMA_DEV_TO_MEM) { - dev_addr = echan->cfg.src_addr; + src_addr = echan->cfg.src_addr; dev_width = echan->cfg.src_addr_width; burst = echan->cfg.src_maxburst; } else if (direction == DMA_MEM_TO_DEV) { - dev_addr = echan->cfg.dst_addr; + dst_addr = echan->cfg.dst_addr; dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { @@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( if (echan->slot[i] < 0) { kfree(edesc); dev_err(dev, "Failed to allocate slot\n"); - kfree(edesc); return NULL; } } @@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* Configure PaRAM sets for each SG */ for_each_sg(sgl, sg, sg_len, i) { - - acnt = dev_width; - - /* - * If the maxburst is equal to the fifo width, use - * A-synced transfers. This allows for large contiguous - * buffer transfers using only one PaRAM set. - */ - if (burst == 1) { - edesc->absync = false; - ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); - bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); - if (bcnt) - ccnt++; - else - bcnt = SZ_64K - 1; - cidx = acnt; - /* - * If maxburst is greater than the fifo address_width, - * use AB-synced transfers where A count is the fifo - * address_width and B count is the maxburst. In this - * case, we are limited to transfers of C count frames - * of (address_width * maxburst) where C count is limited - * to SZ_64K-1. This places an upper bound on the length - * of an SG segment that can be handled. 
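edma_config_pset() above collapses the old per-SG math into one helper: burst == 1 selects A-synchronized transfers and splits the element count into bcnt/ccnt so neither exceeds 65535, while a larger burst selects AB-synchronized frames of acnt * bcnt bytes. A standalone walk-through of that split with sample element size, burst and lengths:

#include <stdio.h>

#define SZ_64K 0x10000u

static int config_counts(unsigned int acnt, unsigned int burst,
			 unsigned int dma_length)
{
	unsigned int bcnt, ccnt;
	int absync;

	if (burst == 1) {
		/* A-sync: bcnt/ccnt split the element count so that bcnt
		 * never exceeds 65535. */
		absync = 0;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
	} else {
		/* AB-sync: each event moves a frame of acnt * bcnt bytes,
		 * so the frame count ccnt is what is bounded by 65535. */
		absync = 1;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > SZ_64K - 1) {
			printf("Exceeded max SG segment size\n");
			return -1;
		}
	}
	printf("acnt=%u bcnt=%u ccnt=%u absync=%d\n", acnt, bcnt, ccnt, absync);
	return absync;
}

int main(void)
{
	config_counts(4, 1, 1024);		/* small A-sync transfer */
	config_counts(4, 1, 1024 * 1024);	/* A-sync needing an extra frame */
	config_counts(4, 8, 4096);		/* AB-sync with a burst of 8 */
	return 0;
}

The return value is what the slave-sg and cyclic callers store in edesc->absync to decide whether SYNCDIM is set.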
- */ - } else { - edesc->absync = true; - bcnt = burst; - ccnt = sg_dma_len(sg) / (acnt * bcnt); - if (ccnt > (SZ_64K - 1)) { - dev_err(dev, "Exceeded max SG segment size\n"); - kfree(edesc); - return NULL; - } - cidx = acnt * bcnt; + /* Get address for each SG */ + if (direction == DMA_DEV_TO_MEM) + dst_addr = sg_dma_address(sg); + else + src_addr = sg_dma_address(sg); + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, + sg_dma_len(sg), direction); + if (ret < 0) { + kfree(edesc); + return NULL; } - if (direction == DMA_MEM_TO_DEV) { - src = sg_dma_address(sg); - dst = dev_addr; - src_bidx = acnt; - src_cidx = cidx; - dst_bidx = 0; - dst_cidx = 0; - } else { - src = dev_addr; - dst = sg_dma_address(sg); - src_bidx = 0; - src_cidx = 0; - dst_bidx = acnt; - dst_cidx = cidx; - } - - edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); - /* Configure A or AB synchronized transfers */ - if (edesc->absync) - edesc->pset[i].opt |= SYNCDIM; + edesc->absync = ret; /* If this is the last in a current SG set of transactions, enable interrupts so that next set is processed */ @@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) edesc->pset[i].opt |= TCINTEN; + } - edesc->pset[i].src = src; - edesc->pset[i].dst = dst; + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} - edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; - edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; +static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long tx_flags, void *context) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + struct edma_desc *edesc; + dma_addr_t src_addr, dst_addr; + enum dma_slave_buswidth dev_width; + u32 burst; + int i, ret, nslots; + + if (unlikely(!echan || !buf_len || !period_len)) + return NULL; + + if (direction == DMA_DEV_TO_MEM) { + src_addr = echan->cfg.src_addr; + dst_addr = buf_addr; + dev_width = echan->cfg.src_addr_width; + burst = echan->cfg.src_maxburst; + } else if (direction == DMA_MEM_TO_DEV) { + src_addr = buf_addr; + dst_addr = echan->cfg.dst_addr; + dev_width = echan->cfg.dst_addr_width; + burst = echan->cfg.dst_maxburst; + } else { + dev_err(dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { + dev_err(dev, "Undefined slave buswidth\n"); + return NULL; + } + + if (unlikely(buf_len % period_len)) { + dev_err(dev, "Period should be multiple of Buffer length\n"); + return NULL; + } + + nslots = (buf_len / period_len) + 1; + + /* + * Cyclic DMA users such as audio cannot tolerate delays introduced + * by cases where the number of periods is more than the maximum + * number of SGs the EDMA driver can handle at a time. For DMA types + * such as Slave SGs, such delays are tolerable and synchronized, + * but the synchronization is difficult to achieve with Cyclic and + * cannot be guaranteed, so we error out early. 
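The constraint described above, and enforced by the nslots check that follows in this hunk, is easy to state numerically: the ring must be a whole number of periods, and one PaRAM set per period plus one extra loop-back set must fit in the channel's slot array. A small standalone sketch of that validation; MAX_NR_SG is the default from the top of this driver, and the sizes in main() are invented.

#include <stdio.h>
#include <stddef.h>

#define MAX_NR_SG 20	/* default from the top of this driver */

/*
 * Mirror of the validation in edma_prep_dma_cyclic(): the ring must be
 * a whole number of periods, and one PaRAM set per period plus one
 * extra set (used to close the loop) must fit in the slot array.
 * Returns the slot count, or -1 if the request cannot be handled.
 */
static int edma_cyclic_nslots(size_t buf_len, size_t period_len)
{
	size_t nslots;

	if (!buf_len || !period_len || buf_len % period_len)
		return -1;

	nslots = buf_len / period_len + 1;
	if (nslots > MAX_NR_SG)
		return -1;

	return (int)nslots;
}

int main(void)
{
	/* 32 KiB ring in 4 KiB periods: 8 periods -> 9 slots, accepted */
	printf("nslots = %d\n", edma_cyclic_nslots(32 * 1024, 4 * 1024));
	/* 256 KiB ring in 4 KiB periods: 64 periods -> rejected */
	printf("nslots = %d\n", edma_cyclic_nslots(256 * 1024, 4 * 1024));
	return 0;
}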
+ */ + if (nslots > MAX_NR_SG) + return NULL; + + edesc = kzalloc(sizeof(*edesc) + nslots * + sizeof(edesc->pset[0]), GFP_ATOMIC); + if (!edesc) { + dev_dbg(dev, "Failed to allocate a descriptor\n"); + return NULL; + } + + edesc->cyclic = 1; + edesc->pset_nr = nslots; + + dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); + dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); + dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len); + + for (i = 0; i < nslots; i++) { + /* Allocate a PaRAM slot, if needed */ + if (echan->slot[i] < 0) { + echan->slot[i] = + edma_alloc_slot(EDMA_CTLR(echan->ch_num), + EDMA_SLOT_ANY); + if (echan->slot[i] < 0) { + dev_err(dev, "Failed to allocate slot\n"); + return NULL; + } + } + + if (i == nslots - 1) { + memcpy(&edesc->pset[i], &edesc->pset[0], + sizeof(edesc->pset[0])); + break; + } + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, period_len, + direction); + if (ret < 0) + return NULL; - edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; - edesc->pset[i].ccnt = ccnt; - edesc->pset[i].link_bcntrld = 0xffffffff; + if (direction == DMA_DEV_TO_MEM) + dst_addr += period_len; + else + src_addr += period_len; + dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); + dev_dbg(dev, + "\n pset[%d]:\n" + " chnum\t%d\n" + " slot\t%d\n" + " opt\t%08x\n" + " src\t%08x\n" + " dst\t%08x\n" + " abcnt\t%08x\n" + " ccnt\t%08x\n" + " bidx\t%08x\n" + " cidx\t%08x\n" + " lkrld\t%08x\n", + i, echan->ch_num, echan->slot[i], + edesc->pset[i].opt, + edesc->pset[i].src, + edesc->pset[i].dst, + edesc->pset[i].a_b_cnt, + edesc->pset[i].ccnt, + edesc->pset[i].src_dst_bidx, + edesc->pset[i].src_dst_cidx, + edesc->pset[i].link_bcntrld); + + edesc->absync = ret; + + /* + * Enable interrupts for every period because callback + * has to be called for every period. 
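For reference, the loop above lays the ring out as one PaRAM set per period, with the memory-side address advancing by period_len each time and the extra final set loaded as a copy of period 0 so the transfer can wrap around. A throwaway sketch of that address walk; the buffer address and sizes are invented.

#include <stdio.h>

/*
 * Sketch of the per-period layout built by edma_prep_dma_cyclic():
 * pset[i] is based period_len bytes after pset[i - 1], and the last
 * set is a plain copy of pset[0] (the loop-back set).
 */
int main(void)
{
	unsigned long long buf_addr = 0x80000000ULL;	/* invented */
	unsigned long buf_len = 32 * 1024, period_len = 4 * 1024;
	unsigned long periods = buf_len / period_len;

	for (unsigned long i = 0; i < periods; i++)
		printf("pset[%lu]: base 0x%llx\n", i,
		       buf_addr + (unsigned long long)i * period_len);
	printf("pset[%lu]: copy of pset[0] (loop-back set)\n", periods);
	return 0;
}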
+ */ + edesc->pset[i].opt |= TCINTEN; } return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); @@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) unsigned long flags; struct edmacc_param p; - /* Pause the channel */ - edma_pause(echan->ch_num); + edesc = echan->edesc; + + /* Pause the channel for non-cyclic */ + if (!edesc || (edesc && !edesc->cyclic)) + edma_pause(echan->ch_num); switch (ch_status) { - case DMA_COMPLETE: + case EDMA_DMA_COMPLETE: spin_lock_irqsave(&echan->vchan.lock, flags); - edesc = echan->edesc; if (edesc) { - if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) { + vchan_cyclic_callback(&edesc->vdesc); + } else if (edesc->processed == edesc->pset_nr) { dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); edma_stop(echan->ch_num); vchan_cookie_complete(&edesc->vdesc); + edma_execute(echan); } else { dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); + edma_execute(echan); } - - edma_execute(echan); } spin_unlock_irqrestore(&echan->vchan.lock, flags); break; - case DMA_CC_ERROR: + case EDMA_DMA_CC_ERROR: spin_lock_irqsave(&echan->vchan.lock, flags); edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); @@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS || !txstate) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&echan->vchan.lock, flags); @@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, struct device *dev) { dma->device_prep_slave_sg = edma_prep_slave_sg; + dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; dma->device_alloc_chan_resources = edma_alloc_chan_resources; dma->device_free_chan_resources = edma_free_chan_resources; dma->device_issue_pending = edma_issue_pending; diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 591cd8c63ab..cb4bf682a70 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) spin_unlock_irqrestore(&edmac->lock, flags); } -static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) -{ - struct device *dev = desc->txd.chan->device->dev; - - if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - else - dma_unmap_page(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - } - if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - else - dma_unmap_page(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - } -} - static void ep93xx_dma_tasklet(unsigned long data) { struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; @@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data) /* Now we can release all the chained descriptors */ list_for_each_entry_safe(desc, d, &list, node) { - /* - * For the memcpy channels the API requires us to unmap the - * buffers unless requested otherwise. 
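The completion path reworked in the edma_callback() hunk above now distinguishes three cases once a descriptor is active. The little decision table below restates them as a standalone function; the strings only describe what the driver does, and vchan_cyclic_callback() and vchan_cookie_complete() are the virt-dma helpers named in that hunk.

#include <stdio.h>
#include <stdbool.h>

/*
 * Restatement of the EDMA_DMA_COMPLETE handling: cyclic descriptors get
 * a per-period callback and keep running, slave-sg descriptors either
 * finish (all PaRAM sets consumed) or trigger submission of the next
 * batch of sets.
 */
static const char *on_complete(bool have_desc, bool cyclic,
			       int processed, int pset_nr)
{
	if (!have_desc)
		return "no active descriptor, nothing to do";
	if (cyclic)
		return "vchan_cyclic_callback(): report one period, keep running";
	if (processed == pset_nr)
		return "stop channel, complete cookie, start next descriptor";
	return "intermediate: submit next batch via edma_execute()";
}

int main(void)
{
	printf("%s\n", on_complete(true, true, 0, 0));
	printf("%s\n", on_complete(true, false, 4, 10));
	printf("%s\n", on_complete(true, false, 10, 10));
	return 0;
}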
- */ - if (!edmac->chan.private) - ep93xx_dma_unmap_buffers(desc); - + dma_descriptor_unmap(&desc->txd); ep93xx_dma_desc_put(edmac, desc); } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 61517dd0d0b..7086a16a55f 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -870,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, /* Run any dependencies */ dma_run_dependencies(txd); - /* Unmap the dst buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); - else - dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); - } - - /* Unmap the src buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, src, len, DMA_TO_DEVICE); - else - dma_unmap_page(dev, src, len, DMA_TO_DEVICE); - } - + dma_descriptor_unmap(txd); #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); #endif @@ -1255,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, WARN_ON(fdev->feature != chan->feature); chan->dev = fdev->dev; - chan->id = ((res.start - 0x100) & 0xfff) >> 7; + chan->id = (res.start & 0xfff) < 0x300 ? + ((res.start - 0x100) & 0xfff) >> 7 : + ((res.start - 0x200) & 0xfff) >> 7; if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; @@ -1428,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op) } static const struct of_device_id fsldma_of_ids[] = { + { .compatible = "fsl,elo3-dma", }, { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} @@ -1449,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = { static __init int fsldma_init(void) { - pr_info("Freescale Elo / Elo Plus DMA driver\n"); + pr_info("Freescale Elo series DMA driver\n"); return platform_driver_register(&fsldma_of_driver); } @@ -1461,5 +1449,5 @@ static void __exit fsldma_exit(void) subsys_initcall(fsldma_init); module_exit(fsldma_exit); -MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); +MODULE_DESCRIPTION("Freescale Elo series DMA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f5c38791fc7..1ffc24484d2 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -112,7 +112,7 @@ struct fsldma_chan_regs { }; struct fsldma_chan; -#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 +#define FSL_DMA_MAX_CHANS_PER_DEVICE 8 struct fsldma_device { void __iomem *regs; /* DGSR register base */ diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 55852c02679..6f9ac2022ab 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " - "dma_length=%d\n", __func__, imxdmac->channel, - d->dest, d->src, d->len); + dev_dbg(imxdma->dev, + "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n", + __func__, imxdmac->channel, + (unsigned long long)d->dest, + (unsigned long long)d->src, d->len); break; /* Cyclic transfer is the same as slave_sg with special sg configuration. 
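The fsldma chan->id expression a few hunks above folds the original Elo/Elo Plus layout and the new eight-channel case into one conditional, and it is easiest to see what it does by exercising it directly. The register offsets in main() are invented purely to hit both branches; they are not taken from a real device tree.

#include <stdio.h>

/* Same expression as the fsldma hunk above, pulled out for illustration. */
static int fsl_chan_id(unsigned int start)
{
	return (start & 0xfff) < 0x300 ?
	       ((start - 0x100) & 0xfff) >> 7 :
	       ((start - 0x200) & 0xfff) >> 7;
}

int main(void)
{
	/* hypothetical channel register offsets, 0x80 apart */
	unsigned int offs[] = { 0x100, 0x180, 0x200, 0x280,
				0x400, 0x480, 0x500, 0x580 };

	for (unsigned int i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
		printf("offset 0x%03x -> channel id %d\n",
		       offs[i], fsl_chan_id(offs[i]));
	return 0;
}

With FSL_DMA_MAX_CHANS_PER_DEVICE raised to 8 in the header hunk, the second expression is what keeps the ids of the later channel bank in range.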
*/ @@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (dev2mem)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else if (d->direction == DMA_MEM_TO_DEV) { imx_dmav1_writel(imxdma, imxdmac->per_address, DMA_DAR(imxdmac->channel)); imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (mem2dev)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else { dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", __func__, imxdmac->channel); @@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) desc->desc.tx_submit = imxdma_tx_submit; /* txd.flags will be overwritten in prep funcs */ desc->desc.flags = DMA_CTRL_ACK; - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; list_add_tail(&desc->node, &imxdmac->ld_free); imxdmac->descs_allocated++; @@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( int i; unsigned int periods = buf_len / period_len; - dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", + dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n", __func__, imxdmac->channel, buf_len, period_len); if (list_empty(&imxdmac->ld_free) || @@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", - __func__, imxdmac->channel, src, dest, len); + dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n", + __func__, imxdmac->channel, (unsigned long long)src, + (unsigned long long)dest, len); if (list_empty(&imxdmac->ld_free) || imxdma_chan_is_doing_cyclic(imxdmac)) @@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" - " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, - imxdmac->channel, xt->src_start, xt->dst_start, + dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n" + " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__, + imxdmac->channel, (unsigned long long)xt->src_start, + (unsigned long long) xt->dst_start, xt->src_sgl ? "true" : "false", xt->dst_sgl ? 
"true" : "false", xt->numf, xt->frame_size); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c1fd504cae2..c75679d4202 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) if (error) sdmac->status = DMA_ERROR; else - sdmac->status = DMA_SUCCESS; + sdmac->status = DMA_COMPLETE; dma_cookie_complete(&sdmac->desc); if (sdmac->desc.callback) @@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( param &= ~BD_CONT; } - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, count, sg->dma_address, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, count, (u64)sg->dma_address, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); @@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( if (i + 1 == num_periods) param |= BD_WRAP; - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, period_len, dma_addr, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, period_len, (u64)dma_addr, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a975ebebea8..1aab8130efa 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, callback_txd(param_txd); } if (midc->raw_tfr) { - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; if (desc->lli != NULL) { pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); @@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { spin_lock_bh(&midc->lock); midc_scan_descriptors(to_middma_device(chan->device), midc); spin_unlock_bh(&midc->lock); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5ff6fc1819d..1a49c777607 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data) writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw) -{ - struct pci_dev *pdev = chan->device->pdev; - size_t offset = len - hw->size; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, hw->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) - ioat_unmap(pdev, hw->src_addr - offset, len, - PCI_DMA_TODEVICE, flags, 0); -} - dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) { dma_addr_t phys_complete; @@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) dump_desc_dbg(ioat, desc); if (tx->cookie) { dma_cookie_complete(tx); - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + dma_descriptor_unmap(tx); ioat->active -= desc->hw->tx_cnt; if (tx->callback) { tx->callback(tx->callback_param); @@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; device->cleanup_fn((unsigned long) c); @@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) dma_src = dma_map_single(dev, src, 
IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_INTERRUPT; + flags = DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { @@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) if (tmo == 0 || dma->device_tx_status(dma_chan, cookie, NULL) - != DMA_SUCCESS) { + != DMA_COMPLETE) { dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto unmap_dma; @@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix"; module_param_string(ioat_interrupt_style, ioat_interrupt_style, sizeof(ioat_interrupt_style), 0644); MODULE_PARM_DESC(ioat_interrupt_style, - "set ioat interrupt style: msix (default), " - "msix-single-vector, msi, intx)"); + "set ioat interrupt style: msix (default), msi, intx"); /** * ioat_dma_setup_interrupts - setup interrupt handler @@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device) if (!strcmp(ioat_interrupt_style, "msix")) goto msix; - if (!strcmp(ioat_interrupt_style, "msix-single-vector")) - goto msix_single_vector; if (!strcmp(ioat_interrupt_style, "msi")) goto msi; if (!strcmp(ioat_interrupt_style, "intx")) @@ -920,10 +901,8 @@ msix: device->msix_entries[i].entry = i; err = pci_enable_msix(pdev, device->msix_entries, msixcnt); - if (err < 0) + if (err) goto msi; - if (err > 0) - goto msix_single_vector; for (i = 0; i < msixcnt; i++) { msix = &device->msix_entries[i]; @@ -937,29 +916,13 @@ msix: chan = ioat_chan_by_index(device, j); devm_free_irq(dev, msix->vector, chan); } - goto msix_single_vector; + goto msi; } } intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; device->irq_mode = IOAT_MSIX; goto done; -msix_single_vector: - msix = &device->msix_entries[0]; - msix->entry = 0; - err = pci_enable_msix(pdev, device->msix_entries, 1); - if (err) - goto msi; - - err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, - "ioat-msix", device); - if (err) { - pci_disable_msix(pdev); - goto msi; - } - device->irq_mode = IOAT_MSIX_SINGLE; - goto done; - msi: err = pci_enable_msi(pdev); if (err) @@ -971,7 +934,7 @@ msi: pci_disable_msi(pdev); goto intx; } - device->irq_mode = IOAT_MSIX; + device->irq_mode = IOAT_MSI; goto done; intx: diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 54fb7b9ff9a..11fb877ddca 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -52,7 +52,6 @@ enum ioat_irq_mode { IOAT_NOIRQ = 0, IOAT_MSIX, - IOAT_MSIX_SINGLE, IOAT_MSI, IOAT_INTX }; @@ -83,7 +82,6 @@ struct ioatdma_device { struct pci_pool *completion_pool; #define MAX_SED_POOLS 5 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; - struct kmem_cache *sed_pool; struct dma_device common; u8 version; struct msix_entry msix_entries[4]; @@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err) return !!err; } -static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, - int direction, enum dma_ctrl_flags flags, bool dst) -{ - if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || - (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) - pci_unmap_single(pdev, addr, len, direction); - else - pci_unmap_page(pdev, addr, len, direction); -} - int ioat_probe(struct ioatdma_device *device); int ioat_register(struct ioatdma_device *device); int ioat1_dma_probe(struct ioatdma_device *dev, int dca); @@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct 
ioat_chan_common *chan, int idx); enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate); -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, dma_addr_t *phys_complete); void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index b925e1b1d13..5d3affe7e97 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; dump_desc_dbg(ioat, desc); if (tx->cookie) { - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + dma_descriptor_unmap(tx); dma_cookie_complete(tx); if (tx->callback) { tx->callback(tx->callback_param); diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 212d584fe42..470292767e6 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) int ioat2_dma_probe(struct ioatdma_device *dev, int dca); int ioat3_dma_probe(struct ioatdma_device *dev, int dca); -void ioat3_dma_remove(struct ioatdma_device *dev); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d8ececaf1b5..820817e97e6 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -67,6 +67,8 @@ #include "dma.h" #include "dma_v2.h" +extern struct kmem_cache *ioat3_sed_cache; + /* ioat hardware assumes at least two sources for raid operations */ #define src_cnt_to_sw(x) ((x) + 2) #define src_cnt_to_hw(x) ((x) - 2) @@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6 }; -/* - * technically sources 1 and 2 do not require SED, but the op will have - * at least 9 descriptors so that's irrelevant. 
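The dev_dbg() changes in the imx-dma and imx-sdma hunks above (and similar ones later in this series) all follow one rule: size_t arguments are printed with %zu, and dma_addr_t values, whose width depends on the kernel configuration, are cast to unsigned long long and printed with %llx. A userspace illustration of the same idea, using a stand-in typedef since dma_addr_t itself is kernel-only.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t fake_dma_addr_t;	/* stands in for dma_addr_t */

int main(void)
{
	fake_dma_addr_t dest = 0x12345678abcdULL;	/* invented address */
	size_t len = 4096;

	/* portable regardless of how wide the address type really is */
	printf("dest=0x%08llx len=%zu\n", (unsigned long long)dest, len);
	return 0;
}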
- */ -static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1 }; - static void ioat3_eh(struct ioat2_dma_chan *ioat); -static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) -{ - struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; - - return raw->field[xor_idx_to_field[idx]]; -} - static void xor_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, int idx) { @@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], pq->coef[idx] = coef; } -static int sed_get_pq16_pool_idx(int src_cnt) -{ - - return pq16_idx_to_sed[src_cnt]; -} - static bool is_jf_ioat(struct pci_dev *pdev) { switch (pdev->device) { @@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) struct ioat_sed_ent *sed; gfp_t flags = __GFP_ZERO | GFP_ATOMIC; - sed = kmem_cache_alloc(device->sed_pool, flags); + sed = kmem_cache_alloc(ioat3_sed_cache, flags); if (!sed) return NULL; @@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], flags, &sed->dma); if (!sed->hw) { - kmem_cache_free(device->sed_pool, sed); + kmem_cache_free(ioat3_sed_cache, sed); return NULL; } @@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s return; dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); - kmem_cache_free(device->sed_pool, sed); -} - -static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, - struct ioat_ring_ent *desc, int idx) -{ - struct ioat_chan_common *chan = &ioat->base; - struct pci_dev *pdev = chan->device->pdev; - size_t len = desc->len; - size_t offset = len - desc->hw->size; - struct dma_async_tx_descriptor *tx = &desc->txd; - enum dma_ctrl_flags flags = tx->flags; - - switch (desc->hw->ctl_f.op) { - case IOAT_OP_COPY: - if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ - ioat_dma_unmap(chan, flags, len, desc->hw); - break; - case IOAT_OP_XOR_VAL: - case IOAT_OP_XOR: { - struct ioat_xor_descriptor *xor = desc->xor; - struct ioat_ring_ent *ext; - struct ioat_xor_ext_descriptor *xor_ex = NULL; - int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 5) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - xor_ex = ext->xor_ex; - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) xor; - descs[1] = (struct ioat_raw_descriptor *) xor_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = xor_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* dest is a source in xor validate operations */ - if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_TODEVICE, flags, 1); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - break; - } - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ: { - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_ring_ent *ext; - struct ioat_pq_ext_descriptor *pq_ex = NULL; - int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 3) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - pq_ex = ext->pq_ex; - } - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags 
& DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) pq; - descs[1] = (struct ioat_raw_descriptor *) pq_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - case IOAT_OP_PQ_16S: - case IOAT_OP_PQ_VAL_16S: { - struct ioat_pq_descriptor *pq = desc->pq; - int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[4]; - int i; - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *)pq; - descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); - descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq16_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - default: - dev_err(&pdev->dev, "%s: unknown op type: %#x\n", - __func__, desc->hw->ctl_f.op); - } + kmem_cache_free(ioat3_sed_cache, sed); } static bool desc_has_ext(struct ioat_ring_ent *desc) @@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; if (tx->cookie) { dma_cookie_complete(tx); - ioat3_dma_unmap(ioat, desc, idx + i); + dma_descriptor_unmap(tx); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; @@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; ioat3_cleanup(ioat); @@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, u8 op; int i, s, idx, num_descs; - /* this function only handles src_cnt 9 - 16 */ - BUG_ON(src_cnt < 9); - /* this function is only called with 9-16 sources */ op = result ? 
IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; @@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, descs[0] = (struct ioat_raw_descriptor *) pq; - desc->sed = ioat3_alloc_sed(device, - sed_get_pq16_pool_idx(src_cnt)); + desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); if (!desc->sed) { dev_err(to_dev(chan), "%s: no free sed entries\n", __func__); @@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, return &desc->txd; } +static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) +{ + if (dmaf_p_disabled_continue(flags)) + return src_cnt + 1; + else if (dmaf_continue(flags)) + return src_cnt + 3; + else + return src_cnt; +} + static struct dma_async_tx_descriptor * ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; @@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef[0] = scf[0]; single_source_coef[1] = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, 2, single_source_coef, len, flags) : @@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef, len, flags); } else { - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, @@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; @@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, */ *pqres = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, @@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor * ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = dst; /* specify valid address for disabled result */ - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? 
__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, @@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { - struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = pq[0]; /* specify valid address for disabled result */ - - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, scf, len, flags) : __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, @@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, IOAT_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test xor prep failed\n"); @@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test zero prep failed\n"); err = -ENODEV; @@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } + memset(page_address(dest), 0, PAGE_SIZE); + /* test for non-zero parity sum */ op = IOAT_OP_XOR_VAL; @@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test 2nd zero prep failed\n"); err = -ENODEV; @@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) static int ioat3_irq_reinit(struct ioatdma_device *device) { - int msixcnt = device->common.chancnt; struct pci_dev *pdev = device->pdev; - int i; - struct msix_entry *msix; 
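The src_cnt_flags() helper introduced a little above exists because the continuation flags add implicit sources (the helper bumps the count by one or three, matching the adjustments the removed unmap code used to make), and that adjusted count is what now decides between the 8-source and 16-source descriptor paths. A standalone restatement; the two bools stand in for dmaf_p_disabled_continue() and dmaf_continue(), and the numbers in main() are arbitrary.

#include <stdio.h>
#include <stdbool.h>

/*
 * Standalone rendition of src_cnt_flags(): account for the implicit
 * sources added by the P/Q "continue" flags before deciding whether
 * the 16-source descriptor format is required.
 */
static int effective_src_cnt(int src_cnt, bool p_disabled_continue,
			     bool continue_op)
{
	if (p_disabled_continue)
		return src_cnt + 1;
	if (continue_op)
		return src_cnt + 3;
	return src_cnt;
}

int main(void)
{
	/* 7 explicit sources plus a full continue -> 10, needs 16s path */
	printf("use 16-source format: %s\n",
	       effective_src_cnt(7, false, true) > 8 ? "yes" : "no");
	/* 7 explicit sources, no continuation -> stays on the 8s path */
	printf("use 16-source format: %s\n",
	       effective_src_cnt(7, false, false) > 8 ? "yes" : "no");
	return 0;
}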
- struct ioat_chan_common *chan; - int err = 0; + int irq = pdev->irq, i; + + if (!is_bwd_ioat(pdev)) + return 0; switch (device->irq_mode) { case IOAT_MSIX: + for (i = 0; i < device->common.chancnt; i++) { + struct msix_entry *msix = &device->msix_entries[i]; + struct ioat_chan_common *chan; - for (i = 0; i < msixcnt; i++) { - msix = &device->msix_entries[i]; chan = ioat_chan_by_index(device, i); devm_free_irq(&pdev->dev, msix->vector, chan); } pci_disable_msix(pdev); break; - - case IOAT_MSIX_SINGLE: - msix = &device->msix_entries[0]; - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, msix->vector, chan); - pci_disable_msix(pdev); - break; - case IOAT_MSI: - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, pdev->irq, chan); pci_disable_msi(pdev); - break; - + /* fall through */ case IOAT_INTX: - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, pdev->irq, chan); + devm_free_irq(&pdev->dev, irq, device); break; - default: return 0; } - device->irq_mode = IOAT_NOIRQ; - err = ioat_dma_setup_interrupts(device); - - return err; + return ioat_dma_setup_interrupts(device); } static int ioat3_reset_hw(struct ioat_chan_common *chan) @@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) } err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); - if (err) { - dev_err(&pdev->dev, "Failed to reset!\n"); - return err; - } - - if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) + if (!err) err = ioat3_irq_reinit(device); + if (err) + dev_err(&pdev->dev, "Failed to reset: %d\n", err); + return err; } @@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) char pool_name[14]; int i; - /* allocate sw descriptor pool for SED */ - device->sed_pool = kmem_cache_create("ioat_sed", - sizeof(struct ioat_sed_ent), 0, 0, NULL); - if (!device->sed_pool) - return -ENOMEM; - for (i = 0; i < MAX_SED_POOLS; i++) { snprintf(pool_name, 14, "ioat_hw%d_sed", i); /* allocate SED DMA pool */ - device->sed_hw_pool[i] = dma_pool_create(pool_name, + device->sed_hw_pool[i] = dmam_pool_create(pool_name, &pdev->dev, SED_SIZE * (i + 1), 64, 0); if (!device->sed_hw_pool[i]) - goto sed_pool_cleanup; + return -ENOMEM; } } @@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) device->dca = ioat3_dca_init(pdev, device->reg_base); return 0; - -sed_pool_cleanup: - if (device->sed_pool) { - int i; - kmem_cache_destroy(device->sed_pool); - - for (i = 0; i < MAX_SED_POOLS; i++) - if (device->sed_hw_pool[i]) - dma_pool_destroy(device->sed_hw_pool[i]); - } - - return -ENOMEM; -} - -void ioat3_dma_remove(struct ioatdma_device *device) -{ - if (device->sed_pool) { - int i; - kmem_cache_destroy(device->sed_pool); - - for (i = 0; i < MAX_SED_POOLS; i++) - if (device->sed_hw_pool[i]) - dma_pool_destroy(device->sed_hw_pool[i]); - } } diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 2c8d560e633..1d051cd045d 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644); MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); struct kmem_cache *ioat2_cache; +struct kmem_cache *ioat3_sed_cache; #define DRV_NAME "ioatdma" @@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; - if (device->version >= IOAT_VER_3_0) - ioat3_dma_remove(device); - dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); @@ -221,7 
+219,7 @@ static void ioat_remove(struct pci_dev *pdev) static int __init ioat_init_module(void) { - int err; + int err = -ENOMEM; pr_info("%s: Intel(R) QuickData Technology Driver %s\n", DRV_NAME, IOAT_DMA_VERSION); @@ -231,9 +229,21 @@ static int __init ioat_init_module(void) if (!ioat2_cache) return -ENOMEM; + ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); + if (!ioat3_sed_cache) + goto err_ioat2_cache; + err = pci_register_driver(&ioat_pci_driver); if (err) - kmem_cache_destroy(ioat2_cache); + goto err_ioat3_cache; + + return 0; + + err_ioat3_cache: + kmem_cache_destroy(ioat3_sed_cache); + + err_ioat2_cache: + kmem_cache_destroy(ioat2_cache); return err; } diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index dd8b44a56e5..c56137bc386 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) } } -static void -iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = iop_desc_get_dest_addr(unmap, iop_chan); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor? */ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - } - desc->group_head = NULL; -} - -static void -iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt = unmap->unmap_src_cnt; - dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); - dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); - int i; - - if (tx->flags & DMA_PREP_CONTINUE) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dma_addr_t addr; - - for (i = 0; i < src_cnt; i++) { - addr = iop_desc_get_src_addr(unmap, iop_chan, i); - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - if (desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); - dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); - } - } - - desc->group_head = NULL; -} - - static dma_cookie_t iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct iop_adma_chan *iop_chan, dma_cookie_t cookie) @@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, if (tx->callback) tx->callback(tx->callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
- */ - if (desc->group_head && desc->unmap_len) { - if (iop_desc_is_pq(desc)) - iop_desc_unmap_pq(iop_chan, desc); - else - iop_desc_unmap(iop_chan, desc); - } + dma_descriptor_unmap(tx); + if (desc->group_head) + desc->group_head = NULL; } /* run dependent operations */ @@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) if (sw_desc) { grp_start = sw_desc->group_head; iop_desc_init_interrupt(grp_start, iop_chan); - grp_start->unmap_len = 0; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); iop_desc_set_memcpy_src_addr(grp_start, dma_src); - sw_desc->unmap_src_cnt = 1; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_init_xor(grp_start, src_cnt, flags); iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_xor_src_addr(grp_start, src_cnt, @@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, grp_start->xor_check_result = result; pr_debug("\t%s: grp_start->xor_check_result: %p\n", __func__, grp_start->xor_check_result); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, @@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, dst[0] = dst[1] & 0x7; iop_desc_set_pq_addr(g, dst); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; for (i = 0; i < src_cnt; i++) iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); @@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, g->pq_check_result = pqres; pr_debug("\t%s: g->pq_check_result: %p\n", __func__, g->pq_check_result); - sw_desc->unmap_src_cnt = src_cnt+2; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, @@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, int ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; iop_adma_slot_cleanup(iop_chan); @@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) msleep(1); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; @@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test zero sum timed out, disabling\n"); err = -ENODEV; @@ -1158,7 +1067,7 @@ 
iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test non-zero sum timed out, disabling\n"); err = -ENODEV; @@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb9c0bc317e..128ca143486 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); descnew = desc; - dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", - irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); + dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n", + irq, (u64)sg_dma_address(*sg), + sgnext ? 
(u64)sg_dma_address(sgnext) : 0, + ichan->active_buffer, curbuf); /* Find the descriptor of sgnext */ sgnew = idmac_sg_next(ichan, &descnew, *sg); diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a2c330f5f95..e26075408e9 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&c->vc.lock, flags); @@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op) irq = platform_get_irq(op, 0); ret = devm_request_irq(&op->dev, irq, - k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); + k3_dma_int_handler, 0, DRIVER_NAME, d); if (ret) return ret; diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index ff8d7827f8c..dcb1e05149a 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data) * move the descriptors to a temporary list so we can drop * the lock during the entire cleanup operation */ - list_del(&desc->node); - list_add(&desc->node, &chain_cleanup); + list_move(&desc->node, &chain_cleanup); /* * Look for the first list entry which has the ENDIRQEN flag @@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, if (irq) { ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); + mmp_pdma_chan_handler, 0, "pdma", phy); if (ret) { dev_err(pdev->dev, "channel request irq fail!\n"); return ret; @@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op) /* all chan share one irq, demux inside */ irq = platform_get_irq(op, 0); ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); + mmp_pdma_int_handler, 0, "pdma", pdev); if (ret) return ret; } diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index d3b6358e5a2..3ddacc14a73 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -62,6 +62,11 @@ #define TDCR_BURSTSZ_16B (0x3 << 6) #define TDCR_BURSTSZ_32B (0x6 << 6) #define TDCR_BURSTSZ_64B (0x7 << 6) +#define TDCR_BURSTSZ_SQU_1B (0x5 << 6) +#define TDCR_BURSTSZ_SQU_2B (0x6 << 6) +#define TDCR_BURSTSZ_SQU_4B (0x0 << 6) +#define TDCR_BURSTSZ_SQU_8B (0x1 << 6) +#define TDCR_BURSTSZ_SQU_16B (0x3 << 6) #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) #define TDCR_BURSTSZ_128B (0x5 << 6) #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ @@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) /* disable irq */ writel(0, tdmac->reg_base + TDIMR); - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; } static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) @@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) return -EINVAL; } } else if (tdmac->type == PXA910_SQU) { - tdcr |= TDCR_BURSTSZ_SQU_32B; tdcr |= TDCR_SSPMOD; + + switch (tdmac->burst_sz) { + case 1: + tdcr |= TDCR_BURSTSZ_SQU_1B; + break; + case 2: + tdcr |= TDCR_BURSTSZ_SQU_2B; + break; + case 4: + tdcr |= TDCR_BURSTSZ_SQU_4B; + break; + case 8: + tdcr |= TDCR_BURSTSZ_SQU_8B; + break; + case 16: + tdcr |= TDCR_BURSTSZ_SQU_16B; + break; + case 32: + tdcr |= TDCR_BURSTSZ_SQU_32B; + break; + default: + dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); + return -EINVAL; + } } writel(tdcr, tdmac->reg_base + TDCR); @@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) if 
(tdmac->irq) { ret = devm_request_irq(tdmac->dev, tdmac->irq, - mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); + mmp_tdma_chan_handler, 0, "tdma", tdmac); if (ret) return ret; } @@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( int num_periods = buf_len / period_len; int i = 0, buf = 0; - if (tdmac->status != DMA_SUCCESS) + if (tdmac->status != DMA_COMPLETE) return NULL; if (period_len > TDMA_MAX_XFER_BYTES) { @@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, tdmac->idx = idx; tdmac->type = type; tdmac->reg_base = (unsigned long)tdev->base + idx * 4; - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; tdev->tdmac[tdmac->idx] = tdmac; tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); @@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) if (irq_num != chan_num) { irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, - mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); + mmp_tdma_int_handler, 0, "tdma", tdev); if (ret) return ret; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 536dcb8ba5f..7807f0ef4e2 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) return hw_desc->phy_dest_addr; } -static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, - int src_idx) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; -} - - static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { @@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, desc->async_tx.callback( desc->async_tx.callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) - */ - if (desc->group_head && desc->unmap_len) { - struct mv_xor_desc_slot *unmap = desc->group_head; - struct device *dev = mv_chan_to_devp(mv_chan); - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = desc->async_tx.flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = mv_desc_get_dest_addr(unmap); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor ? 
*/ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = mv_desc_get_src_addr(unmap, - src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, - DMA_TO_DEVICE); - } - } + dma_descriptor_unmap(&desc->async_tx); + if (desc->group_head) desc->group_head = NULL; - } } /* run dependent operations */ @@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) { + if (ret == DMA_COMPLETE) { mv_xor_clean_completed_slots(mv_chan); return ret; } @@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; @@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, } mv_chan->mmr_base = xordev->xor_base; - if (!mv_chan->mmr_base) { - ret = -ENOMEM; - goto err_free_dma; - } + mv_chan->mmr_high_base = xordev->xor_high_base; tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) mv_chan); @@ -1138,7 +1094,7 @@ static void mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, const struct mbus_dram_target_info *dram) { - void __iomem *base = xordev->xor_base; + void __iomem *base = xordev->xor_high_base; u32 win_enable = 0; int i; diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 06b067f24c9..d0749229c87 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -34,13 +34,13 @@ #define XOR_OPERATION_MODE_MEMCPY 2 #define XOR_DESCRIPTOR_SWAP BIT(14) -#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) -#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) -#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) -#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) -#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) -#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) -#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) +#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) +#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) +#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) +#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4)) +#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4)) +#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0) +#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4) #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) @@ -50,11 +50,11 @@ #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) #define XOR_INTR_MASK_VALUE 0x3F5 -#define WINDOW_BASE(w) (0x250 + ((w) << 2)) -#define WINDOW_SIZE(w) (0x270 + ((w) << 2)) -#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) -#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) -#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2)) +#define WINDOW_BASE(w) (0x50 + ((w) << 2)) 
+#define WINDOW_SIZE(w) (0x70 + ((w) << 2)) +#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2)) +#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2)) +#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2)) struct mv_xor_device { void __iomem *xor_base; @@ -82,6 +82,7 @@ struct mv_xor_chan { int pending; spinlock_t lock; /* protects the descriptor slot pool */ void __iomem *mmr_base; + void __iomem *mmr_high_base; unsigned int idx; int irq; enum dma_transaction_type current_type; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ccd13df841d..ead491346da 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -27,6 +27,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_dma.h> +#include <linux/list.h> #include <asm/irq.h> @@ -57,6 +58,9 @@ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) #define HW_APBHX_CHn_SEMA(d, n) \ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) +#define HW_APBHX_CHn_BAR(d, n) \ + (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70) +#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) /* * ccw bits definitions @@ -115,7 +119,9 @@ struct mxs_dma_chan { int desc_count; enum dma_status status; unsigned int flags; + bool reset; #define MXS_DMA_SG_LOOP (1 << 0) +#define MXS_DMA_USE_SEMAPHORE (1 << 1) }; #define MXS_DMA_CHANNELS 16 @@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; - if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) + /* + * mxs dma channel resets can cause a channel stall. To recover from a + * channel stall, we have to reset the whole DMA engine. To avoid this, + * we use cyclic DMA with semaphores, that are enhanced in + * mxs_dma_int_handler. To reset the channel, we can simply stop writing + * into the semaphore counter. + */ + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->reset = true; + } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) { writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); - else + } else { + unsigned long elapsed = 0; + const unsigned long max_wait = 50000; /* 50ms */ + void __iomem *reg_dbg1 = mxs_dma->base + + HW_APBX_CHn_DEBUG1(mxs_dma, chan_id); + + /* + * On i.MX28 APBX, the DMA channel can stop working if we reset + * the channel while it is in READ_FLUSH (0x08) state. + * We wait here until we leave the state. Then we trigger the + * reset. Waiting a maximum of 50ms, the kernel shouldn't crash + * because of this. 
+ */ + while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) { + udelay(100); + elapsed += 100; + } + + if (elapsed >= max_wait) + dev_err(&mxs_chan->mxs_dma->pdev->dev, + "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n", + chan_id); + writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); + } + + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) @@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); /* write 1 to SEMA to kick off the channel */ - writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + /* A cyclic DMA consists of at least 2 segments, so initialize + * the semaphore with 2 so we have enough time to add 1 to the + * semaphore if we need to */ + writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } else { + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } + mxs_chan->reset = false; } static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) { - mxs_chan->status = DMA_SUCCESS; + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) @@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data) mxs_chan->desc.callback(mxs_chan->desc.callback_param); } +static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) +{ + int i; + + for (i = 0; i != mxs_dma->nr_channels; ++i) + if (mxs_dma->mxs_chans[i].chan_irq == irq) + return i; + + return -EINVAL; +} + static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) { struct mxs_dma_engine *mxs_dma = dev_id; - u32 stat1, stat2; + struct mxs_dma_chan *mxs_chan; + u32 completed; + u32 err; + int chan = mxs_dma_irq_to_chan(mxs_dma, irq); + + if (chan < 0) + return IRQ_NONE; /* completion status */ - stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); - stat1 &= MXS_DMA_CHANNELS_MASK; - writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); + completed = readl(mxs_dma->base + HW_APBHX_CTRL1); + completed = (completed >> chan) & 0x1; + + /* Clear interrupt */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); /* error status */ - stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); - writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); + err = readl(mxs_dma->base + HW_APBHX_CTRL2); + err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan); + + /* + * error status bit is in the upper 16 bits, error irq bit in the lower + * 16 bits. We transform it into a simpler error code: + * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR + */ + err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan); + + /* Clear error irq */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); /* * When both completion and error of termination bits set at the * same time, we do not take it as an error. IOW, it only becomes - * an error we need to handle here in case of either it's (1) a bus - * error or (2) a termination error with no completion. + * an error we need to handle here in case of either it's a bus + * error or a termination error with no completion. 0x01 is termination + * error, so we can subtract err & completed to get the real error case. 
*/ - stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ - (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ - - /* combine error and completion status for checking */ - stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; - while (stat1) { - int channel = fls(stat1) - 1; - struct mxs_dma_chan *mxs_chan = - &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; - - if (channel >= MXS_DMA_CHANNELS) { - dev_dbg(mxs_dma->dma_device.dev, - "%s: error in channel %d\n", __func__, - channel - MXS_DMA_CHANNELS); - mxs_chan->status = DMA_ERROR; - mxs_dma_reset_chan(mxs_chan); - } else { - if (mxs_chan->flags & MXS_DMA_SG_LOOP) - mxs_chan->status = DMA_IN_PROGRESS; - else - mxs_chan->status = DMA_SUCCESS; - } + err -= err & completed; - stat1 &= ~(1 << channel); + mxs_chan = &mxs_dma->mxs_chans[chan]; - if (mxs_chan->status == DMA_SUCCESS) - dma_cookie_complete(&mxs_chan->desc); + if (err) { + dev_dbg(mxs_dma->dma_device.dev, + "%s: error in channel %d\n", __func__, + chan); + mxs_chan->status = DMA_ERROR; + mxs_dma_reset_chan(mxs_chan); + } else if (mxs_chan->status != DMA_COMPLETE) { + if (mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->status = DMA_IN_PROGRESS; + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE) + writel(1, mxs_dma->base + + HW_APBHX_CHn_SEMA(mxs_dma, chan)); + } else { + mxs_chan->status = DMA_COMPLETE; + } + } - /* schedule tasklet on this channel */ - tasklet_schedule(&mxs_chan->tasklet); + if (mxs_chan->status == DMA_COMPLETE) { + if (mxs_chan->reset) + return IRQ_HANDLED; + dma_cookie_complete(&mxs_chan->desc); } + /* schedule tasklet on this channel */ + tasklet_schedule(&mxs_chan->tasklet); + return IRQ_HANDLED; } @@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( mxs_chan->status = DMA_IN_PROGRESS; mxs_chan->flags |= MXS_DMA_SG_LOOP; + mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE; if (num_periods > NUM_CCW) { dev_err(mxs_dma->dma_device.dev, @@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( ccw->bits |= CCW_IRQ; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= CCW_DEC_SEM; ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); @@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + u32 residue = 0; + + if (mxs_chan->status == DMA_IN_PROGRESS && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + struct mxs_dma_ccw *last_ccw; + u32 bar; + + last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1]; + residue = last_ccw->xfer_bytes + last_ccw->bufaddr; + + bar = readl(mxs_dma->base + + HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id)); + residue -= bar; + } - dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); + dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, + residue); return mxs_chan->status; } diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index ec3fc4fd916..2f66cf4e54f 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS || !txstate) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&c->vc.lock, flags); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index df8b10fd172..cdf0483b8f2 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data) list_move_tail(&desc->node, &pch->dmac->desc_pool); } + dma_descriptor_unmap(&desc->txd); + if (callback) { spin_unlock_irqrestore(&pch->lock, flags); callback(callback_param); @@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param) return false; peri_id = chan->private; - return *peri_id == (unsigned)param; + return *peri_id == (unsigned long)param; } EXPORT_SYMBOL(pl330_filter); @@ -2926,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) amba_set_drvdata(adev, pdmac); - irq = adev->irq[0]; - ret = request_irq(irq, pl330_irq_handler, 0, - dev_name(&adev->dev), pi); - if (ret) - return ret; + for (i = 0; i < AMBA_NR_IRQS; i++) { + irq = adev->irq[i]; + if (irq) { + ret = devm_request_irq(&adev->dev, irq, + pl330_irq_handler, 0, + dev_name(&adev->dev), pi); + if (ret) + return ret; + } else { + break; + } + } pi->pcfg.periph_id = adev->periphid; ret = pl330_add(pi); if (ret) - goto probe_err1; + return ret; INIT_LIST_HEAD(&pdmac->desc_pool); spin_lock_init(&pdmac->pool_lock); @@ -3033,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return 0; probe_err3: - amba_set_drvdata(adev, NULL); - /* Idle the DMAC */ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, chan.device_node) { @@ -3048,8 +3055,6 @@ probe_err3: } probe_err2: pl330_del(pi); -probe_err1: - free_irq(irq, pi); return ret; } @@ -3059,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev) struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; struct pl330_info *pi; - int irq; if (!pdmac) return 0; @@ -3068,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev) of_dma_controller_free(adev->dev.of_node); dma_async_device_unregister(&pdmac->ddma); - amba_set_drvdata(adev, NULL); /* Idle the DMAC */ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, @@ -3086,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev) pl330_del(pi); - irq = adev->irq[0]; - free_irq(irq, pi); - return 0; } diff --git a/drivers/dma/ppc4xx/adma.c 
b/drivers/dma/ppc4xx/adma.c index e24b5ef486b..8da48c6b2a3 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -804,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan, } /** - * ppc440spe_desc_get_src_addr - extract the source address from the descriptor - */ -static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int src_idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - /* May have 0, 1, 2, or 3 sources */ - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - if (unlikely(src_idx)) { - printk(KERN_ERR "%s: try to get %d source for" - " DCHECK128\n", __func__, src_idx); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case DMA_CDB_OPC_MULTICAST: - case DMA_CDB_OPC_MV_SG1_SG2: - if (unlikely(src_idx > 2)) { - printk(KERN_ERR "%s: try to get %d source from" - " DMA descr\n", __func__, src_idx); - BUG(); - } - if (src_idx) { - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - u8 region; - - if (src_idx == 1) - return le32_to_cpu( - dma_hw_desc->sg1l) + - desc->unmap_len; - - region = (le32_to_cpu( - dma_hw_desc->sg1u)) >> - DMA_CUED_REGION_OFF; - - region &= DMA_CUED_REGION_MSK; - switch (region) { - case DMA_RXOR123: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 1); - case DMA_RXOR124: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len * 3); - case DMA_RXOR125: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 2); - default: - printk(KERN_ERR - "%s: try to" - " get src3 for region %02x" - "PPC440SPE_DESC_RXOR12?\n", - __func__, region); - BUG(); - } - } else { - printk(KERN_ERR - "%s: try to get %d" - " source for non-cued descr\n", - __func__, src_idx); - BUG(); - } - } - return le32_to_cpu(dma_hw_desc->sg1l); - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case PPC440SPE_XOR_ID: - /* May have up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->ops[src_idx].l; - } - return 0; -} - -/** - * ppc440spe_desc_get_dest_addr - extract the destination address from the - * descriptor - */ -static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - if (likely(!idx)) - return le32_to_cpu(dma_hw_desc->sg2l); - return le32_to_cpu(dma_hw_desc->sg3l); - case PPC440SPE_XOR_ID: - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbtal; - } - return 0; -} - -/** - * ppc440spe_desc_get_src_num - extract the number of source addresses from - * the descriptor - */ -static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - return 1; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_MULTICAST: - /* - * Only 
for RXOR operations we have more than - * one source - */ - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - /* RXOR op, there are 2 or 3 sources */ - if (((le32_to_cpu(dma_hw_desc->sg1u) >> - DMA_CUED_REGION_OFF) & - DMA_CUED_REGION_MSK) == DMA_RXOR12) { - /* RXOR 1-2 */ - return 2; - } else { - /* RXOR 1-2-3/1-2-4/1-2-5 */ - return 3; - } - } - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK; - default: - BUG(); - } - return 0; -} - -/** - * ppc440spe_desc_get_dst_num - get the number of destination addresses in - * this descriptor - */ -static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - /* May be 1 or 2 destinations */ - dma_hw_desc = desc->hw_desc; - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DCHECK128: - return 0; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_DFILL128: - return 1; - case DMA_CDB_OPC_MULTICAST: - if (desc->dst_cnt == 2) - return 2; - else - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* Always only 1 destination */ - return 1; - default: - BUG(); - } - return 0; -} - -/** * ppc440spe_desc_get_link - get the address of the descriptor that * follows this one */ @@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, } } -static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, - struct ppc440spe_adma_desc_slot *desc) -{ - u32 src_cnt, dst_cnt; - dma_addr_t addr; - - /* - * get the number of sources & destination - * included in this descriptor and unmap - * them all - */ - src_cnt = ppc440spe_desc_get_src_num(desc, chan); - dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); - - /* unmap destinations */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - while (dst_cnt--) { - addr = ppc440spe_desc_get_dest_addr( - desc, chan, dst_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_FROM_DEVICE); - } - } - - /* unmap sources */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = ppc440spe_desc_get_src_addr( - desc, chan, src_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_TO_DEVICE); - } - } -} - /** * ppc440spe_adma_run_tx_complete_actions - call functions to be called * upon completion @@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( desc->async_tx.callback( desc->async_tx.callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
- * - * actually, ppc's dma_unmap_page() functions are empty, so - * the following code is just for the sake of completeness - */ - if (chan && chan->needs_unmap && desc->group_head && - desc->unmap_len) { - struct ppc440spe_adma_desc_slot *unmap = - desc->group_head; - /* assume 1 slot per op always */ - u32 slot_count = unmap->slot_cnt; - - /* Run through the group list and unmap addresses */ - for (i = 0; i < slot_count; i++) { - BUG_ON(!unmap); - ppc440spe_adma_unmap(chan, unmap); - unmap = unmap->hw_next; - } - } + dma_descriptor_unmap(&desc->async_tx); } /* run dependent operations */ @@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, ppc440spe_chan = to_ppc440spe_adma_chan(chan); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; ppc440spe_adma_slot_cleanup(ppc440spe_chan); diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 461a91ab70b..ab26d46bbe1 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; if (!state) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index d94ab592cc1..2e7b394def8 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan, * If we don't find cookie on the queue, it has been aborted and we have * to report error */ - if (status != DMA_SUCCESS) { + if (status != DMA_COMPLETE) { struct shdma_desc *sdesc; status = DMA_ERROR; list_for_each_entry(sdesc, &schan->ld_queue, node) diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 1069e8869f2..0d765c0e21e 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); static int sh_dmae_probe(struct platform_device *pdev) { const struct sh_dmae_pdata *pdata; - unsigned long irqflags = IRQF_DISABLED, + unsigned long irqflags = 0, chan_flag[SH_DMAE_MAX_CHANNELS] = {}; int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; @@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev) IORESOURCE_IRQ_SHAREABLE) chan_flag[irq_cnt] = IRQF_SHARED; else - chan_flag[irq_cnt] = IRQF_DISABLED; + chan_flag[irq_cnt] = 0; dev_dbg(&pdev->dev, "Found IRQ %d for channel %d\n", i, irq_cnt); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 82d2b97ad94..b8c031b7de4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/log2.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/err.h> @@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, } ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) + if (ret != DMA_COMPLETE) dma_set_residue(txstate, stedma40_residue(chan)); if (d40_is_paused(d40c)) @@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan, src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || - ((src_addr_width > 1) && (src_addr_width & 1)) || - ((dst_addr_width > 1) && (dst_addr_width & 1))) + 
!is_power_of_2(src_addr_width) || + !is_power_of_2(dst_addr_width)) return -EINVAL; cfg->src_info.data_width = src_addr_width; diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5d4986e5f5f..73654e33f13 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, list_del(&sgreq->node); if (sgreq->last_sg) { - dma_desc->dma_status = DMA_SUCCESS; + dma_desc->dma_status = DMA_COMPLETE; dma_cookie_complete(&dma_desc->txd); if (!dma_desc->cb_count) list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); @@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, unsigned int residual; ret = dma_cookie_status(dc, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&tdc->lock, flags); @@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( return &dma_desc->txd; } -struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( +static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags, void *context) diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 28af214fce0..4506a7b4f97 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) return done; } -static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, - bool single) -{ - dma_addr_t addr; - int len; - - addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | - dma_desc[4]; - - len = (dma_desc[3] << 8) | dma_desc[2]; - - if (single) - dma_unmap_single(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); - else - dma_unmap_page(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); -} - -static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) -{ - struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, - struct timb_dma_chan, chan); - u8 *descs; - - for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { - __td_unmap_desc(td_chan, descs, single); - if (descs[0] & 0x02) - break; - } -} - static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, struct scatterlist *sg, bool last) { @@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) list_move(&td_desc->desc_node, &td_chan->free_list); - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) - __td_unmap_descs(td_desc, - txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); - + dma_descriptor_unmap(txd); /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 71e8e775189..bae6c29f550 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, list_splice_init(&desc->tx_list, &dc->free_list); list_move(&desc->desc_node, &dc->free_list); - if (!ds) { - dma_addr_t dmaaddr; - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - dmaaddr = is_dmac64(dc) ? 
- desc->hwdesc.DAR : desc->hwdesc32.DAR; - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dmaaddr = is_dmac64(dc) ? - desc->hwdesc.SAR : desc->hwdesc32.SAR; - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here @@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) - return DMA_SUCCESS; + if (ret == DMA_COMPLETE) + return DMA_COMPLETE; spin_lock_bh(&dc->lock); txx9dmac_scan_descriptors(dc); diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index 9ee1c76da7b..374b57fc596 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c @@ -163,6 +163,7 @@ static void cell_edac_init_csrows(struct mem_ctl_info *mci) csrow->first_page, nr_pages); break; } + of_node_put(np); } static int cell_edac_probe(struct platform_device *pdev) diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 211021dfec7..10267434603 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -530,12 +530,9 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev) /* Report action taken */ edac_device_printk(edac_dev, KERN_INFO, - "Giving out device to module '%s' controller " - "'%s': DEV '%s' (%s)\n", - edac_dev->mod_name, - edac_dev->ctl_name, - edac_dev_name(edac_dev), - edac_op_state_to_string(edac_dev->op_state)); + "Giving out device to module %s controller %s: DEV %s (%s)\n", + edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name, + edac_op_state_to_string(edac_dev->op_state)); mutex_unlock(&device_ctls_mutex); return 0; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 89e109022d7..e8c9ef03495 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -788,8 +788,10 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) } /* Report action taken */ - edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" - " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci)); + edac_mc_printk(mci, KERN_INFO, + "Giving out device to module %s controller %s: DEV %s (%s)\n", + mci->mod_name, mci->ctl_name, mci->dev_name, + edac_op_state_to_string(mci->op_state)); edac_mc_owner = mci->mod_name; diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index dd370f92ace..2cf44b4db80 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c @@ -358,11 +358,9 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) } edac_pci_printk(pci, KERN_INFO, - "Giving out device to module '%s' controller '%s':" - " DEV '%s' (%s)\n", - pci->mod_name, - pci->ctl_name, - edac_dev_name(pci), edac_op_state_to_string(pci->op_state)); + "Giving out device to module %s controller %s: DEV %s (%s)\n", + pci->mod_name, pci->ctl_name, pci->dev_name, + edac_op_state_to_string(pci->op_state)); mutex_unlock(&edac_pci_ctls_mutex); return 0; diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c index c2bd8c6a434..2f193668ebc 100644 --- 
a/drivers/edac/highbank_l2_edac.c +++ b/drivers/edac/highbank_l2_edac.c @@ -50,8 +50,15 @@ static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static const struct of_device_id hb_l2_err_of_match[] = { + { .compatible = "calxeda,hb-sregs-l2-ecc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, hb_l2_err_of_match); + static int highbank_l2_err_probe(struct platform_device *pdev) { + const struct of_device_id *id; struct edac_device_ctl_info *dci; struct hb_l2_drvdata *drvdata; struct resource *r; @@ -90,28 +97,32 @@ static int highbank_l2_err_probe(struct platform_device *pdev) goto err; } + id = of_match_device(hb_l2_err_of_match, &pdev->dev); + dci->mod_name = pdev->dev.driver->name; + dci->ctl_name = id ? id->compatible : "unknown"; + dci->dev_name = dev_name(&pdev->dev); + + if (edac_device_add_device(dci)) + goto err; + drvdata->db_irq = platform_get_irq(pdev, 0); res = devm_request_irq(&pdev->dev, drvdata->db_irq, highbank_l2_err_handler, 0, dev_name(&pdev->dev), dci); if (res < 0) - goto err; + goto err2; drvdata->sb_irq = platform_get_irq(pdev, 1); res = devm_request_irq(&pdev->dev, drvdata->sb_irq, highbank_l2_err_handler, 0, dev_name(&pdev->dev), dci); if (res < 0) - goto err; - - dci->mod_name = dev_name(&pdev->dev); - dci->dev_name = dev_name(&pdev->dev); - - if (edac_device_add_device(dci)) - goto err; + goto err2; devres_close_group(&pdev->dev, NULL); return 0; +err2: + edac_device_del_device(&pdev->dev); err: devres_release_group(&pdev->dev, NULL); edac_device_free_ctl_info(dci); @@ -127,12 +138,6 @@ static int highbank_l2_err_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id hb_l2_err_of_match[] = { - { .compatible = "calxeda,hb-sregs-l2-ecc", }, - {}, -}; -MODULE_DEVICE_TABLE(of, hb_l2_err_of_match); - static struct platform_driver highbank_l2_edac_driver = { .probe = highbank_l2_err_probe, .remove = highbank_l2_err_remove, diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c index 4695dd2d71f..f784de1dc79 100644 --- a/drivers/edac/highbank_mc_edac.c +++ b/drivers/edac/highbank_mc_edac.c @@ -26,31 +26,40 @@ #include "edac_module.h" /* DDR Ctrlr Error Registers */ -#define HB_DDR_ECC_OPT 0x128 -#define HB_DDR_ECC_U_ERR_ADDR 0x130 -#define HB_DDR_ECC_U_ERR_STAT 0x134 -#define HB_DDR_ECC_U_ERR_DATAL 0x138 -#define HB_DDR_ECC_U_ERR_DATAH 0x13c -#define HB_DDR_ECC_C_ERR_ADDR 0x140 -#define HB_DDR_ECC_C_ERR_STAT 0x144 -#define HB_DDR_ECC_C_ERR_DATAL 0x148 -#define HB_DDR_ECC_C_ERR_DATAH 0x14c -#define HB_DDR_ECC_INT_STATUS 0x180 -#define HB_DDR_ECC_INT_ACK 0x184 -#define HB_DDR_ECC_U_ERR_ID 0x424 -#define HB_DDR_ECC_C_ERR_ID 0x428 -#define HB_DDR_ECC_INT_STAT_CE 0x8 -#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10 -#define HB_DDR_ECC_INT_STAT_UE 0x20 -#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40 +#define HB_DDR_ECC_ERR_BASE 0x128 +#define MW_DDR_ECC_ERR_BASE 0x1b4 + +#define HB_DDR_ECC_OPT 0x00 +#define HB_DDR_ECC_U_ERR_ADDR 0x08 +#define HB_DDR_ECC_U_ERR_STAT 0x0c +#define HB_DDR_ECC_U_ERR_DATAL 0x10 +#define HB_DDR_ECC_U_ERR_DATAH 0x14 +#define HB_DDR_ECC_C_ERR_ADDR 0x18 +#define HB_DDR_ECC_C_ERR_STAT 0x1c +#define HB_DDR_ECC_C_ERR_DATAL 0x20 +#define HB_DDR_ECC_C_ERR_DATAH 0x24 #define HB_DDR_ECC_OPT_MODE_MASK 0x3 #define HB_DDR_ECC_OPT_FWC 0x100 #define HB_DDR_ECC_OPT_XOR_SHIFT 16 +/* DDR Ctrlr Interrupt Registers */ + +#define HB_DDR_ECC_INT_BASE 0x180 +#define MW_DDR_ECC_INT_BASE 0x218 + +#define HB_DDR_ECC_INT_STATUS 0x00 +#define HB_DDR_ECC_INT_ACK 0x04 + +#define HB_DDR_ECC_INT_STAT_CE 0x8 
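/*
 * Illustrative sketch, not part of the patch: the highbank_mc register list
 * above is split into a per-SoC base (HB_* vs. MW_*) plus small shared
 * offsets, so one handler can serve both controller layouts.  The standalone
 * code below just evaluates the resulting absolute offsets; the HB_* results
 * match the absolute defines removed earlier in this hunk (0x130, 0x180,
 * 0x184), while the MW_* results follow from the new constants.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int hb_err_base = 0x128, mw_err_base = 0x1b4;
	const unsigned int hb_int_base = 0x180, mw_int_base = 0x218;
	const unsigned int u_err_addr = 0x08, int_status = 0x00, int_ack = 0x04;

	printf("HB: U_ERR_ADDR %#x INT_STATUS %#x INT_ACK %#x\n",
	       hb_err_base + u_err_addr, hb_int_base + int_status,
	       hb_int_base + int_ack);
	printf("MW: U_ERR_ADDR %#x INT_STATUS %#x INT_ACK %#x\n",
	       mw_err_base + u_err_addr, mw_int_base + int_status,
	       mw_int_base + int_ack);
	return 0;
}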
+#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10 +#define HB_DDR_ECC_INT_STAT_UE 0x20 +#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40 + struct hb_mc_drvdata { - void __iomem *mc_vbase; + void __iomem *mc_err_base; + void __iomem *mc_int_base; }; static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) @@ -60,10 +69,10 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) u32 status, err_addr; /* Read the interrupt status register */ - status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS); + status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS); if (status & HB_DDR_ECC_INT_STAT_UE) { - err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR); + err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR); edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, err_addr >> PAGE_SHIFT, err_addr & ~PAGE_MASK, 0, @@ -71,9 +80,9 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) mci->ctl_name, ""); } if (status & HB_DDR_ECC_INT_STAT_CE) { - u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT); + u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT); syndrome = (syndrome >> 8) & 0xff; - err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR); + err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, err_addr >> PAGE_SHIFT, err_addr & ~PAGE_MASK, syndrome, @@ -82,66 +91,79 @@ static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) } /* clear the error, clears the interrupt */ - writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK); + writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK); return IRQ_HANDLED; } -#ifdef CONFIG_EDAC_DEBUG -static ssize_t highbank_mc_err_inject_write(struct file *file, - const char __user *data, - size_t count, loff_t *ppos) +static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd) { - struct mem_ctl_info *mci = file->private_data; struct hb_mc_drvdata *pdata = mci->pvt_info; - char buf[32]; - size_t buf_size; u32 reg; + + reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT); + reg &= HB_DDR_ECC_OPT_MODE_MASK; + reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC; + writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT); +} + +#define to_mci(k) container_of(k, struct mem_ctl_info, dev) + +static ssize_t highbank_mc_inject_ctrl(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct mem_ctl_info *mci = to_mci(dev); u8 synd; - buf_size = min(count, (sizeof(buf)-1)); - if (copy_from_user(buf, data, buf_size)) - return -EFAULT; - buf[buf_size] = 0; + if (kstrtou8(buf, 16, &synd)) + return -EINVAL; - if (!kstrtou8(buf, 16, &synd)) { - reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT); - reg &= HB_DDR_ECC_OPT_MODE_MASK; - reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC; - writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT); - } + highbank_mc_err_inject(mci, synd); return count; } -static const struct file_operations highbank_mc_debug_inject_fops = { - .open = simple_open, - .write = highbank_mc_err_inject_write, - .llseek = generic_file_llseek, +static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl); + +struct hb_mc_settings { + int err_offset; + int int_offset; }; -static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) -{ - if (mci->debugfs) - debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, - &highbank_mc_debug_inject_fops); -; -} -#else -static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) -{} -#endif 
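/*
 * Illustrative sketch, not part of the patch: the probe path added below
 * selects per-SoC register offsets through the .data pointer of its
 * of_device_id table, which avoids compatible-string comparisons scattered
 * through the driver.  A stripped-down, hypothetical version of that pattern
 * (all "my_*" and "vendor,*" names invented here) looks like this:
 */
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_settings {
	int err_offset;
	int int_offset;
};

static const struct my_settings my_soc_a = { .err_offset = 0x128, .int_offset = 0x180 };
static const struct my_settings my_soc_b = { .err_offset = 0x1b4, .int_offset = 0x218 };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,soc-a-ctrl", .data = &my_soc_a },
	{ .compatible = "vendor,soc-b-ctrl", .data = &my_soc_b },
	{ }
};
MODULE_DEVICE_TABLE(of, my_of_match);

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	const struct my_settings *settings;

	/* Pick the variant data matching this device's compatible string. */
	id = of_match_device(my_of_match, &pdev->dev);
	if (!id)
		return -ENODEV;
	settings = id->data;

	dev_info(&pdev->dev, "err regs @%#x, int regs @%#x\n",
		 settings->err_offset, settings->int_offset);
	return 0;
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.driver = {
		.name = "my-edac",
		.of_match_table = my_of_match,
	},
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");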
+static struct hb_mc_settings hb_settings = { + .err_offset = HB_DDR_ECC_ERR_BASE, + .int_offset = HB_DDR_ECC_INT_BASE, +}; + +static struct hb_mc_settings mw_settings = { + .err_offset = MW_DDR_ECC_ERR_BASE, + .int_offset = MW_DDR_ECC_INT_BASE, +}; + +static struct of_device_id hb_ddr_ctrl_of_match[] = { + { .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings }, + { .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings }, + {}, +}; +MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match); static int highbank_mc_probe(struct platform_device *pdev) { + const struct of_device_id *id; + const struct hb_mc_settings *settings; struct edac_mc_layer layers[2]; struct mem_ctl_info *mci; struct hb_mc_drvdata *drvdata; struct dimm_info *dimm; struct resource *r; + void __iomem *base; u32 control; int irq; int res = 0; + id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev); + if (!id) + return -ENODEV; + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = 1; layers[0].is_virt_csrow = true; @@ -174,35 +196,31 @@ static int highbank_mc_probe(struct platform_device *pdev) goto err; } - drvdata->mc_vbase = devm_ioremap(&pdev->dev, - r->start, resource_size(r)); - if (!drvdata->mc_vbase) { + base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!base) { dev_err(&pdev->dev, "Unable to map regs\n"); res = -ENOMEM; goto err; } - control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3; + settings = id->data; + drvdata->mc_err_base = base + settings->err_offset; + drvdata->mc_int_base = base + settings->int_offset; + + control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3; if (!control || (control == 0x2)) { dev_err(&pdev->dev, "No ECC present, or ECC disabled\n"); res = -ENODEV; goto err; } - irq = platform_get_irq(pdev, 0); - res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler, - 0, dev_name(&pdev->dev), mci); - if (res < 0) { - dev_err(&pdev->dev, "Unable to request irq %d\n", irq); - goto err; - } - mci->mtype_cap = MEM_FLAG_DDR3; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; - mci->mod_name = dev_name(&pdev->dev); + mci->mod_name = pdev->dev.driver->name; mci->mod_ver = "1"; - mci->ctl_name = dev_name(&pdev->dev); + mci->ctl_name = id->compatible; + mci->dev_name = dev_name(&pdev->dev); mci->scrub_mode = SCRUB_SW_SRC; /* Only a single 4GB DIMM is supported */ @@ -217,10 +235,20 @@ static int highbank_mc_probe(struct platform_device *pdev) if (res < 0) goto err; - highbank_mc_create_debugfs_nodes(mci); + irq = platform_get_irq(pdev, 0); + res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler, + 0, dev_name(&pdev->dev), mci); + if (res < 0) { + dev_err(&pdev->dev, "Unable to request irq %d\n", irq); + goto err2; + } + + device_create_file(&mci->dev, &dev_attr_inject_ctrl); devres_close_group(&pdev->dev, NULL); return 0; +err2: + edac_mc_del_mc(&pdev->dev); err: devres_release_group(&pdev->dev, NULL); edac_mc_free(mci); @@ -231,17 +259,12 @@ static int highbank_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); + device_remove_file(&mci->dev, &dev_attr_inject_ctrl); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); return 0; } -static const struct of_device_id hb_ddr_ctrl_of_match[] = { - { .compatible = "calxeda,hb-ddr-ctrl", }, - {}, -}; -MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match); - static struct platform_driver highbank_mc_edac_driver = { .probe = highbank_mc_probe, .remove = highbank_mc_remove, diff --git a/drivers/edac/mpc85xx_edac.c 
b/drivers/edac/mpc85xx_edac.c index 3eb32f62d72..fd46b0bd5f2 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -327,28 +327,6 @@ err: } EXPORT_SYMBOL(mpc85xx_pci_err_probe); -static int mpc85xx_pci_err_remove(struct platform_device *op) -{ - struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); - struct mpc85xx_pci_pdata *pdata = pci->pvt_info; - - edac_dbg(0, "\n"); - - out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, - orig_pci_err_cap_dr); - - out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en); - - edac_pci_del_device(pci->dev); - - if (edac_op_state == EDAC_OPSTATE_INT) - irq_dispose_mapping(pdata->irq); - - edac_pci_free_ctl_info(pci); - - return 0; -} - #endif /* CONFIG_PCI */ /**************************** L2 Err device ***************************/ diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 88f60c5fecb..8472405c558 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -34,7 +34,7 @@ static int probed; /* * Alter this version for the module when modifications are made */ -#define SBRIDGE_REVISION " Ver: 1.0.0 " +#define SBRIDGE_REVISION " Ver: 1.1.0 " #define EDAC_MOD_STR "sbridge_edac" /* @@ -83,11 +83,17 @@ static int probed; #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ /* Devices 12 Function 6, Offsets 0x80 to 0xcc */ -static const u32 dram_rule[] = { +static const u32 sbridge_dram_rule[] = { 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, }; -#define MAX_SAD ARRAY_SIZE(dram_rule) + +static const u32 ibridge_dram_rule[] = { + 0x60, 0x68, 0x70, 0x78, 0x80, + 0x88, 0x90, 0x98, 0xa0, 0xa8, + 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, + 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, +}; #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff) #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) @@ -108,43 +114,50 @@ static char *get_dram_attr(u32 reg) } } -static const u32 interleave_list[] = { +static const u32 sbridge_interleave_list[] = { 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc, 0xc4, 0xcc, }; -#define MAX_INTERLEAVE ARRAY_SIZE(interleave_list) - -#define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2) -#define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5) -#define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10) -#define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13) -#define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18) -#define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21) -#define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26) -#define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29) - -static inline int sad_pkg(u32 reg, int interleave) + +static const u32 ibridge_interleave_list[] = { + 0x64, 0x6c, 0x74, 0x7c, 0x84, + 0x8c, 0x94, 0x9c, 0xa4, 0xac, + 0xb4, 0xbc, 0xc4, 0xcc, 0xd4, + 0xdc, 0xe4, 0xec, 0xf4, 0xfc, +}; + +struct interleave_pkg { + unsigned char start; + unsigned char end; +}; + +static const struct interleave_pkg sbridge_interleave_pkg[] = { + { 0, 2 }, + { 3, 5 }, + { 8, 10 }, + { 11, 13 }, + { 16, 18 }, + { 19, 21 }, + { 24, 26 }, + { 27, 29 }, +}; + +static const struct interleave_pkg ibridge_interleave_pkg[] = { + { 0, 3 }, + { 4, 7 }, + { 8, 11 }, + { 12, 15 }, + { 16, 19 }, + { 20, 23 }, + { 24, 27 }, + { 28, 31 }, +}; + +static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, + int interleave) { - switch (interleave) { - case 0: - return SAD_PKG0(reg); - case 1: - return SAD_PKG1(reg); - case 2: - return SAD_PKG2(reg); - case 3: - return SAD_PKG3(reg); - case 4: - return SAD_PKG4(reg); - case 5: - return SAD_PKG5(reg); - case 6: - return SAD_PKG6(reg); - case 7: - return SAD_PKG7(reg); - default: - return 
-EINVAL; - } + return GET_BITFIELD(reg, table[interleave].start, + table[interleave].end); } /* Devices 12 Function 7 */ @@ -262,7 +275,9 @@ static const u32 correrrthrsld[] = { /* Device 17, function 0 */ -#define RANK_CFG_A 0x0328 +#define SB_RANK_CFG_A 0x0328 + +#define IB_RANK_CFG_A 0x0320 #define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11) @@ -273,8 +288,23 @@ static const u32 correrrthrsld[] = { #define NUM_CHANNELS 4 #define MAX_DIMMS 3 /* Max DIMMS per channel */ +enum type { + SANDY_BRIDGE, + IVY_BRIDGE, +}; + +struct sbridge_pvt; struct sbridge_info { - u32 mcmtr; + enum type type; + u32 mcmtr; + u32 rankcfgr; + u64 (*get_tolm)(struct sbridge_pvt *pvt); + u64 (*get_tohm)(struct sbridge_pvt *pvt); + const u32 *dram_rule; + const u32 *interleave_list; + const struct interleave_pkg *interleave_pkg; + u8 max_sad; + u8 max_interleave; }; struct sbridge_channel { @@ -305,8 +335,9 @@ struct sbridge_dev { struct sbridge_pvt { struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; - struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0; - struct pci_dev *pci_br; + struct pci_dev *pci_sad0, *pci_sad1; + struct pci_dev *pci_ha0, *pci_ha1; + struct pci_dev *pci_br0, *pci_br1; struct pci_dev *pci_tad[NUM_CHANNELS]; struct sbridge_dev *sbridge_dev; @@ -364,11 +395,75 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { {0,} /* 0 terminated list. */ }; +/* This changes depending if 1HA or 2HA: + * 1HA: + * 0x0eb8 (17.0) is DDRIO0 + * 2HA: + * 0x0ebc (17.4) is DDRIO0 + */ +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc + +/* pci ids */ +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead +#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79 +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a +#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b + +static const struct pci_id_descr pci_dev_descr_ibridge[] = { + /* Processor Home Agent */ + { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, + + /* Memory controller */ + { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, + { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, + { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, + { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, + { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, + { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, + + /* System Address Decoder */ + { PCI_DESCR(22, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, + + /* Broadcast Registers */ + { PCI_DESCR(22, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, + { PCI_DESCR(22, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, + + /* Optional, mode 2HA */ + { PCI_DESCR(28, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, +#if 0 + { PCI_DESCR(29, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, + { PCI_DESCR(29, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, +#endif + { PCI_DESCR(29, 2, 
PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, + { PCI_DESCR(29, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, + + { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, + { PCI_DESCR(17, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, +}; + +static const struct pci_id_table pci_dev_descr_ibridge_table[] = { + PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge), + {0,} /* 0 terminated list. */ +}; + /* * pci_device_id table for which devices we are looking for */ static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, {0,} /* 0 terminated list. */ }; @@ -458,6 +553,52 @@ static void free_sbridge_dev(struct sbridge_dev *sbridge_dev) kfree(sbridge_dev); } +static u64 sbridge_get_tolm(struct sbridge_pvt *pvt) +{ + u32 reg; + + /* Address range is 32:28 */ + pci_read_config_dword(pvt->pci_sad1, TOLM, ®); + return GET_TOLM(reg); +} + +static u64 sbridge_get_tohm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_sad1, TOHM, ®); + return GET_TOHM(reg); +} + +static u64 ibridge_get_tolm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_br1, TOLM, ®); + + return GET_TOLM(reg); +} + +static u64 ibridge_get_tohm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_br1, TOHM, ®); + + return GET_TOHM(reg); +} + +static inline u8 sad_pkg_socket(u8 pkg) +{ + /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */ + return (pkg >> 3) | (pkg & 0x3); +} + +static inline u8 sad_pkg_ha(u8 pkg) +{ + return (pkg >> 2) & 0x1; +} + /**************************************************************************** Memory check routines ****************************************************************************/ @@ -520,10 +661,10 @@ static int get_dimm_config(struct mem_ctl_info *mci) enum edac_type mode; enum mem_type mtype; - pci_read_config_dword(pvt->pci_br, SAD_TARGET, ®); + pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); pvt->sbridge_dev->source_id = SOURCE_ID(reg); - pci_read_config_dword(pvt->pci_br, SAD_CONTROL, ®); + pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, ®); pvt->sbridge_dev->node_id = NODE_ID(reg); edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", pvt->sbridge_dev->mc, @@ -558,7 +699,8 @@ static int get_dimm_config(struct mem_ctl_info *mci) } if (pvt->pci_ddrio) { - pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, ®); + pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr, + ®); if (IS_RDIMM_ENABLED(reg)) { /* FIXME: Can also be LRDIMM */ edac_dbg(0, "Memory is registered\n"); @@ -629,19 +771,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci) * Step 1) Get TOLM/TOHM ranges */ - /* Address range is 32:28 */ - pci_read_config_dword(pvt->pci_sad1, TOLM, - ®); - pvt->tolm = GET_TOLM(reg); + pvt->tolm = pvt->info.get_tolm(pvt); tmp_mb = (1 + pvt->tolm) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm); /* Address range is already 45:25 */ - pci_read_config_dword(pvt->pci_sad1, TOHM, - ®); - pvt->tohm = GET_TOHM(reg); + pvt->tohm = pvt->info.get_tohm(pvt); tmp_mb = (1 + pvt->tohm) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); @@ -654,9 +791,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci) * algorithm bellow. 
*/ prv = 0; - for (n_sads = 0; n_sads < MAX_SAD; n_sads++) { + for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { /* SAD_LIMIT Address range is 45:26 */ - pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads], + pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], ®); limit = SAD_LIMIT(reg); @@ -677,15 +814,16 @@ static void get_memory_layout(const struct mem_ctl_info *mci) reg); prv = limit; - pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], + pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], ®); - sad_interl = sad_pkg(reg, 0); + sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); for (j = 0; j < 8; j++) { - if (j > 0 && sad_interl == sad_pkg(reg, j)) + u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j); + if (j > 0 && sad_interl == pkg) break; edac_dbg(0, "SAD#%d, interleave #%d: %d\n", - n_sads, j, sad_pkg(reg, j)); + n_sads, j, pkg); } } @@ -797,12 +935,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci, { struct mem_ctl_info *new_mci; struct sbridge_pvt *pvt = mci->pvt_info; + struct pci_dev *pci_ha; int n_rir, n_sads, n_tads, sad_way, sck_xch; int sad_interl, idx, base_ch; int interleave_mode; - unsigned sad_interleave[MAX_INTERLEAVE]; + unsigned sad_interleave[pvt->info.max_interleave]; u32 reg; - u8 ch_way,sck_way; + u8 ch_way, sck_way, pkg, sad_ha = 0; u32 tad_offset; u32 rir_way; u32 mb, kb; @@ -828,8 +967,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci, /* * Step 1) Get socket */ - for (n_sads = 0; n_sads < MAX_SAD; n_sads++) { - pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads], + for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { + pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads], ®); if (!DRAM_RULE_ENABLE(reg)) @@ -844,53 +983,65 @@ static int get_memory_error_data(struct mem_ctl_info *mci, break; prv = limit; } - if (n_sads == MAX_SAD) { + if (n_sads == pvt->info.max_sad) { sprintf(msg, "Can't discover the memory socket"); return -EINVAL; } *area_type = get_dram_attr(reg); interleave_mode = INTERLEAVE_MODE(reg); - pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], + pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], ®); - sad_interl = sad_pkg(reg, 0); - for (sad_way = 0; sad_way < 8; sad_way++) { - if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way)) + + if (pvt->info.type == SANDY_BRIDGE) { + sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0); + for (sad_way = 0; sad_way < 8; sad_way++) { + u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way); + if (sad_way > 0 && sad_interl == pkg) + break; + sad_interleave[sad_way] = pkg; + edac_dbg(0, "SAD interleave #%d: %d\n", + sad_way, sad_interleave[sad_way]); + } + edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", + pvt->sbridge_dev->mc, + n_sads, + addr, + limit, + sad_way + 7, + !interleave_mode ? "" : "XOR[18:16]"); + if (interleave_mode) + idx = ((addr >> 6) ^ (addr >> 16)) & 7; + else + idx = (addr >> 6) & 7; + switch (sad_way) { + case 1: + idx = 0; break; - sad_interleave[sad_way] = sad_pkg(reg, sad_way); - edac_dbg(0, "SAD interleave #%d: %d\n", - sad_way, sad_interleave[sad_way]); - } - edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", - pvt->sbridge_dev->mc, - n_sads, - addr, - limit, - sad_way + 7, - interleave_mode ? 
"" : "XOR[18:16]"); - if (interleave_mode) - idx = ((addr >> 6) ^ (addr >> 16)) & 7; - else + case 2: + idx = idx & 1; + break; + case 4: + idx = idx & 3; + break; + case 8: + break; + default: + sprintf(msg, "Can't discover socket interleave"); + return -EINVAL; + } + *socket = sad_interleave[idx]; + edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", + idx, sad_way, *socket); + } else { + /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */ idx = (addr >> 6) & 7; - switch (sad_way) { - case 1: - idx = 0; - break; - case 2: - idx = idx & 1; - break; - case 4: - idx = idx & 3; - break; - case 8: - break; - default: - sprintf(msg, "Can't discover socket interleave"); - return -EINVAL; + pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); + *socket = sad_pkg_socket(pkg); + sad_ha = sad_pkg_ha(pkg); + edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n", + idx, *socket, sad_ha); } - *socket = sad_interleave[idx]; - edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", - idx, sad_way, *socket); /* * Move to the proper node structure, in order to access the @@ -909,9 +1060,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci, * Step 2) Get memory channel */ prv = 0; + if (pvt->info.type == SANDY_BRIDGE) + pci_ha = pvt->pci_ha0; + else { + if (sad_ha) + pci_ha = pvt->pci_ha1; + else + pci_ha = pvt->pci_ha0; + } for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { - pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads], - ®); + pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®); limit = TAD_LIMIT(reg); if (limit <= prv) { sprintf(msg, "Can't discover the memory channel"); @@ -921,14 +1079,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci, break; prv = limit; } + if (n_tads == MAX_TAD) { + sprintf(msg, "Can't discover the memory channel"); + return -EINVAL; + } + ch_way = TAD_CH(reg) + 1; sck_way = TAD_SOCK(reg) + 1; - /* - * FIXME: Is it right to always use channel 0 for offsets? - */ - pci_read_config_dword(pvt->pci_tad[0], - tad_ch_nilv_offset[n_tads], - &tad_offset); if (ch_way == 3) idx = addr >> 6; @@ -958,6 +1115,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci, } *channel_mask = 1 << base_ch; + pci_read_config_dword(pvt->pci_tad[base_ch], + tad_ch_nilv_offset[n_tads], + &tad_offset); + if (pvt->is_mirrored) { *channel_mask |= 1 << ((base_ch + 2) % 4); switch(ch_way) { @@ -1091,12 +1252,6 @@ static void sbridge_put_all_devices(void) } } -/* - * sbridge_get_all_devices Find and perform 'get' operation on the MCH's - * device/functions we want to reference for this driver - * - * Need to 'get' device 16 func 1 and func 2 - */ static int sbridge_get_onedevice(struct pci_dev **prev, u8 *num_mc, const struct pci_id_table *table, @@ -1198,11 +1353,21 @@ static int sbridge_get_onedevice(struct pci_dev **prev, return 0; } -static int sbridge_get_all_devices(u8 *num_mc) +/* + * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's + * device/functions we want to reference for this driver. + * Need to 'get' device 16 func 1 and func 2. + * @num_mc: pointer to the memory controllers count, to be incremented in case + * of success. 
+ * @table: model specific table + * + * returns 0 in case of success or error code + */ +static int sbridge_get_all_devices(u8 *num_mc, + const struct pci_id_table *table) { int i, rc; struct pci_dev *pdev = NULL; - const struct pci_id_table *table = pci_dev_descr_sbridge_table; while (table && table->descr) { for (i = 0; i < table->n_devs; i++) { @@ -1226,8 +1391,8 @@ static int sbridge_get_all_devices(u8 *num_mc) return 0; } -static int mci_bind_devs(struct mem_ctl_info *mci, - struct sbridge_dev *sbridge_dev) +static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, + struct sbridge_dev *sbridge_dev) { struct sbridge_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; @@ -1255,7 +1420,7 @@ static int mci_bind_devs(struct mem_ctl_info *mci, case 13: switch (func) { case 6: - pvt->pci_br = pdev; + pvt->pci_br0 = pdev; break; default: goto error; @@ -1329,6 +1494,131 @@ error: return -EINVAL; } +static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, + struct sbridge_dev *sbridge_dev) +{ + struct sbridge_pvt *pvt = mci->pvt_info; + struct pci_dev *pdev, *tmp; + int i, func, slot; + bool mode_2ha = false; + + tmp = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL); + if (tmp) { + mode_2ha = true; + pci_dev_put(tmp); + } + + for (i = 0; i < sbridge_dev->n_devs; i++) { + pdev = sbridge_dev->pdev[i]; + if (!pdev) + continue; + slot = PCI_SLOT(pdev->devfn); + func = PCI_FUNC(pdev->devfn); + + switch (slot) { + case 14: + if (func == 0) { + pvt->pci_ha0 = pdev; + break; + } + goto error; + case 15: + switch (func) { + case 0: + pvt->pci_ta = pdev; + break; + case 1: + pvt->pci_ras = pdev; + break; + case 4: + case 5: + /* if we have 2 HAs active, channels 2 and 3 + * are in other device */ + if (mode_2ha) + break; + /* fall through */ + case 2: + case 3: + pvt->pci_tad[func - 2] = pdev; + break; + default: + goto error; + } + break; + case 17: + if (func == 4) { + pvt->pci_ddrio = pdev; + break; + } else if (func == 0) { + if (!mode_2ha) + pvt->pci_ddrio = pdev; + break; + } + goto error; + case 22: + switch (func) { + case 0: + pvt->pci_sad0 = pdev; + break; + case 1: + pvt->pci_br0 = pdev; + break; + case 2: + pvt->pci_br1 = pdev; + break; + default: + goto error; + } + break; + case 28: + if (func == 0) { + pvt->pci_ha1 = pdev; + break; + } + goto error; + case 29: + /* we shouldn't have this device if we have just one + * HA present */ + WARN_ON(!mode_2ha); + if (func == 2 || func == 3) { + pvt->pci_tad[func] = pdev; + break; + } + goto error; + default: + goto error; + } + + edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", + sbridge_dev->bus, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), + pdev); + } + + /* Check if everything were registered */ + if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 || + !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras || + !pvt->pci_ta) + goto enodev; + + for (i = 0; i < NUM_CHANNELS; i++) { + if (!pvt->pci_tad[i]) + goto enodev; + } + return 0; + +enodev: + sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); + return -ENODEV; + +error: + sbridge_printk(KERN_ERR, + "Device %d, function %d is out of the expected range\n", + slot, func); + return -EINVAL; +} + /**************************************************************************** Error check routines ****************************************************************************/ @@ -1349,7 +1639,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 
62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); - bool recoverable = GET_BITFIELD(m->status, 56, 56); + bool recoverable; u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); u32 mscod = GET_BITFIELD(m->status, 16, 31); u32 errcode = GET_BITFIELD(m->status, 0, 15); @@ -1360,6 +1650,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, int rc, dimm; char *area_type = NULL; + if (pvt->info.type == IVY_BRIDGE) + recoverable = true; + else + recoverable = GET_BITFIELD(m->status, 56, 56); + if (uncorrected_error) { if (ripv) { type = "FATAL"; @@ -1409,6 +1704,10 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, } } + /* Only decode errors with an valid address (ADDRV) */ + if (!GET_BITFIELD(m->status, 58, 58)) + return; + rc = get_memory_error_data(mci, m->addr, &socket, &channel_mask, &rank, &area_type, msg); if (rc < 0) @@ -1614,11 +1913,12 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) sbridge_dev->mci = NULL; } -static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) +static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) { struct mem_ctl_info *mci; struct edac_mc_layer layers[2]; struct sbridge_pvt *pvt; + struct pci_dev *pdev = sbridge_dev->pdev[0]; int rc; /* Check the number of active and not disabled channels */ @@ -1640,7 +1940,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) return -ENOMEM; edac_dbg(0, "MC: mci = %p, dev = %p\n", - mci, &sbridge_dev->pdev[0]->dev); + mci, &pdev->dev); pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); @@ -1654,24 +1954,52 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "sbridge_edac.c"; mci->mod_ver = SBRIDGE_REVISION; - mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); - mci->dev_name = pci_name(sbridge_dev->pdev[0]); + mci->dev_name = pci_name(pdev); mci->ctl_page_to_phys = NULL; /* Set the function pointer to an actual operation function */ mci->edac_check = sbridge_check_error; - /* Store pci devices at mci for faster access */ - rc = mci_bind_devs(mci, sbridge_dev); - if (unlikely(rc < 0)) - goto fail0; + pvt->info.type = type; + if (type == IVY_BRIDGE) { + pvt->info.rankcfgr = IB_RANK_CFG_A; + pvt->info.get_tolm = ibridge_get_tolm; + pvt->info.get_tohm = ibridge_get_tohm; + pvt->info.dram_rule = ibridge_dram_rule; + pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); + pvt->info.interleave_list = ibridge_interleave_list; + pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); + pvt->info.interleave_pkg = ibridge_interleave_pkg; + mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); + + /* Store pci devices at mci for faster access */ + rc = ibridge_mci_bind_devs(mci, sbridge_dev); + if (unlikely(rc < 0)) + goto fail0; + } else { + pvt->info.rankcfgr = SB_RANK_CFG_A; + pvt->info.get_tolm = sbridge_get_tolm; + pvt->info.get_tohm = sbridge_get_tohm; + pvt->info.dram_rule = sbridge_dram_rule; + pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); + pvt->info.interleave_list = sbridge_interleave_list; + pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); + pvt->info.interleave_pkg = sbridge_interleave_pkg; + mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); + + /* Store pci devices at mci for faster access */ + rc = sbridge_mci_bind_devs(mci, sbridge_dev); + if (unlikely(rc < 0)) + goto fail0; + } + /* Get dimm basic config and the memory layout */ 
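The registration path above now fills a per-model descriptor (function pointers plus register tables) once, so later decode steps go through the descriptor instead of testing the CPU type everywhere. A hedged sketch of that shape, with invented names and far fewer fields than pvt->info carries:

#include <linux/types.h>

enum ctrl_type { TYPE_SANDY_BRIDGE, TYPE_IVY_BRIDGE };  /* illustrative */

struct model_info {                     /* invented, much smaller than pvt->info */
        u64 (*get_tolm)(void *ctx);     /* top-of-low-memory readout */
        u64 (*get_tohm)(void *ctx);     /* top-of-high-memory readout */
        const u32 *dram_rule;           /* SAD rule register offsets */
        unsigned int max_sad;           /* entries in dram_rule[] */
};

/* Selected once at register time; callers then use the pointers only. */
static const struct model_info *pick_model(enum ctrl_type type,
                                           const struct model_info *sb,
                                           const struct model_info *ib)
{
        return type == TYPE_IVY_BRIDGE ? ib : sb;
}

The same idea lets most of the address decode stay common across the two generations.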
get_dimm_config(mci); get_memory_layout(mci); /* record ptr to the generic device */ - mci->pdev = &sbridge_dev->pdev[0]->dev; + mci->pdev = &pdev->dev; /* add this new MC control structure to EDAC's list of MCs */ if (unlikely(edac_mc_add_mc(mci))) { @@ -1702,6 +2030,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) int rc; u8 mc, num_mc = 0; struct sbridge_dev *sbridge_dev; + enum type type; /* get the pci devices we want to reserve for our use */ mutex_lock(&sbridge_edac_lock); @@ -1715,7 +2044,13 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) } probed++; - rc = sbridge_get_all_devices(&num_mc); + if (pdev->device == PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA) { + rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table); + type = IVY_BRIDGE; + } else { + rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table); + type = SANDY_BRIDGE; + } if (unlikely(rc < 0)) goto fail0; mc = 0; @@ -1724,7 +2059,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) edac_dbg(0, "Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc); sbridge_dev->mc = mc++; - rc = sbridge_register_mci(sbridge_dev); + rc = sbridge_register_mci(sbridge_dev, type); if (unlikely(rc < 0)) goto fail1; } @@ -1839,5 +2174,5 @@ MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); -MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge memory controllers - " +MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - " SBRIDGE_REVISION); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7dd44615029..4e10b10d3dd 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -13,6 +13,7 @@ #include <linux/acpi_gpio.h> #include <linux/idr.h> #include <linux/slab.h> +#include <linux/acpi.h> #define CREATE_TRACE_POINTS #include <trace/events/gpio.h> diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index 0cfb60f5476..d18b88b755c 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -67,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev, goto fail; } - if (!client->driver) { + if (!client->dev.driver) { err = -ENODEV; goto fail_unregister; } - module = client->driver->driver.owner; + module = client->dev.driver->owner; if (!try_module_get(module)) { err = -ENODEV; goto fail_unregister; @@ -80,7 +80,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, encoder->bus_priv = client; - encoder_drv = to_drm_i2c_encoder_driver(client->driver); + encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver)); err = encoder_drv->encoder_init(client, dev, encoder); if (err) @@ -111,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder) { struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder); struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder); - struct module *module = client->driver->driver.owner; + struct module *module = client->dev.driver->owner; i2c_unregister_device(client); encoder->bus_priv = NULL; diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 43959edd429..dfff0907f70 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -196,7 +196,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) 
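The drm_encoder_slave hunks above drop the i2c_client->driver shortcut in favour of the generic driver pointer on the embedded struct device, converted back with to_i2c_driver(). A minimal sketch of that access pattern (helper name is illustrative):

#include <linux/i2c.h>

/* Illustrative: fetch the i2c_driver currently bound to a client. */
static struct i2c_driver *client_driver(struct i2c_client *client)
{
        if (!client->dev.driver)        /* nothing bound yet */
                return NULL;
        /* dev.driver is the generic device_driver embedded in
         * struct i2c_driver, so container_of() it back. */
        return to_i2c_driver(client->dev.driver);
}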
acpi_handle dhandle; int ret; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 1b2f41c3f19..6d69a9bad86 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -638,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev) u32 temp; int i = 0; - handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&dev->pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c index e286e132c7e..129120473f6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c @@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version) acpi_handle handle; int ret; - handle = DEVICE_ACPI_HANDLE(&device->pdev->dev); + handle = ACPI_HANDLE(&device->pdev->dev); if (!handle) return false; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index 13b85007644..e44ed7b93c6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c @@ -41,7 +41,8 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, if (!client) return false; - if (!client->driver || client->driver->detect(client, info)) { + if (!client->dev.driver || + to_i2c_driver(client->dev.driver)->detect(client, info)) { i2c_unregister_device(client); return false; } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 07273a2ae62..95c74045404 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -256,7 +256,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) acpi_handle dhandle; int retval = 0; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -414,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev) if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) return false; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -448,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) return NULL; } - handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&dev->pdev->dev); if (!handle) return NULL; diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 10f98c7742d..98a9074b306 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev, return NOTIFY_DONE; /* Check pending SBIOS requests */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); count = radeon_atif_get_sbios_requests(handle, &req); if (count <= 0) @@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev) struct radeon_atcs *atcs = &rdev->atcs; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); if (!handle) return -EINVAL; @@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev, u32 retry = 3; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = 
ACPI_HANDLE(&rdev->pdev->dev); if (!handle) return -EINVAL; @@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev) int ret; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); /* No need to proceed if we're sure that ATIF is not supported */ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6153ec18943..9d302eaeea1 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -8,8 +8,7 @@ */ #include <linux/vga_switcheroo.h> #include <linux/slab.h> -#include <acpi/acpi.h> -#include <acpi/acpi_bus.h> +#include <linux/acpi.h> #include <linux/pci.h> #include "radeon_acpi.h" @@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) acpi_handle dhandle, atpx_handle; acpi_status status; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -493,7 +492,7 @@ static int radeon_atpx_init(void) */ static int radeon_atpx_get_client_id(struct pci_dev *pdev) { - if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; else return VGA_SWITCHEROO_DIS; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index c155d6f3fa6..b3633d9a531 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) return false; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) continue; diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index ae48d18ee31..5f7e55f4b7f 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -1008,7 +1008,7 @@ static int i2c_hid_probe(struct i2c_client *client, hid->hid_get_raw_report = i2c_hid_get_raw_report; hid->hid_output_raw_report = i2c_hid_output_raw_report; hid->dev.parent = &client->dev; - ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev)); + ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev)); hid->bus = BUS_I2C; hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index cdcbd8368ed..3b26129f605 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -109,6 +109,7 @@ config I2C_I801 Avoton (SOC) Wellsburg (PCH) Coleto Creek (PCH) + Wildcat Point-LP (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -345,6 +346,16 @@ config I2C_BCM2835 This support is also available as a module. If so, the module will be called i2c-bcm2835. +config I2C_BCM_KONA + tristate "BCM Kona I2C adapter" + depends on ARCH_BCM_MOBILE + default y + help + If you say yes to this option, support will be included for the + I2C interface on the Broadcom Kona family of processors. + + If you do not need KONA I2C inteface, say N. + config I2C_BLACKFIN_TWI tristate "Blackfin TWI I2C support" depends on BLACKFIN @@ -436,6 +447,13 @@ config I2C_EG20T ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. 
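The i2c-hid hunk above moves from copying a raw ACPI handle to sharing the parent's ACPI companion object, so the child device resolves against the same firmware node. A hedged sketch of the same propagation in a child-device setup path (the device roles here are illustrative):

#include <linux/acpi.h>
#include <linux/device.h>

/* Illustrative: let a freshly created child device inherit the ACPI
 * companion of the device it hangs off. */
static void adopt_acpi_companion(struct device *child, struct device *parent)
{
        ACPI_COMPANION_SET(child, ACPI_COMPANION(parent));
}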
+config I2C_EXYNOS5 + tristate "Exynos5 high-speed I2C driver" + depends on ARCH_EXYNOS5 && OF + help + Say Y here to include support for high-speed I2C controller in the + Exynos5 based Samsung SoCs. + config I2C_GPIO tristate "GPIO-based bitbanging I2C" depends on GPIOLIB @@ -665,7 +683,7 @@ config I2C_SH7760 config I2C_SH_MOBILE tristate "SuperH Mobile I2C Controller" - depends on SUPERH || ARCH_SHMOBILE + depends on SUPERH || ARM || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Renesas SH-Mobile processor. @@ -695,6 +713,16 @@ config I2C_SIRF This driver can also be built as a module. If so, the module will be called i2c-sirf. +config I2C_ST + tristate "STMicroelectronics SSC I2C support" + depends on ARCH_STI + help + Enable this option to add support for STMicroelectronics SoCs + hardware SSC (Synchronous Serial Controller) as an I2C controller. + + This driver can also be built as module. If so, the module + will be called i2c-st. + config I2C_STU300 tristate "ST Microelectronics DDC I2C interface" depends on MACH_U300 @@ -768,7 +796,7 @@ config I2C_XLR config I2C_RCAR tristate "Renesas R-Car I2C Controller" - depends on ARCH_SHMOBILE && I2C + depends on ARM || COMPILE_TEST help If you say yes to this option, support will be included for the R-Car I2C controller. diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index d00997f3eb3..c73eb0ea788 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -42,6 +42,7 @@ i2c-designware-platform-objs := i2c-designware-platdrv.o obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o +obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o @@ -68,6 +69,7 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o +obj-$(CONFIG_I2C_ST) += i2c-st.o obj-$(CONFIG_I2C_STU300) += i2c-stu300.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o @@ -87,6 +89,7 @@ obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o # Other I2C/SMBus bus drivers obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o +obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c new file mode 100644 index 00000000000..036cf03aeb6 --- /dev/null +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -0,0 +1,909 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/slab.h> + +/* Hardware register offsets and field defintions */ +#define CS_OFFSET 0x00000020 +#define CS_ACK_SHIFT 3 +#define CS_ACK_MASK 0x00000008 +#define CS_ACK_CMD_GEN_START 0x00000000 +#define CS_ACK_CMD_GEN_RESTART 0x00000001 +#define CS_CMD_SHIFT 1 +#define CS_CMD_CMD_NO_ACTION 0x00000000 +#define CS_CMD_CMD_START_RESTART 0x00000001 +#define CS_CMD_CMD_STOP 0x00000002 +#define CS_EN_SHIFT 0 +#define CS_EN_CMD_ENABLE_BSC 0x00000001 + +#define TIM_OFFSET 0x00000024 +#define TIM_PRESCALE_SHIFT 6 +#define TIM_P_SHIFT 3 +#define TIM_NO_DIV_SHIFT 2 +#define TIM_DIV_SHIFT 0 + +#define DAT_OFFSET 0x00000028 + +#define TOUT_OFFSET 0x0000002c + +#define TXFCR_OFFSET 0x0000003c +#define TXFCR_FIFO_FLUSH_MASK 0x00000080 +#define TXFCR_FIFO_EN_MASK 0x00000040 + +#define IER_OFFSET 0x00000044 +#define IER_READ_COMPLETE_INT_MASK 0x00000010 +#define IER_I2C_INT_EN_MASK 0x00000008 +#define IER_FIFO_INT_EN_MASK 0x00000002 +#define IER_NOACK_EN_MASK 0x00000001 + +#define ISR_OFFSET 0x00000048 +#define ISR_RESERVED_MASK 0xffffff60 +#define ISR_CMDBUSY_MASK 0x00000080 +#define ISR_READ_COMPLETE_MASK 0x00000010 +#define ISR_SES_DONE_MASK 0x00000008 +#define ISR_ERR_MASK 0x00000004 +#define ISR_TXFIFOEMPTY_MASK 0x00000002 +#define ISR_NOACK_MASK 0x00000001 + +#define CLKEN_OFFSET 0x0000004C +#define CLKEN_AUTOSENSE_OFF_MASK 0x00000080 +#define CLKEN_M_SHIFT 4 +#define CLKEN_N_SHIFT 1 +#define CLKEN_CLKEN_MASK 0x00000001 + +#define FIFO_STATUS_OFFSET 0x00000054 +#define FIFO_STATUS_RXFIFO_EMPTY_MASK 0x00000004 +#define FIFO_STATUS_TXFIFO_EMPTY_MASK 0x00000010 + +#define HSTIM_OFFSET 0x00000058 +#define HSTIM_HS_MODE_MASK 0x00008000 +#define HSTIM_HS_HOLD_SHIFT 10 +#define HSTIM_HS_HIGH_PHASE_SHIFT 5 +#define HSTIM_HS_SETUP_SHIFT 0 + +#define PADCTL_OFFSET 0x0000005c +#define PADCTL_PAD_OUT_EN_MASK 0x00000004 + +#define RXFCR_OFFSET 0x00000068 +#define RXFCR_NACK_EN_SHIFT 7 +#define RXFCR_READ_COUNT_SHIFT 0 +#define RXFIFORDOUT_OFFSET 0x0000006c + +/* Locally used constants */ +#define MAX_RX_FIFO_SIZE 64U /* bytes */ +#define MAX_TX_FIFO_SIZE 64U /* bytes */ + +#define STD_EXT_CLK_FREQ 13000000UL +#define HS_EXT_CLK_FREQ 104000000UL + +#define MASTERCODE 0x08 /* Mastercodes are 0000_1xxxb */ + +#define I2C_TIMEOUT 100 /* msecs */ + +/* Operations that can be commanded to the controller */ +enum bcm_kona_cmd_t { + BCM_CMD_NOACTION = 0, + BCM_CMD_START, + BCM_CMD_RESTART, + BCM_CMD_STOP, +}; + +enum bus_speed_index { + BCM_SPD_100K = 0, + BCM_SPD_400K, + BCM_SPD_1MHZ, +}; + +enum hs_bus_speed_index { + BCM_SPD_3P4MHZ = 0, +}; + +/* Internal divider settings for standard mode, fast mode and fast mode plus */ +struct bus_speed_cfg { + uint8_t time_m; /* Number of cycles for setup time */ + uint8_t time_n; /* Number of cycles for hold time */ + uint8_t prescale; /* Prescale divider */ + uint8_t time_p; /* Timing coefficient */ + uint8_t no_div; /* Disable clock divider */ + uint8_t time_div; /* Post-prescale divider */ +}; + +/* Internal divider settings for high-speed mode */ +struct hs_bus_speed_cfg { + uint8_t hs_hold; /* Number of clock cycles SCL stays low until + the end of bit period */ + uint8_t hs_high_phase; /* Number of clock cycles SCL stays high + before it falls */ + uint8_t hs_setup; /* Number of clock 
cycles SCL stays low + before it rises */ + uint8_t prescale; /* Prescale divider */ + uint8_t time_p; /* Timing coefficient */ + uint8_t no_div; /* Disable clock divider */ + uint8_t time_div; /* Post-prescale divider */ +}; + +static const struct bus_speed_cfg std_cfg_table[] = { + [BCM_SPD_100K] = {0x01, 0x01, 0x03, 0x06, 0x00, 0x02}, + [BCM_SPD_400K] = {0x05, 0x01, 0x03, 0x05, 0x01, 0x02}, + [BCM_SPD_1MHZ] = {0x01, 0x01, 0x03, 0x01, 0x01, 0x03}, +}; + +static const struct hs_bus_speed_cfg hs_cfg_table[] = { + [BCM_SPD_3P4MHZ] = {0x01, 0x08, 0x14, 0x00, 0x06, 0x01, 0x00}, +}; + +struct bcm_kona_i2c_dev { + struct device *device; + + void __iomem *base; + int irq; + struct clk *external_clk; + + struct i2c_adapter adapter; + + struct completion done; + + const struct bus_speed_cfg *std_cfg; + const struct hs_bus_speed_cfg *hs_cfg; +}; + +static void bcm_kona_i2c_send_cmd_to_ctrl(struct bcm_kona_i2c_dev *dev, + enum bcm_kona_cmd_t cmd) +{ + dev_dbg(dev->device, "%s, %d\n", __func__, cmd); + + switch (cmd) { + case BCM_CMD_NOACTION: + writel((CS_CMD_CMD_NO_ACTION << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + case BCM_CMD_START: + writel((CS_ACK_CMD_GEN_START << CS_ACK_SHIFT) | + (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + case BCM_CMD_RESTART: + writel((CS_ACK_CMD_GEN_RESTART << CS_ACK_SHIFT) | + (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + case BCM_CMD_STOP: + writel((CS_CMD_CMD_STOP << CS_CMD_SHIFT) | + (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), + dev->base + CS_OFFSET); + break; + + default: + dev_err(dev->device, "Unknown command %d\n", cmd); + } +} + +static void bcm_kona_i2c_enable_clock(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + CLKEN_OFFSET) | CLKEN_CLKEN_MASK, + dev->base + CLKEN_OFFSET); +} + +static void bcm_kona_i2c_disable_clock(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_CLKEN_MASK, + dev->base + CLKEN_OFFSET); +} + +static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) +{ + struct bcm_kona_i2c_dev *dev = devid; + uint32_t status = readl(dev->base + ISR_OFFSET); + + if ((status & ~ISR_RESERVED_MASK) == 0) + return IRQ_NONE; + + /* Must flush the TX FIFO when NAK detected */ + if (status & ISR_NOACK_MASK) + writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK, + dev->base + TXFCR_OFFSET); + + writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); + complete_all(&dev->done); + + return IRQ_HANDLED; +} + +/* Wait for ISR_CMDBUSY_MASK to go low before writing to CS, DAT, or RCD */ +static int bcm_kona_i2c_wait_if_busy(struct bcm_kona_i2c_dev *dev) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(I2C_TIMEOUT); + + while (readl(dev->base + ISR_OFFSET) & ISR_CMDBUSY_MASK) + if (time_after(jiffies, timeout)) { + dev_err(dev->device, "CMDBUSY timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* Send command to I2C bus */ +static int bcm_kona_send_i2c_cmd(struct bcm_kona_i2c_dev *dev, + enum bcm_kona_cmd_t cmd) +{ + int rc; + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + + /* Make sure the hardware is ready */ + rc = bcm_kona_i2c_wait_if_busy(dev); + if (rc < 0) + return rc; + + /* Unmask the session done interrupt */ + writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET); + + /* Mark as incomplete before sending the command */ + reinit_completion(&dev->done); + + /* Send the command */ + 
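bcm_kona_send_i2c_cmd() arms a completion, kicks the controller, and lets the interrupt handler above wake it with complete_all(); a timeout bounds the wait. A stripped-down sketch of that handshake, assuming an illustrative device structure:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct demo_dev {                       /* illustrative container */
        struct completion done;
};

static irqreturn_t demo_isr(int irq, void *data)
{
        struct demo_dev *d = data;

        /* acknowledge the hardware status here, then wake the submitter */
        complete_all(&d->done);
        return IRQ_HANDLED;
}

static int demo_send_and_wait(struct demo_dev *d)
{
        unsigned long left = msecs_to_jiffies(100);

        reinit_completion(&d->done);    /* arm before touching the hardware */
        /* ... write the command register here ... */
        left = wait_for_completion_timeout(&d->done, left);
        return left ? 0 : -ETIMEDOUT;
}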
bcm_kona_i2c_send_cmd_to_ctrl(dev, cmd); + + /* Wait for transaction to finish or timeout */ + time_left = wait_for_completion_timeout(&dev->done, time_left); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + if (!time_left) { + dev_err(dev->device, "controller timed out\n"); + rc = -ETIMEDOUT; + } + + /* Clear command */ + bcm_kona_i2c_send_cmd_to_ctrl(dev, BCM_CMD_NOACTION); + + return rc; +} + +/* Read a single RX FIFO worth of data from the i2c bus */ +static int bcm_kona_i2c_read_fifo_single(struct bcm_kona_i2c_dev *dev, + uint8_t *buf, unsigned int len, + unsigned int last_byte_nak) +{ + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + + /* Mark as incomplete before starting the RX FIFO */ + reinit_completion(&dev->done); + + /* Unmask the read complete interrupt */ + writel(IER_READ_COMPLETE_INT_MASK, dev->base + IER_OFFSET); + + /* Start the RX FIFO */ + writel((last_byte_nak << RXFCR_NACK_EN_SHIFT) | + (len << RXFCR_READ_COUNT_SHIFT), + dev->base + RXFCR_OFFSET); + + /* Wait for FIFO read to complete */ + time_left = wait_for_completion_timeout(&dev->done, time_left); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + if (!time_left) { + dev_err(dev->device, "RX FIFO time out\n"); + return -EREMOTEIO; + } + + /* Read data from FIFO */ + for (; len > 0; len--, buf++) + *buf = readl(dev->base + RXFIFORDOUT_OFFSET); + + return 0; +} + +/* Read any amount of data using the RX FIFO from the i2c bus */ +static int bcm_kona_i2c_read_fifo(struct bcm_kona_i2c_dev *dev, + struct i2c_msg *msg) +{ + unsigned int bytes_to_read = MAX_RX_FIFO_SIZE; + unsigned int last_byte_nak = 0; + unsigned int bytes_read = 0; + int rc; + + uint8_t *tmp_buf = msg->buf; + + while (bytes_read < msg->len) { + if (msg->len - bytes_read <= MAX_RX_FIFO_SIZE) { + last_byte_nak = 1; /* NAK last byte of transfer */ + bytes_to_read = msg->len - bytes_read; + } + + rc = bcm_kona_i2c_read_fifo_single(dev, tmp_buf, bytes_to_read, + last_byte_nak); + if (rc < 0) + return -EREMOTEIO; + + bytes_read += bytes_to_read; + tmp_buf += bytes_to_read; + } + + return 0; +} + +/* Write a single byte of data to the i2c bus */ +static int bcm_kona_i2c_write_byte(struct bcm_kona_i2c_dev *dev, uint8_t data, + unsigned int nak_expected) +{ + int rc; + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + unsigned int nak_received; + + /* Make sure the hardware is ready */ + rc = bcm_kona_i2c_wait_if_busy(dev); + if (rc < 0) + return rc; + + /* Clear pending session done interrupt */ + writel(ISR_SES_DONE_MASK, dev->base + ISR_OFFSET); + + /* Unmask the session done interrupt */ + writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET); + + /* Mark as incomplete before sending the data */ + reinit_completion(&dev->done); + + /* Send one byte of data */ + writel(data, dev->base + DAT_OFFSET); + + /* Wait for byte to be written */ + time_left = wait_for_completion_timeout(&dev->done, time_left); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + if (!time_left) { + dev_dbg(dev->device, "controller timed out\n"); + return -ETIMEDOUT; + } + + nak_received = readl(dev->base + CS_OFFSET) & CS_ACK_MASK ? 
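bcm_kona_i2c_read_fifo() above splits an arbitrarily long read into RX-FIFO-sized pieces and asks the controller to NAK only after the final byte. A compact sketch of that chunking, with the per-FIFO transfer abstracted behind an illustrative callback:

#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_FIFO_SIZE 64U              /* bytes moved per FIFO fill */

/* read_chunk() stands in for the single-FIFO helper; last_nak tells the
 * controller to NAK after the final byte so the slave releases the bus. */
static int demo_read(int (*read_chunk)(u8 *buf, unsigned int len, bool last_nak),
                     u8 *buf, unsigned int total)
{
        unsigned int done = 0;

        while (done < total) {
                unsigned int n = min(total - done, DEMO_FIFO_SIZE);
                bool last = (total - done) <= DEMO_FIFO_SIZE;
                int rc = read_chunk(buf + done, n, last);

                if (rc < 0)
                        return rc;
                done += n;
        }
        return 0;
}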
1 : 0; + + if (nak_received ^ nak_expected) { + dev_dbg(dev->device, "unexpected NAK/ACK\n"); + return -EREMOTEIO; + } + + return 0; +} + +/* Write a single TX FIFO worth of data to the i2c bus */ +static int bcm_kona_i2c_write_fifo_single(struct bcm_kona_i2c_dev *dev, + uint8_t *buf, unsigned int len) +{ + int k; + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); + unsigned int fifo_status; + + /* Mark as incomplete before sending data to the TX FIFO */ + reinit_completion(&dev->done); + + /* Unmask the fifo empty and nak interrupt */ + writel(IER_FIFO_INT_EN_MASK | IER_NOACK_EN_MASK, + dev->base + IER_OFFSET); + + /* Disable IRQ to load a FIFO worth of data without interruption */ + disable_irq(dev->irq); + + /* Write data into FIFO */ + for (k = 0; k < len; k++) + writel(buf[k], (dev->base + DAT_OFFSET)); + + /* Enable IRQ now that data has been loaded */ + enable_irq(dev->irq); + + /* Wait for FIFO to empty */ + do { + time_left = wait_for_completion_timeout(&dev->done, time_left); + fifo_status = readl(dev->base + FIFO_STATUS_OFFSET); + } while (time_left && !(fifo_status & FIFO_STATUS_TXFIFO_EMPTY_MASK)); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + /* Check if there was a NAK */ + if (readl(dev->base + CS_OFFSET) & CS_ACK_MASK) { + dev_err(dev->device, "unexpected NAK\n"); + return -EREMOTEIO; + } + + /* Check if a timeout occured */ + if (!time_left) { + dev_err(dev->device, "completion timed out\n"); + return -EREMOTEIO; + } + + return 0; +} + + +/* Write any amount of data using TX FIFO to the i2c bus */ +static int bcm_kona_i2c_write_fifo(struct bcm_kona_i2c_dev *dev, + struct i2c_msg *msg) +{ + unsigned int bytes_to_write = MAX_TX_FIFO_SIZE; + unsigned int bytes_written = 0; + int rc; + + uint8_t *tmp_buf = msg->buf; + + while (bytes_written < msg->len) { + if (msg->len - bytes_written <= MAX_TX_FIFO_SIZE) + bytes_to_write = msg->len - bytes_written; + + rc = bcm_kona_i2c_write_fifo_single(dev, tmp_buf, + bytes_to_write); + if (rc < 0) + return -EREMOTEIO; + + bytes_written += bytes_to_write; + tmp_buf += bytes_to_write; + } + + return 0; +} + +/* Send i2c address */ +static int bcm_kona_i2c_do_addr(struct bcm_kona_i2c_dev *dev, + struct i2c_msg *msg) +{ + unsigned char addr; + + if (msg->flags & I2C_M_TEN) { + /* First byte is 11110XX0 where XX is upper 2 bits */ + addr = 0xF0 | ((msg->addr & 0x300) >> 7); + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + + /* Second byte is the remaining 8 bits */ + addr = msg->addr & 0xFF; + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + + if (msg->flags & I2C_M_RD) { + /* For read, send restart command */ + if (bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART) < 0) + return -EREMOTEIO; + + /* Then re-send the first byte with the read bit set */ + addr = 0xF0 | ((msg->addr & 0x300) >> 7) | 0x01; + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + } + } else { + addr = msg->addr << 1; + + if (msg->flags & I2C_M_RD) + addr |= 1; + + if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) + return -EREMOTEIO; + } + + return 0; +} + +static void bcm_kona_i2c_enable_autosense(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_AUTOSENSE_OFF_MASK, + dev->base + CLKEN_OFFSET); +} + +static void bcm_kona_i2c_config_timing(struct bcm_kona_i2c_dev *dev) +{ + writel(readl(dev->base + HSTIM_OFFSET) & ~HSTIM_HS_MODE_MASK, + dev->base + HSTIM_OFFSET); + + writel((dev->std_cfg->prescale << TIM_PRESCALE_SHIFT) | + (dev->std_cfg->time_p << 
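bcm_kona_i2c_do_addr() above builds the address phase by hand: a 10-bit target starts with the 11110XX0 prefix carrying address bits 9:8, while a 7-bit target packs the address into bits 7:1 with the read/write flag in bit 0. A minimal sketch of just that byte encoding (helper name is illustrative):

#include <linux/i2c.h>

/* Illustrative: first address byte of a transfer.  For 10-bit reads the
 * driver re-sends this prefix with the read bit set after a RESTART. */
static u8 first_addr_byte(const struct i2c_msg *msg)
{
        if (msg->flags & I2C_M_TEN)
                /* 1111 0 A9 A8 0: 10-bit prefix, write direction */
                return 0xF0 | ((msg->addr & 0x300) >> 7);

        /* 7-bit: address in bits 7:1, read/write flag in bit 0 */
        return (msg->addr << 1) | ((msg->flags & I2C_M_RD) ? 1 : 0);
}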
TIM_P_SHIFT) | + (dev->std_cfg->no_div << TIM_NO_DIV_SHIFT) | + (dev->std_cfg->time_div << TIM_DIV_SHIFT), + dev->base + TIM_OFFSET); + + writel((dev->std_cfg->time_m << CLKEN_M_SHIFT) | + (dev->std_cfg->time_n << CLKEN_N_SHIFT) | + CLKEN_CLKEN_MASK, + dev->base + CLKEN_OFFSET); +} + +static void bcm_kona_i2c_config_timing_hs(struct bcm_kona_i2c_dev *dev) +{ + writel((dev->hs_cfg->prescale << TIM_PRESCALE_SHIFT) | + (dev->hs_cfg->time_p << TIM_P_SHIFT) | + (dev->hs_cfg->no_div << TIM_NO_DIV_SHIFT) | + (dev->hs_cfg->time_div << TIM_DIV_SHIFT), + dev->base + TIM_OFFSET); + + writel((dev->hs_cfg->hs_hold << HSTIM_HS_HOLD_SHIFT) | + (dev->hs_cfg->hs_high_phase << HSTIM_HS_HIGH_PHASE_SHIFT) | + (dev->hs_cfg->hs_setup << HSTIM_HS_SETUP_SHIFT), + dev->base + HSTIM_OFFSET); + + writel(readl(dev->base + HSTIM_OFFSET) | HSTIM_HS_MODE_MASK, + dev->base + HSTIM_OFFSET); +} + +static int bcm_kona_i2c_switch_to_hs(struct bcm_kona_i2c_dev *dev) +{ + int rc; + + /* Send mastercode at standard speed */ + rc = bcm_kona_i2c_write_byte(dev, MASTERCODE, 1); + if (rc < 0) { + pr_err("High speed handshake failed\n"); + return rc; + } + + /* Configure external clock to higher frequency */ + rc = clk_set_rate(dev->external_clk, HS_EXT_CLK_FREQ); + if (rc) { + dev_err(dev->device, "%s: clk_set_rate returned %d\n", + __func__, rc); + return rc; + } + + /* Reconfigure internal dividers */ + bcm_kona_i2c_config_timing_hs(dev); + + /* Send a restart command */ + rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART); + if (rc < 0) + dev_err(dev->device, "High speed restart command failed\n"); + + return rc; +} + +static int bcm_kona_i2c_switch_to_std(struct bcm_kona_i2c_dev *dev) +{ + int rc; + + /* Reconfigure internal dividers */ + bcm_kona_i2c_config_timing(dev); + + /* Configure external clock to lower frequency */ + rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ); + if (rc) { + dev_err(dev->device, "%s: clk_set_rate returned %d\n", + __func__, rc); + } + + return rc; +} + +/* Master transfer function */ +static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg msgs[], int num) +{ + struct bcm_kona_i2c_dev *dev = i2c_get_adapdata(adapter); + struct i2c_msg *pmsg; + int rc = 0; + int i; + + rc = clk_prepare_enable(dev->external_clk); + if (rc) { + dev_err(dev->device, "%s: peri clock enable failed. 
err %d\n", + __func__, rc); + return rc; + } + + /* Enable pad output */ + writel(0, dev->base + PADCTL_OFFSET); + + /* Enable internal clocks */ + bcm_kona_i2c_enable_clock(dev); + + /* Send start command */ + rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_START); + if (rc < 0) { + dev_err(dev->device, "Start command failed rc = %d\n", rc); + goto xfer_disable_pad; + } + + /* Switch to high speed if applicable */ + if (dev->hs_cfg) { + rc = bcm_kona_i2c_switch_to_hs(dev); + if (rc < 0) + goto xfer_send_stop; + } + + /* Loop through all messages */ + for (i = 0; i < num; i++) { + pmsg = &msgs[i]; + + /* Send restart for subsequent messages */ + if ((i != 0) && ((pmsg->flags & I2C_M_NOSTART) == 0)) { + rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART); + if (rc < 0) { + dev_err(dev->device, + "restart cmd failed rc = %d\n", rc); + goto xfer_send_stop; + } + } + + /* Send slave address */ + if (!(pmsg->flags & I2C_M_NOSTART)) { + rc = bcm_kona_i2c_do_addr(dev, pmsg); + if (rc < 0) { + dev_err(dev->device, + "NAK from addr %2.2x msg#%d rc = %d\n", + pmsg->addr, i, rc); + goto xfer_send_stop; + } + } + + /* Perform data transfer */ + if (pmsg->flags & I2C_M_RD) { + rc = bcm_kona_i2c_read_fifo(dev, pmsg); + if (rc < 0) { + dev_err(dev->device, "read failure\n"); + goto xfer_send_stop; + } + } else { + rc = bcm_kona_i2c_write_fifo(dev, pmsg); + if (rc < 0) { + dev_err(dev->device, "write failure"); + goto xfer_send_stop; + } + } + } + + rc = num; + +xfer_send_stop: + /* Send a STOP command */ + bcm_kona_send_i2c_cmd(dev, BCM_CMD_STOP); + + /* Return from high speed if applicable */ + if (dev->hs_cfg) { + int hs_rc = bcm_kona_i2c_switch_to_std(dev); + + if (hs_rc) + rc = hs_rc; + } + +xfer_disable_pad: + /* Disable pad output */ + writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET); + + /* Stop internal clock */ + bcm_kona_i2c_disable_clock(dev); + + clk_disable_unprepare(dev->external_clk); + + return rc; +} + +static uint32_t bcm_kona_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | + I2C_FUNC_NOSTART; +} + +static const struct i2c_algorithm bcm_algo = { + .master_xfer = bcm_kona_i2c_xfer, + .functionality = bcm_kona_i2c_functionality, +}; + +static int bcm_kona_i2c_assign_bus_speed(struct bcm_kona_i2c_dev *dev) +{ + unsigned int bus_speed; + int ret = of_property_read_u32(dev->device->of_node, "clock-frequency", + &bus_speed); + if (ret < 0) { + dev_err(dev->device, "missing clock-frequency property\n"); + return -ENODEV; + } + + switch (bus_speed) { + case 100000: + dev->std_cfg = &std_cfg_table[BCM_SPD_100K]; + break; + case 400000: + dev->std_cfg = &std_cfg_table[BCM_SPD_400K]; + break; + case 1000000: + dev->std_cfg = &std_cfg_table[BCM_SPD_1MHZ]; + break; + case 3400000: + /* Send mastercode at 100k */ + dev->std_cfg = &std_cfg_table[BCM_SPD_100K]; + dev->hs_cfg = &hs_cfg_table[BCM_SPD_3P4MHZ]; + break; + default: + pr_err("%d hz bus speed not supported\n", bus_speed); + pr_err("Valid speeds are 100khz, 400khz, 1mhz, and 3.4mhz\n"); + return -EINVAL; + } + + return 0; +} + +static int bcm_kona_i2c_probe(struct platform_device *pdev) +{ + int rc = 0; + struct bcm_kona_i2c_dev *dev; + struct i2c_adapter *adap; + struct resource *iomem; + + /* Allocate memory for private data structure */ + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + platform_set_drvdata(pdev, dev); + dev->device = &pdev->dev; + init_completion(&dev->done); + + /* Map hardware registers */ + iomem = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + dev->base = devm_ioremap_resource(dev->device, iomem); + if (IS_ERR(dev->base)) + return -ENOMEM; + + /* Get and enable external clock */ + dev->external_clk = devm_clk_get(dev->device, NULL); + if (IS_ERR(dev->external_clk)) { + dev_err(dev->device, "couldn't get clock\n"); + return -ENODEV; + } + + rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ); + if (rc) { + dev_err(dev->device, "%s: clk_set_rate returned %d\n", + __func__, rc); + return rc; + } + + rc = clk_prepare_enable(dev->external_clk); + if (rc) { + dev_err(dev->device, "couldn't enable clock\n"); + return rc; + } + + /* Parse bus speed */ + rc = bcm_kona_i2c_assign_bus_speed(dev); + if (rc) + goto probe_disable_clk; + + /* Enable internal clocks */ + bcm_kona_i2c_enable_clock(dev); + + /* Configure internal dividers */ + bcm_kona_i2c_config_timing(dev); + + /* Disable timeout */ + writel(0, dev->base + TOUT_OFFSET); + + /* Enable autosense */ + bcm_kona_i2c_enable_autosense(dev); + + /* Enable TX FIFO */ + writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK, + dev->base + TXFCR_OFFSET); + + /* Mask all interrupts */ + writel(0, dev->base + IER_OFFSET); + + /* Clear all pending interrupts */ + writel(ISR_CMDBUSY_MASK | + ISR_READ_COMPLETE_MASK | + ISR_SES_DONE_MASK | + ISR_ERR_MASK | + ISR_TXFIFOEMPTY_MASK | + ISR_NOACK_MASK, + dev->base + ISR_OFFSET); + + /* Get the interrupt number */ + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq < 0) { + dev_err(dev->device, "no irq resource\n"); + rc = -ENODEV; + goto probe_disable_clk; + } + + /* register the ISR handler */ + rc = devm_request_irq(&pdev->dev, dev->irq, bcm_kona_i2c_isr, + IRQF_SHARED, pdev->name, dev); + if (rc) { + dev_err(dev->device, "failed to request irq %i\n", dev->irq); + goto probe_disable_clk; + } + + /* Enable the controller but leave it idle */ + bcm_kona_i2c_send_cmd_to_ctrl(dev, BCM_CMD_NOACTION); + + /* Disable pad output */ + writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET); + + /* Disable internal clock */ + bcm_kona_i2c_disable_clock(dev); + + /* Disable external clock */ + clk_disable_unprepare(dev->external_clk); + + /* Add the i2c adapter */ + adap = &dev->adapter; + i2c_set_adapdata(adap, dev); + adap->owner = THIS_MODULE; + strlcpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name)); + adap->algo = &bcm_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + rc = i2c_add_adapter(adap); + if (rc) { + dev_err(dev->device, "failed to add adapter\n"); + return rc; + } + + dev_info(dev->device, "device registered successfully\n"); + + return 0; + +probe_disable_clk: + bcm_kona_i2c_disable_clock(dev); + clk_disable_unprepare(dev->external_clk); + + return rc; +} + +static int bcm_kona_i2c_remove(struct platform_device *pdev) +{ + struct bcm_kona_i2c_dev *dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&dev->adapter); + + return 0; +} + +static const struct of_device_id bcm_kona_i2c_of_match[] = { + {.compatible = "brcm,kona-i2c",}, + {}, +}; +MODULE_DEVICE_TABLE(of, kona_i2c_of_match); + +static struct platform_driver bcm_kona_i2c_driver = { + .driver = { + .name = "bcm-kona-i2c", + .owner = THIS_MODULE, + .of_match_table = bcm_kona_i2c_of_match, + }, + .probe = bcm_kona_i2c_probe, + .remove = bcm_kona_i2c_remove, +}; +module_platform_driver(bcm_kona_i2c_driver); + +MODULE_AUTHOR("Tim Kryger <tkryger@broadcom.com>"); +MODULE_DESCRIPTION("Broadcom Kona I2C Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-bfin-twi.c 
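The probe path above leans on devm_* helpers so the MMIO mapping, clock reference and IRQ line are released automatically on failure or removal. A condensed sketch of that acquisition skeleton, with the controller-specific setup elided:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv {                      /* illustrative private data */
        void __iomem *base;
        struct clk *clk;
        int irq;
};

static int demo_probe(struct platform_device *pdev)
{
        struct demo_priv *p;
        struct resource *res;

        p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        p->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(p->base))
                return PTR_ERR(p->base);        /* usual idiom: propagate the error */

        p->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(p->clk))
                return PTR_ERR(p->clk);

        p->irq = platform_get_irq(pdev, 0);
        if (p->irq < 0)
                return p->irq;

        platform_set_drvdata(pdev, p);
        return 0;
}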
b/drivers/i2c/busses/i2c-bfin-twi.c index 35a473ba3d8..3b9bd9a3f2b 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -675,7 +675,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) p_adap->retries = 3; rc = peripheral_request_list( - (unsigned short *)dev_get_platdata(&pdev->dev), + dev_get_platdata(&pdev->dev), "i2c-bfin-twi"); if (rc) { dev_err(&pdev->dev, "Can't setup pin mux!\n"); @@ -723,7 +723,7 @@ out_error_add_adapter: free_irq(iface->irq, iface); out_error_req_irq: out_error_no_irq: - peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev)); + peripheral_free_list(dev_get_platdata(&pdev->dev)); out_error_pin_mux: iounmap(iface->regs_base); out_error_ioremap: @@ -739,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev) i2c_del_adapter(&(iface->adap)); free_irq(iface->irq, iface); - peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev)); + peripheral_free_list(dev_get_platdata(&pdev->dev)); iounmap(iface->regs_base); kfree(iface); diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c index 2d46f13adfd..ce7ffba2b02 100644 --- a/drivers/i2c/busses/i2c-cbus-gpio.c +++ b/drivers/i2c/busses/i2c-cbus-gpio.c @@ -246,6 +246,7 @@ static int cbus_i2c_probe(struct platform_device *pdev) adapter->owner = THIS_MODULE; adapter->class = I2C_CLASS_HWMON; adapter->dev.parent = &pdev->dev; + adapter->dev.of_node = pdev->dev.of_node; adapter->nr = pdev->id; adapter->timeout = HZ; adapter->algo = &cbus_i2c_algo; @@ -289,6 +290,7 @@ static struct platform_driver cbus_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = "i2c-cbus-gpio", + .of_match_table = of_match_ptr(i2c_cbus_dt_ids), }, }; module_platform_driver(cbus_i2c_driver); diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 960dec61c64..ff05d9fef4a 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -795,7 +795,7 @@ static struct platform_driver davinci_i2c_driver = { .name = "i2c_davinci", .owner = THIS_MODULE, .pm = davinci_i2c_pm_ops, - .of_match_table = of_match_ptr(davinci_i2c_of_match), + .of_match_table = davinci_i2c_of_match, }, }; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0aa01136f8d..d0bdac0498c 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -103,6 +103,8 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, { "INT33C3", 0 }, + { "INT3432", 0 }, + { "INT3433", 0 }, { "80860F41", 0 }, { } }; diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 0f3752967c4..ff15ae90aaf 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -312,24 +312,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap) } /** - * pch_i2c_getack() - to confirm ACK/NACK - * @adap: Pointer to struct i2c_algo_pch_data. - */ -static s32 pch_i2c_getack(struct i2c_algo_pch_data *adap) -{ - u32 reg_val; - void __iomem *p = adap->pch_base_address; - reg_val = ioread32(p + PCH_I2CSR) & PCH_GETACK; - - if (reg_val != 0) { - pch_err(adap, "return%d\n", -EPROTO); - return -EPROTO; - } - - return 0; -} - -/** * pch_i2c_stop() - generate stop condition in normal mode. * @adap: Pointer to struct i2c_algo_pch_data. 
*/ @@ -344,6 +326,7 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap) static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap) { long ret; + void __iomem *p = adap->pch_base_address; ret = wait_event_timeout(pch_event, (adap->pch_event_flag != 0), msecs_to_jiffies(1000)); @@ -366,10 +349,9 @@ static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap) adap->pch_event_flag = 0; - if (pch_i2c_getack(adap)) { - pch_dbg(adap, "Receive NACK for slave address" - "setting\n"); - return -EIO; + if (ioread32(p + PCH_I2CSR) & PCH_GETACK) { + pch_dbg(adap, "Receive NACK for slave address setting\n"); + return -ENXIO; } return 0; diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c new file mode 100644 index 00000000000..c1ef228095b --- /dev/null +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -0,0 +1,769 @@ +/** + * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/kernel.h> +#include <linux/module.h> + +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/time.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/spinlock.h> + +/* + * HSI2C controller from Samsung supports 2 modes of operation + * 1. Auto mode: Where in master automatically controls the whole transaction + * 2. Manual mode: Software controls the transaction by issuing commands + * START, READ, WRITE, STOP, RESTART in I2C_MANUAL_CMD register. 
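The i2c-eg20t hunk above folds the old pch_i2c_getack() helper into its only caller and returns -ENXIO rather than -EIO when the slave address gets no ACK, which matches the fault code the I2C core documents for an address NAK. A minimal sketch of that check (register offset and bit are illustrative):

#include <linux/io.h>

#define DEMO_I2CSR      0x00    /* illustrative status register offset */
#define DEMO_ADDR_NAK   0x01    /* illustrative "address was NAKed" bit */

/* Illustrative: report an address NAK distinctly from other bus errors. */
static int demo_check_addr_ack(void __iomem *base)
{
        if (ioread32(base + DEMO_I2CSR) & DEMO_ADDR_NAK)
                return -ENXIO;  /* no ACK for the slave address */
        return 0;
}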
+ * + * Operation mode can be selected by setting AUTO_MODE bit in I2C_CONF register + * + * Special bits are available for both modes of operation to set commands + * and for checking transfer status + */ + +/* Register Map */ +#define HSI2C_CTL 0x00 +#define HSI2C_FIFO_CTL 0x04 +#define HSI2C_TRAILIG_CTL 0x08 +#define HSI2C_CLK_CTL 0x0C +#define HSI2C_CLK_SLOT 0x10 +#define HSI2C_INT_ENABLE 0x20 +#define HSI2C_INT_STATUS 0x24 +#define HSI2C_ERR_STATUS 0x2C +#define HSI2C_FIFO_STATUS 0x30 +#define HSI2C_TX_DATA 0x34 +#define HSI2C_RX_DATA 0x38 +#define HSI2C_CONF 0x40 +#define HSI2C_AUTO_CONF 0x44 +#define HSI2C_TIMEOUT 0x48 +#define HSI2C_MANUAL_CMD 0x4C +#define HSI2C_TRANS_STATUS 0x50 +#define HSI2C_TIMING_HS1 0x54 +#define HSI2C_TIMING_HS2 0x58 +#define HSI2C_TIMING_HS3 0x5C +#define HSI2C_TIMING_FS1 0x60 +#define HSI2C_TIMING_FS2 0x64 +#define HSI2C_TIMING_FS3 0x68 +#define HSI2C_TIMING_SLA 0x6C +#define HSI2C_ADDR 0x70 + +/* I2C_CTL Register bits */ +#define HSI2C_FUNC_MODE_I2C (1u << 0) +#define HSI2C_MASTER (1u << 3) +#define HSI2C_RXCHON (1u << 6) +#define HSI2C_TXCHON (1u << 7) +#define HSI2C_SW_RST (1u << 31) + +/* I2C_FIFO_CTL Register bits */ +#define HSI2C_RXFIFO_EN (1u << 0) +#define HSI2C_TXFIFO_EN (1u << 1) +#define HSI2C_RXFIFO_TRIGGER_LEVEL(x) ((x) << 4) +#define HSI2C_TXFIFO_TRIGGER_LEVEL(x) ((x) << 16) + +/* As per user manual FIFO max depth is 64bytes */ +#define HSI2C_FIFO_MAX 0x40 +/* default trigger levels for Tx and Rx FIFOs */ +#define HSI2C_DEF_TXFIFO_LVL (HSI2C_FIFO_MAX - 0x30) +#define HSI2C_DEF_RXFIFO_LVL (HSI2C_FIFO_MAX - 0x10) + +/* I2C_TRAILING_CTL Register bits */ +#define HSI2C_TRAILING_COUNT (0xf) + +/* I2C_INT_EN Register bits */ +#define HSI2C_INT_TX_ALMOSTEMPTY_EN (1u << 0) +#define HSI2C_INT_RX_ALMOSTFULL_EN (1u << 1) +#define HSI2C_INT_TRAILING_EN (1u << 6) +#define HSI2C_INT_I2C_EN (1u << 9) + +/* I2C_INT_STAT Register bits */ +#define HSI2C_INT_TX_ALMOSTEMPTY (1u << 0) +#define HSI2C_INT_RX_ALMOSTFULL (1u << 1) +#define HSI2C_INT_TX_UNDERRUN (1u << 2) +#define HSI2C_INT_TX_OVERRUN (1u << 3) +#define HSI2C_INT_RX_UNDERRUN (1u << 4) +#define HSI2C_INT_RX_OVERRUN (1u << 5) +#define HSI2C_INT_TRAILING (1u << 6) +#define HSI2C_INT_I2C (1u << 9) + +/* I2C_FIFO_STAT Register bits */ +#define HSI2C_RX_FIFO_EMPTY (1u << 24) +#define HSI2C_RX_FIFO_FULL (1u << 23) +#define HSI2C_RX_FIFO_LVL(x) ((x >> 16) & 0x7f) +#define HSI2C_TX_FIFO_EMPTY (1u << 8) +#define HSI2C_TX_FIFO_FULL (1u << 7) +#define HSI2C_TX_FIFO_LVL(x) ((x >> 0) & 0x7f) + +/* I2C_CONF Register bits */ +#define HSI2C_AUTO_MODE (1u << 31) +#define HSI2C_10BIT_ADDR_MODE (1u << 30) +#define HSI2C_HS_MODE (1u << 29) + +/* I2C_AUTO_CONF Register bits */ +#define HSI2C_READ_WRITE (1u << 16) +#define HSI2C_STOP_AFTER_TRANS (1u << 17) +#define HSI2C_MASTER_RUN (1u << 31) + +/* I2C_TIMEOUT Register bits */ +#define HSI2C_TIMEOUT_EN (1u << 31) +#define HSI2C_TIMEOUT_MASK 0xff + +/* I2C_TRANS_STATUS register bits */ +#define HSI2C_MASTER_BUSY (1u << 17) +#define HSI2C_SLAVE_BUSY (1u << 16) +#define HSI2C_TIMEOUT_AUTO (1u << 4) +#define HSI2C_NO_DEV (1u << 3) +#define HSI2C_NO_DEV_ACK (1u << 2) +#define HSI2C_TRANS_ABORT (1u << 1) +#define HSI2C_TRANS_DONE (1u << 0) + +/* I2C_ADDR register bits */ +#define HSI2C_SLV_ADDR_SLV(x) ((x & 0x3ff) << 0) +#define HSI2C_SLV_ADDR_MAS(x) ((x & 0x3ff) << 10) +#define HSI2C_MASTER_ID(x) ((x & 0xff) << 24) +#define MASTER_ID(x) ((x & 0x7) + 0x08) + +/* + * Controller operating frequency, timing values for operation + * are calculated against this frequency + */ 
+#define HSI2C_HS_TX_CLOCK 1000000 +#define HSI2C_FS_TX_CLOCK 100000 +#define HSI2C_HIGH_SPD 1 +#define HSI2C_FAST_SPD 0 + +#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(1000)) + +struct exynos5_i2c { + struct i2c_adapter adap; + unsigned int suspended:1; + + struct i2c_msg *msg; + struct completion msg_complete; + unsigned int msg_ptr; + + unsigned int irq; + + void __iomem *regs; + struct clk *clk; + struct device *dev; + int state; + + spinlock_t lock; /* IRQ synchronization */ + + /* + * Since the TRANS_DONE bit is cleared on read, and we may read it + * either during an IRQ or after a transaction, keep track of its + * state here. + */ + int trans_done; + + /* Controller operating frequency */ + unsigned int fs_clock; + unsigned int hs_clock; + + /* + * HSI2C Controller can operate in + * 1. High speed upto 3.4Mbps + * 2. Fast speed upto 1Mbps + */ + int speed_mode; +}; + +static const struct of_device_id exynos5_i2c_match[] = { + { .compatible = "samsung,exynos5-hsi2c" }, + {}, +}; +MODULE_DEVICE_TABLE(of, exynos5_i2c_match); + +static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c) +{ + writel(readl(i2c->regs + HSI2C_INT_STATUS), + i2c->regs + HSI2C_INT_STATUS); +} + +/* + * exynos5_i2c_set_timing: updates the registers with appropriate + * timing values calculated + * + * Returns 0 on success, -EINVAL if the cycle length cannot + * be calculated. + */ +static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, int mode) +{ + u32 i2c_timing_s1; + u32 i2c_timing_s2; + u32 i2c_timing_s3; + u32 i2c_timing_sla; + unsigned int t_start_su, t_start_hd; + unsigned int t_stop_su; + unsigned int t_data_su, t_data_hd; + unsigned int t_scl_l, t_scl_h; + unsigned int t_sr_release; + unsigned int t_ftl_cycle; + unsigned int clkin = clk_get_rate(i2c->clk); + unsigned int div, utemp0 = 0, utemp1 = 0, clk_cycle; + unsigned int op_clk = (mode == HSI2C_HIGH_SPD) ? + i2c->hs_clock : i2c->fs_clock; + + /* + * FPCLK / FI2C = + * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE + * utemp0 = (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + * utemp1 = (TSCLK_L + TSCLK_H + 2) + */ + t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7; + utemp0 = (clkin / op_clk) - 8 - 2 * t_ftl_cycle; + + /* CLK_DIV max is 256 */ + for (div = 0; div < 256; div++) { + utemp1 = utemp0 / (div + 1); + + /* + * SCL_L and SCL_H each has max value of 255 + * Hence, For the clk_cycle to the have right value + * utemp1 has to be less then 512 and more than 4. 
+ */ + if ((utemp1 < 512) && (utemp1 > 4)) { + clk_cycle = utemp1 - 2; + break; + } else if (div == 255) { + dev_warn(i2c->dev, "Failed to calculate divisor"); + return -EINVAL; + } + } + + t_scl_l = clk_cycle / 2; + t_scl_h = clk_cycle / 2; + t_start_su = t_scl_l; + t_start_hd = t_scl_l; + t_stop_su = t_scl_l; + t_data_su = t_scl_l / 2; + t_data_hd = t_scl_l / 2; + t_sr_release = clk_cycle; + + i2c_timing_s1 = t_start_su << 24 | t_start_hd << 16 | t_stop_su << 8; + i2c_timing_s2 = t_data_su << 24 | t_scl_l << 8 | t_scl_h << 0; + i2c_timing_s3 = div << 16 | t_sr_release << 0; + i2c_timing_sla = t_data_hd << 0; + + dev_dbg(i2c->dev, "tSTART_SU: %X, tSTART_HD: %X, tSTOP_SU: %X\n", + t_start_su, t_start_hd, t_stop_su); + dev_dbg(i2c->dev, "tDATA_SU: %X, tSCL_L: %X, tSCL_H: %X\n", + t_data_su, t_scl_l, t_scl_h); + dev_dbg(i2c->dev, "nClkDiv: %X, tSR_RELEASE: %X\n", + div, t_sr_release); + dev_dbg(i2c->dev, "tDATA_HD: %X\n", t_data_hd); + + if (mode == HSI2C_HIGH_SPD) { + writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_HS1); + writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_HS2); + writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3); + } else { + writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_FS1); + writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_FS2); + writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3); + } + writel(i2c_timing_sla, i2c->regs + HSI2C_TIMING_SLA); + + return 0; +} + +static int exynos5_hsi2c_clock_setup(struct exynos5_i2c *i2c) +{ + /* + * Configure the Fast speed timing values + * Even the High Speed mode initially starts with Fast mode + */ + if (exynos5_i2c_set_timing(i2c, HSI2C_FAST_SPD)) { + dev_err(i2c->dev, "HSI2C FS Clock set up failed\n"); + return -EINVAL; + } + + /* configure the High speed timing values */ + if (i2c->speed_mode == HSI2C_HIGH_SPD) { + if (exynos5_i2c_set_timing(i2c, HSI2C_HIGH_SPD)) { + dev_err(i2c->dev, "HSI2C HS Clock set up failed\n"); + return -EINVAL; + } + } + + return 0; +} + +/* + * exynos5_i2c_init: configures the controller for I2C functionality + * Programs I2C controller for Master mode operation + */ +static void exynos5_i2c_init(struct exynos5_i2c *i2c) +{ + u32 i2c_conf = readl(i2c->regs + HSI2C_CONF); + u32 i2c_timeout = readl(i2c->regs + HSI2C_TIMEOUT); + + /* Clear to disable Timeout */ + i2c_timeout &= ~HSI2C_TIMEOUT_EN; + writel(i2c_timeout, i2c->regs + HSI2C_TIMEOUT); + + writel((HSI2C_FUNC_MODE_I2C | HSI2C_MASTER), + i2c->regs + HSI2C_CTL); + writel(HSI2C_TRAILING_COUNT, i2c->regs + HSI2C_TRAILIG_CTL); + + if (i2c->speed_mode == HSI2C_HIGH_SPD) { + writel(HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)), + i2c->regs + HSI2C_ADDR); + i2c_conf |= HSI2C_HS_MODE; + } + + writel(i2c_conf | HSI2C_AUTO_MODE, i2c->regs + HSI2C_CONF); +} + +static void exynos5_i2c_reset(struct exynos5_i2c *i2c) +{ + u32 i2c_ctl; + + /* Set and clear the bit for reset */ + i2c_ctl = readl(i2c->regs + HSI2C_CTL); + i2c_ctl |= HSI2C_SW_RST; + writel(i2c_ctl, i2c->regs + HSI2C_CTL); + + i2c_ctl = readl(i2c->regs + HSI2C_CTL); + i2c_ctl &= ~HSI2C_SW_RST; + writel(i2c_ctl, i2c->regs + HSI2C_CTL); + + /* We don't expect calculations to fail during the run */ + exynos5_hsi2c_clock_setup(i2c); + /* Initialize the configure registers */ + exynos5_i2c_init(i2c); +} + +/* + * exynos5_i2c_irq: top level IRQ servicing routine + * + * INT_STATUS registers gives the interrupt details. Further, + * FIFO_STATUS or TRANS_STATUS registers are to be check for detailed + * state of the bus. 
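exynos5_i2c_set_timing() above searches for the smallest CLK_DIV such that the remaining cycle budget still fits the 8-bit TSCLK_L/TSCLK_H fields, following the relation quoted in the comment, FPCLK / FI2C = (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE. A standalone sketch of that search, using plain integers and no hardware access:

#include <linux/errno.h>

/* Illustrative: pick (div, cycle) where cycle = TSCLK_L + TSCLK_H. */
static int find_i2c_divider(unsigned int clkin, unsigned int op_clk,
                            unsigned int flt_cycle,
                            unsigned int *div_out, unsigned int *cycle_out)
{
        unsigned int budget = clkin / op_clk - 8 - 2 * flt_cycle;
        unsigned int div;

        for (div = 0; div < 256; div++) {
                unsigned int per_div = budget / (div + 1);

                /* SCL_L and SCL_H are each at most 255, so per_div must stay
                 * below 512; per_div > 4 keeps the derived timings positive */
                if (per_div < 512 && per_div > 4) {
                        *div_out = div;
                        *cycle_out = per_div - 2;
                        return 0;
                }
        }
        return -EINVAL;         /* no divider satisfies the bounds */
}

For example, with clkin = 100 MHz, op_clk = 1 MHz and flt_cycle = 0 the budget is 92, so div = 0 and cycle = 90 already satisfy the bounds.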
+ */ +static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) +{ + struct exynos5_i2c *i2c = dev_id; + u32 fifo_level, int_status, fifo_status, trans_status; + unsigned char byte; + int len = 0; + + i2c->state = -EINVAL; + + spin_lock(&i2c->lock); + + int_status = readl(i2c->regs + HSI2C_INT_STATUS); + writel(int_status, i2c->regs + HSI2C_INT_STATUS); + fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); + + /* handle interrupt related to the transfer status */ + if (int_status & HSI2C_INT_I2C) { + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); + if (trans_status & HSI2C_NO_DEV_ACK) { + dev_dbg(i2c->dev, "No ACK from device\n"); + i2c->state = -ENXIO; + goto stop; + } else if (trans_status & HSI2C_NO_DEV) { + dev_dbg(i2c->dev, "No device\n"); + i2c->state = -ENXIO; + goto stop; + } else if (trans_status & HSI2C_TRANS_ABORT) { + dev_dbg(i2c->dev, "Deal with arbitration lose\n"); + i2c->state = -EAGAIN; + goto stop; + } else if (trans_status & HSI2C_TIMEOUT_AUTO) { + dev_dbg(i2c->dev, "Accessing device timed out\n"); + i2c->state = -EAGAIN; + goto stop; + } else if (trans_status & HSI2C_TRANS_DONE) { + i2c->trans_done = 1; + i2c->state = 0; + } + } + + if ((i2c->msg->flags & I2C_M_RD) && (int_status & + (HSI2C_INT_TRAILING | HSI2C_INT_RX_ALMOSTFULL))) { + fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); + fifo_level = HSI2C_RX_FIFO_LVL(fifo_status); + len = min(fifo_level, i2c->msg->len - i2c->msg_ptr); + + while (len > 0) { + byte = (unsigned char) + readl(i2c->regs + HSI2C_RX_DATA); + i2c->msg->buf[i2c->msg_ptr++] = byte; + len--; + } + i2c->state = 0; + } else if (int_status & HSI2C_INT_TX_ALMOSTEMPTY) { + fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); + fifo_level = HSI2C_TX_FIFO_LVL(fifo_status); + + len = HSI2C_FIFO_MAX - fifo_level; + if (len > (i2c->msg->len - i2c->msg_ptr)) + len = i2c->msg->len - i2c->msg_ptr; + + while (len > 0) { + byte = i2c->msg->buf[i2c->msg_ptr++]; + writel(byte, i2c->regs + HSI2C_TX_DATA); + len--; + } + i2c->state = 0; + } + + stop: + if ((i2c->trans_done && (i2c->msg->len == i2c->msg_ptr)) || + (i2c->state < 0)) { + writel(0, i2c->regs + HSI2C_INT_ENABLE); + exynos5_i2c_clr_pend_irq(i2c); + complete(&i2c->msg_complete); + } + + spin_unlock(&i2c->lock); + + return IRQ_HANDLED; +} + +/* + * exynos5_i2c_wait_bus_idle + * + * Wait for the bus to go idle, indicated by the MASTER_BUSY bit being + * cleared. + * + * Returns -EBUSY if the bus cannot be bought to idle + */ +static int exynos5_i2c_wait_bus_idle(struct exynos5_i2c *i2c) +{ + unsigned long stop_time; + u32 trans_status; + + /* wait for 100 milli seconds for the bus to be idle */ + stop_time = jiffies + msecs_to_jiffies(100) + 1; + do { + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); + if (!(trans_status & HSI2C_MASTER_BUSY)) + return 0; + + usleep_range(50, 200); + } while (time_before(jiffies, stop_time)); + + return -EBUSY; +} + +/* + * exynos5_i2c_message_start: Configures the bus and starts the xfer + * i2c: struct exynos5_i2c pointer for the current bus + * stop: Enables stop after transfer if set. Set for last transfer of + * in the list of messages. + * + * Configures the bus for read/write function + * Sets chip address to talk to, message length to be sent. + * Enables appropriate interrupts and sends start xfer command. 
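exynos5_i2c_wait_bus_idle() above polls MASTER_BUSY against a jiffies deadline instead of sleeping on an interrupt. A generic sketch of that bounded-poll idiom (the busy predicate is illustrative):

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustrative: poll a busy predicate for up to timeout_ms, sleeping
 * briefly between reads so the CPU is not spun while waiting. */
static int wait_until_idle(bool (*busy)(void *ctx), void *ctx,
                           unsigned int timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms) + 1;

        do {
                if (!busy(ctx))
                        return 0;
                usleep_range(50, 200);  /* same window the driver uses */
        } while (time_before(jiffies, deadline));

        return -EBUSY;
}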
+ */ +static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) +{ + u32 i2c_ctl; + u32 int_en = HSI2C_INT_I2C_EN; + u32 i2c_auto_conf = 0; + u32 fifo_ctl; + unsigned long flags; + + i2c_ctl = readl(i2c->regs + HSI2C_CTL); + i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON); + fifo_ctl = HSI2C_RXFIFO_EN | HSI2C_TXFIFO_EN; + + if (i2c->msg->flags & I2C_M_RD) { + i2c_ctl |= HSI2C_RXCHON; + + i2c_auto_conf = HSI2C_READ_WRITE; + + fifo_ctl |= HSI2C_RXFIFO_TRIGGER_LEVEL(HSI2C_DEF_TXFIFO_LVL); + int_en |= (HSI2C_INT_RX_ALMOSTFULL_EN | + HSI2C_INT_TRAILING_EN); + } else { + i2c_ctl |= HSI2C_TXCHON; + + fifo_ctl |= HSI2C_TXFIFO_TRIGGER_LEVEL(HSI2C_DEF_RXFIFO_LVL); + int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN; + } + + writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR); + + writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL); + writel(i2c_ctl, i2c->regs + HSI2C_CTL); + + + /* + * Enable interrupts before starting the transfer so that we don't + * miss any INT_I2C interrupts. + */ + spin_lock_irqsave(&i2c->lock, flags); + writel(int_en, i2c->regs + HSI2C_INT_ENABLE); + + if (stop == 1) + i2c_auto_conf |= HSI2C_STOP_AFTER_TRANS; + i2c_auto_conf |= i2c->msg->len; + i2c_auto_conf |= HSI2C_MASTER_RUN; + writel(i2c_auto_conf, i2c->regs + HSI2C_AUTO_CONF); + spin_unlock_irqrestore(&i2c->lock, flags); +} + +static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c, + struct i2c_msg *msgs, int stop) +{ + unsigned long timeout; + int ret; + + i2c->msg = msgs; + i2c->msg_ptr = 0; + i2c->trans_done = 0; + + reinit_completion(&i2c->msg_complete); + + exynos5_i2c_message_start(i2c, stop); + + timeout = wait_for_completion_timeout(&i2c->msg_complete, + EXYNOS5_I2C_TIMEOUT); + if (timeout == 0) + ret = -ETIMEDOUT; + else + ret = i2c->state; + + /* + * If this is the last message to be transfered (stop == 1) + * Then check if the bus can be brought back to idle. + */ + if (ret == 0 && stop) + ret = exynos5_i2c_wait_bus_idle(i2c); + + if (ret < 0) { + exynos5_i2c_reset(i2c); + if (ret == -ETIMEDOUT) + dev_warn(i2c->dev, "%s timeout\n", + (msgs->flags & I2C_M_RD) ? 
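+ *
+ * The interrupt enable and the HSI2C_AUTO_CONF write that actually
+ * starts the transfer are performed under i2c->lock, with interrupts
+ * enabled first so that no INT_I2C event is missed.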
"rx" : "tx"); + } + + /* Return the state as in interrupt routine */ + return ret; +} + +static int exynos5_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct exynos5_i2c *i2c = (struct exynos5_i2c *)adap->algo_data; + int i = 0, ret = 0, stop = 0; + + if (i2c->suspended) { + dev_err(i2c->dev, "HS-I2C is not initialzed.\n"); + return -EIO; + } + + clk_prepare_enable(i2c->clk); + + for (i = 0; i < num; i++, msgs++) { + stop = (i == num - 1); + + ret = exynos5_i2c_xfer_msg(i2c, msgs, stop); + + if (ret < 0) + goto out; + } + + if (i == num) { + ret = num; + } else { + /* Only one message, cannot access the device */ + if (i == 1) + ret = -EREMOTEIO; + else + ret = i; + + dev_warn(i2c->dev, "xfer message failed\n"); + } + + out: + clk_disable_unprepare(i2c->clk); + return ret; +} + +static u32 exynos5_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); +} + +static const struct i2c_algorithm exynos5_i2c_algorithm = { + .master_xfer = exynos5_i2c_xfer, + .functionality = exynos5_i2c_func, +}; + +static int exynos5_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct exynos5_i2c *i2c; + struct resource *mem; + unsigned int op_clock; + int ret; + + i2c = devm_kzalloc(&pdev->dev, sizeof(struct exynos5_i2c), GFP_KERNEL); + if (!i2c) { + dev_err(&pdev->dev, "no memory for state\n"); + return -ENOMEM; + } + + if (of_property_read_u32(np, "clock-frequency", &op_clock)) { + i2c->speed_mode = HSI2C_FAST_SPD; + i2c->fs_clock = HSI2C_FS_TX_CLOCK; + } else { + if (op_clock >= HSI2C_HS_TX_CLOCK) { + i2c->speed_mode = HSI2C_HIGH_SPD; + i2c->fs_clock = HSI2C_FS_TX_CLOCK; + i2c->hs_clock = op_clock; + } else { + i2c->speed_mode = HSI2C_FAST_SPD; + i2c->fs_clock = op_clock; + } + } + + strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name)); + i2c->adap.owner = THIS_MODULE; + i2c->adap.algo = &exynos5_i2c_algorithm; + i2c->adap.retries = 3; + + i2c->dev = &pdev->dev; + i2c->clk = devm_clk_get(&pdev->dev, "hsi2c"); + if (IS_ERR(i2c->clk)) { + dev_err(&pdev->dev, "cannot get clock\n"); + return -ENOENT; + } + + clk_prepare_enable(i2c->clk); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(i2c->regs)) { + ret = PTR_ERR(i2c->regs); + goto err_clk; + } + + i2c->adap.dev.of_node = np; + i2c->adap.algo_data = i2c; + i2c->adap.dev.parent = &pdev->dev; + + /* Clear pending interrupts from u-boot or misc causes */ + exynos5_i2c_clr_pend_irq(i2c); + + spin_lock_init(&i2c->lock); + init_completion(&i2c->msg_complete); + + i2c->irq = ret = platform_get_irq(pdev, 0); + if (ret <= 0) { + dev_err(&pdev->dev, "cannot find HS-I2C IRQ\n"); + ret = -EINVAL; + goto err_clk; + } + + ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&pdev->dev), i2c); + + if (ret != 0) { + dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq); + goto err_clk; + } + + ret = exynos5_hsi2c_clock_setup(i2c); + if (ret) + goto err_clk; + + exynos5_i2c_init(i2c); + + ret = i2c_add_adapter(&i2c->adap); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add bus to i2c core\n"); + goto err_clk; + } + + platform_set_drvdata(pdev, i2c); + + err_clk: + clk_disable_unprepare(i2c->clk); + return ret; +} + +static int exynos5_i2c_remove(struct platform_device *pdev) +{ + struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c->adap); + + return 0; +} + +static int 
exynos5_i2c_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + + i2c->suspended = 1; + + return 0; +} + +static int exynos5_i2c_resume_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct exynos5_i2c *i2c = platform_get_drvdata(pdev); + int ret = 0; + + clk_prepare_enable(i2c->clk); + + ret = exynos5_hsi2c_clock_setup(i2c); + if (ret) { + clk_disable_unprepare(i2c->clk); + return ret; + } + + exynos5_i2c_init(i2c); + clk_disable_unprepare(i2c->clk); + i2c->suspended = 0; + + return 0; +} + +static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq, + exynos5_i2c_resume_noirq); + +static struct platform_driver exynos5_i2c_driver = { + .probe = exynos5_i2c_probe, + .remove = exynos5_i2c_remove, + .driver = { + .owner = THIS_MODULE, + .name = "exynos5-hsi2c", + .pm = &exynos5_i2c_dev_pm_ops, + .of_match_table = exynos5_i2c_match, + }, +}; + +module_platform_driver(exynos5_i2c_driver); + +MODULE_DESCRIPTION("Exynos5 HS-I2C Bus driver"); +MODULE_AUTHOR("Naveen Krishna Chatradhi, <ch.naveen@samsung.com>"); +MODULE_AUTHOR("Taekgyun Ko, <taeggyun.ko@samsung.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index bfa02c6c2dd..d9f7e186a4c 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/gpio.h> +#include <linux/of.h> #include <linux/of_gpio.h> struct i2c_gpio_private_data { diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 4296d172127..737e2986688 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -59,6 +59,7 @@ Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes Coleto Creek (PCH) 0x23b0 32 hard yes yes yes + Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes Features supported by this driver: Software PEC no @@ -177,6 +178,7 @@ #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 +#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 struct i801_mux_config { char *gpio_chip; @@ -819,6 +821,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index d3e9cc3153a..8be7e42aa4d 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -911,7 +911,7 @@ static struct platform_driver mv64xxx_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = MV64XXX_I2C_CTLR_NAME, - .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table), + .of_match_table = mv64xxx_i2c_of_match_table, }, }; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 3aedd86a646..0cde4e6ab2b 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -1,6 +1,7 @@ /* * Freescale MXS I2C bus driver * + * Copyright (C) 2012-2013 Marek Vasut <marex@denx.de> * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K. 
* * based on a (non-working) driver which was: @@ -34,10 +35,12 @@ #define MXS_I2C_CTRL0 (0x00) #define MXS_I2C_CTRL0_SET (0x04) +#define MXS_I2C_CTRL0_CLR (0x08) #define MXS_I2C_CTRL0_SFTRST 0x80000000 #define MXS_I2C_CTRL0_RUN 0x20000000 #define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000 +#define MXS_I2C_CTRL0_PIO_MODE 0x01000000 #define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000 #define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000 #define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000 @@ -64,13 +67,13 @@ #define MXS_I2C_CTRL1_SLAVE_IRQ 0x01 #define MXS_I2C_STAT (0x50) +#define MXS_I2C_STAT_GOT_A_NAK 0x10000000 #define MXS_I2C_STAT_BUS_BUSY 0x00000800 #define MXS_I2C_STAT_CLK_GEN_BUSY 0x00000400 -#define MXS_I2C_DATA (0xa0) +#define MXS_I2C_DATA(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x60 : 0xa0) -#define MXS_I2C_DEBUG0 (0xb0) -#define MXS_I2C_DEBUG0_CLR (0xb8) +#define MXS_I2C_DEBUG0_CLR(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x78 : 0xb8) #define MXS_I2C_DEBUG0_DMAREQ 0x80000000 @@ -95,10 +98,17 @@ #define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \ MXS_I2C_CTRL0_MASTER_MODE) +enum mxs_i2c_devtype { + MXS_I2C_UNKNOWN = 0, + MXS_I2C_V1, + MXS_I2C_V2, +}; + /** * struct mxs_i2c_dev - per device, private MXS-I2C data * * @dev: driver model device node + * @dev_type: distinguish i.MX23/i.MX28 features * @regs: IO registers pointer * @cmd_complete: completion object for transaction wait * @cmd_err: error code for last transaction @@ -106,6 +116,7 @@ */ struct mxs_i2c_dev { struct device *dev; + enum mxs_i2c_devtype dev_type; void __iomem *regs; struct completion cmd_complete; int cmd_err; @@ -291,48 +302,11 @@ write_init_pio_fail: return -EINVAL; } -static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c) +static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); - while (!(readl(i2c->regs + MXS_I2C_DEBUG0) & - MXS_I2C_DEBUG0_DMAREQ)) { - if (time_after(jiffies, timeout)) - return -ETIMEDOUT; - cond_resched(); - } - - return 0; -} - -static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c, int last) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(1000); - - /* - * We do not use interrupts in the PIO mode. Due to the - * maximum transfer length being 8 bytes in PIO mode, the - * overhead of interrupt would be too large and this would - * neglect the gain from using the PIO mode. - */ - - while (!(readl(i2c->regs + MXS_I2C_CTRL1) & - MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) { - if (time_after(jiffies, timeout)) - return -ETIMEDOUT; - cond_resched(); - } - - writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ, - i2c->regs + MXS_I2C_CTRL1_CLR); - - /* - * When ending a transfer with a stop, we have to wait for the bus to - * go idle before we report the transfer as completed. Otherwise the - * start of the next transfer may race with the end of the current one. - */ - while (last && (readl(i2c->regs + MXS_I2C_STAT) & - (MXS_I2C_STAT_BUS_BUSY | MXS_I2C_STAT_CLK_GEN_BUSY))) { + while (readl(i2c->regs + MXS_I2C_CTRL0) & MXS_I2C_CTRL0_RUN) { if (time_after(jiffies, timeout)) return -ETIMEDOUT; cond_resched(); @@ -370,106 +344,215 @@ static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd) writel(reg, i2c->regs + MXS_I2C_CTRL0); } +/* + * Start WRITE transaction on the I2C bus. By studying i.MX23 datasheet, + * CTRL0::PIO_MODE bit description clarifies the order in which the registers + * must be written during PIO mode operation. First, the CTRL0 register has + * to be programmed with all the necessary bits but the RUN bit. 
Then the + * payload has to be written into the DATA register. Finally, the transmission + * is executed by setting the RUN bit in CTRL0. + */ +static void mxs_i2c_pio_trigger_write_cmd(struct mxs_i2c_dev *i2c, u32 cmd, + u32 data) +{ + writel(cmd, i2c->regs + MXS_I2C_CTRL0); + + if (i2c->dev_type == MXS_I2C_V1) + writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_SET); + + writel(data, i2c->regs + MXS_I2C_DATA(i2c)); + writel(MXS_I2C_CTRL0_RUN, i2c->regs + MXS_I2C_CTRL0_SET); +} + static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, uint32_t flags) { struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); uint32_t addr_data = msg->addr << 1; uint32_t data = 0; - int i, shifts_left, ret; + int i, ret, xlen = 0, xmit = 0; + uint32_t start; /* Mute IRQs coming from this block. */ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR); + /* + * MX23 idea: + * - Enable CTRL0::PIO_MODE (1 << 24) + * - Enable CTRL1::ACK_MODE (1 << 27) + * + * WARNING! The MX23 is broken in some way, even if it claims + * to support PIO, when we try to transfer any amount of data + * that is not aligned to 4 bytes, the DMA engine will have + * bits in DEBUG1::DMA_BYTES_ENABLES still set even after the + * transfer. This in turn will mess up the next transfer as + * the block it emit one byte write onto the bus terminated + * with a NAK+STOP. A possible workaround is to reset the IP + * block after every PIO transmission, which might just work. + * + * NOTE: The CTRL0::PIO_MODE description is important, since + * it outlines how the PIO mode is really supposed to work. + */ if (msg->flags & I2C_M_RD) { + /* + * PIO READ transfer: + * + * This transfer MUST be limited to 4 bytes maximum. It is not + * possible to transfer more than four bytes via PIO, since we + * can not in any way make sure we can read the data from the + * DATA register fast enough. Besides, the RX FIFO is only four + * bytes deep, thus we can only really read up to four bytes at + * time. Finally, there is no bit indicating us that new data + * arrived at the FIFO and can thus be fetched from the DATA + * register. + */ + BUG_ON(msg->len > 4); + addr_data |= I2C_SMBUS_READ; /* SELECT command. */ - mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_SELECT); - - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - - writel(addr_data, i2c->regs + MXS_I2C_DATA); - writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR); + mxs_i2c_pio_trigger_write_cmd(i2c, MXS_CMD_I2C_SELECT, + addr_data); - ret = mxs_i2c_pio_wait_cplt(i2c, 0); - if (ret) - return ret; - - if (mxs_i2c_pio_check_error_state(i2c)) + ret = mxs_i2c_pio_wait_xfer_end(i2c); + if (ret) { + dev_err(i2c->dev, + "PIO: Failed to send SELECT command!\n"); goto cleanup; + } /* READ command. */ mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_READ | flags | MXS_I2C_CTRL0_XFER_COUNT(msg->len)); + ret = mxs_i2c_pio_wait_xfer_end(i2c); + if (ret) { + dev_err(i2c->dev, + "PIO: Failed to send SELECT command!\n"); + goto cleanup; + } + + data = readl(i2c->regs + MXS_I2C_DATA(i2c)); for (i = 0; i < msg->len; i++) { - if ((i & 3) == 0) { - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - data = readl(i2c->regs + MXS_I2C_DATA); - writel(MXS_I2C_DEBUG0_DMAREQ, - i2c->regs + MXS_I2C_DEBUG0_CLR); - } msg->buf[i] = data & 0xff; data >>= 8; } } else { + /* + * PIO WRITE transfer: + * + * The code below implements clock stretching to circumvent + * the possibility of kernel not being able to supply data + * fast enough. 
It is possible to transfer arbitrary amount + * of data using PIO write. + */ addr_data |= I2C_SMBUS_WRITE; - /* WRITE command. */ - mxs_i2c_pio_trigger_cmd(i2c, - MXS_CMD_I2C_WRITE | flags | - MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1)); - /* * The LSB of data buffer is the first byte blasted across * the bus. Higher order bytes follow. Thus the following * filling schematic. */ + data = addr_data << 24; + + /* Start the transfer with START condition. */ + start = MXS_I2C_CTRL0_PRE_SEND_START; + + /* If the transfer is long, use clock stretching. */ + if (msg->len > 3) + start |= MXS_I2C_CTRL0_RETAIN_CLOCK; + for (i = 0; i < msg->len; i++) { data >>= 8; data |= (msg->buf[i] << 24); - if ((i & 3) == 2) { - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - writel(data, i2c->regs + MXS_I2C_DATA); - writel(MXS_I2C_DEBUG0_DMAREQ, - i2c->regs + MXS_I2C_DEBUG0_CLR); + + xmit = 0; + + /* This is the last transfer of the message. */ + if (i + 1 == msg->len) { + /* Add optional STOP flag. */ + start |= flags; + /* Remove RETAIN_CLOCK bit. */ + start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK; + xmit = 1; } - } - shifts_left = 24 - (i & 3) * 8; - if (shifts_left) { - data >>= shifts_left; - ret = mxs_i2c_pio_wait_dmareq(i2c); - if (ret) - return ret; - writel(data, i2c->regs + MXS_I2C_DATA); + /* Four bytes are ready in the "data" variable. */ + if ((i & 3) == 2) + xmit = 1; + + /* Nothing interesting happened, continue stuffing. */ + if (!xmit) + continue; + + /* + * Compute the size of the transfer and shift the + * data accordingly. + * + * i = (4k + 0) .... xlen = 2 + * i = (4k + 1) .... xlen = 3 + * i = (4k + 2) .... xlen = 4 + * i = (4k + 3) .... xlen = 1 + */ + + if ((i % 4) == 3) + xlen = 1; + else + xlen = (i % 4) + 2; + + data >>= (4 - xlen) * 8; + + dev_dbg(i2c->dev, + "PIO: len=%i pos=%i total=%i [W%s%s%s]\n", + xlen, i, msg->len, + start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "", + start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "", + start & MXS_I2C_CTRL0_RETAIN_CLOCK ? "C" : ""); + writel(MXS_I2C_DEBUG0_DMAREQ, - i2c->regs + MXS_I2C_DEBUG0_CLR); + i2c->regs + MXS_I2C_DEBUG0_CLR(i2c)); + + mxs_i2c_pio_trigger_write_cmd(i2c, + start | MXS_I2C_CTRL0_MASTER_MODE | + MXS_I2C_CTRL0_DIRECTION | + MXS_I2C_CTRL0_XFER_COUNT(xlen), data); + + /* The START condition is sent only once. */ + start &= ~MXS_I2C_CTRL0_PRE_SEND_START; + + /* Wait for the end of the transfer. */ + ret = mxs_i2c_pio_wait_xfer_end(i2c); + if (ret) { + dev_err(i2c->dev, + "PIO: Failed to finish WRITE cmd!\n"); + break; + } + + /* Check NAK here. */ + ret = readl(i2c->regs + MXS_I2C_STAT) & + MXS_I2C_STAT_GOT_A_NAK; + if (ret) { + ret = -ENXIO; + goto cleanup; + } } } - ret = mxs_i2c_pio_wait_cplt(i2c, flags & MXS_I2C_CTRL0_POST_SEND_STOP); - if (ret) - return ret; - /* make sure we capture any occurred error into cmd_err */ - mxs_i2c_pio_check_error_state(i2c); + ret = mxs_i2c_pio_check_error_state(i2c); cleanup: /* Clear any dangling IRQs and re-enable interrupts. */ writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR); writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); - return 0; + /* Clear the PIO_MODE on i.MX23 */ + if (i2c->dev_type == MXS_I2C_V1) + writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_CLR); + + return ret; } /* @@ -479,8 +562,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); - int ret, err; + int ret; int flags; + int use_pio = 0; flags = stop ? 
MXS_I2C_CTRL0_POST_SEND_STOP : 0;
@@ -491,19 +575,21 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
 return -EINVAL;
 /*
- * The current boundary to select between PIO/DMA transfer method
- * is set to 8 bytes, transfers shorter than 8 bytes are transfered
- * using PIO mode while longer transfers use DMA. The 8 byte border is
- * based on this empirical measurement and a lot of previous frobbing.
+ * The MX28 I2C IP block can only do PIO READ for transfers of up to
+ * 4 bytes in length. The write transfer is not limited as it can use
+ * clock stretching to avoid FIFO underruns.
 */
+ if ((msg->flags & I2C_M_RD) && (msg->len <= 4))
+ use_pio = 1;
+ if (!(msg->flags & I2C_M_RD) && (msg->len < 7))
+ use_pio = 1;
+
 i2c->cmd_err = 0;
- if (0) { /* disable PIO mode until a proper fix is made */
+ if (use_pio) {
 ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
- if (ret) {
- err = mxs_i2c_reset(i2c);
- if (err)
- return err;
- }
+ /* No need to reset the block if NAK was received. */
+ if (ret && (ret != -ENXIO))
+ mxs_i2c_reset(i2c);
 } else {
 reinit_completion(&i2c->cmd_complete);
 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -514,9 +600,11 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
 msecs_to_jiffies(1000));
 if (ret == 0)
 goto timeout;
+
+ ret = i2c->cmd_err;
 }
- if (i2c->cmd_err == -ENXIO) {
+ if (ret == -ENXIO) {
 /*
 * If the transfer fails with a NAK from the slave the
 * controller halts until it gets told to return to idle state.
@@ -525,7 +613,19 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
 i2c->regs + MXS_I2C_CTRL1_SET);
 }
- ret = i2c->cmd_err;
+ /*
+ * WARNING!
+ * The i.MX23 is strange. After each and every operation, its I2C IP
+ * block must be reset, otherwise the IP block will misbehave. This can
+ * be observed on the bus by the block sending out one single byte onto
+ * the bus. In case such an error happens, bit 27 will be set in the
+ * DEBUG0 register. This bit is not documented in the i.MX23 datasheet
+ * and is marked as "TBD" instead. To reset this bit to a correct state,
+ * reset the whole block. Since the block reset does not take long, do
+ * reset the block after every transfer to play safe.
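+ *
+ * This is why the reset below is done only for MXS_I2C_V1 (i.MX23);
+ * the i.MX28 (MXS_I2C_V2) does not need it.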
+ */ + if (i2c->dev_type == MXS_I2C_V1) + mxs_i2c_reset(i2c); dev_dbg(i2c->dev, "Done with err=%d\n", ret); @@ -680,8 +780,28 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c) return 0; } +static struct platform_device_id mxs_i2c_devtype[] = { + { + .name = "imx23-i2c", + .driver_data = MXS_I2C_V1, + }, { + .name = "imx28-i2c", + .driver_data = MXS_I2C_V2, + }, { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, mxs_i2c_devtype); + +static const struct of_device_id mxs_i2c_dt_ids[] = { + { .compatible = "fsl,imx23-i2c", .data = &mxs_i2c_devtype[0], }, + { .compatible = "fsl,imx28-i2c", .data = &mxs_i2c_devtype[1], }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids); + static int mxs_i2c_probe(struct platform_device *pdev) { + const struct of_device_id *of_id = + of_match_device(mxs_i2c_dt_ids, &pdev->dev); struct device *dev = &pdev->dev; struct mxs_i2c_dev *i2c; struct i2c_adapter *adap; @@ -693,6 +813,11 @@ static int mxs_i2c_probe(struct platform_device *pdev) if (!i2c) return -ENOMEM; + if (of_id) { + const struct platform_device_id *device_id = of_id->data; + i2c->dev_type = device_id->driver_data; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); @@ -768,12 +893,6 @@ static int mxs_i2c_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mxs_i2c_dt_ids[] = { - { .compatible = "fsl,imx28-i2c", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids); - static struct platform_driver mxs_i2c_driver = { .driver = { .name = DRIVER_NAME, @@ -796,6 +915,7 @@ static void __exit mxs_i2c_exit(void) } module_exit(mxs_i2c_exit); +MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); MODULE_DESCRIPTION("MXS I2C Bus Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 1a9ea25f231..c9a352f0a9a 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -23,6 +23,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> +#include <linux/of.h> #define I2C_PNX_TIMEOUT_DEFAULT 10 /* msec */ #define I2C_PNX_SPEED_KHZ_DEFAULT 100 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d2fe11da5e8..2c2fd7c2b11 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -33,6 +33,7 @@ #include <linux/i2c/i2c-rcar.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -102,8 +103,8 @@ enum { #define ID_NACK (1 << 4) enum rcar_i2c_type { - I2C_RCAR_H1, - I2C_RCAR_H2, + I2C_RCAR_GEN1, + I2C_RCAR_GEN2, }; struct rcar_i2c_priv { @@ -226,22 +227,23 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, u32 bus_speed, struct device *dev) { - struct clk *clkp = clk_get(NULL, "peripheral_clk"); + struct clk *clkp = clk_get(dev, NULL); u32 scgd, cdf; u32 round, ick; u32 scl; u32 cdf_width; + unsigned long rate; - if (!clkp) { - dev_err(dev, "there is no peripheral_clk\n"); - return -EIO; + if (IS_ERR(clkp)) { + dev_err(dev, "couldn't get clock\n"); + return PTR_ERR(clkp); } switch (priv->devtype) { - case I2C_RCAR_H1: + case I2C_RCAR_GEN1: cdf_width = 2; break; - case I2C_RCAR_H2: + case I2C_RCAR_GEN2: cdf_width = 3; break; default: @@ -264,15 +266,14 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, * clkp : peripheral_clk * F[] : integer up-valuation */ - for (cdf = 0; 
cdf < (1 << cdf_width); cdf++) { - ick = clk_get_rate(clkp) / (1 + cdf); - if (ick < 20000000) - goto ick_find; + rate = clk_get_rate(clkp); + cdf = rate / 20000000; + if (cdf >= 1 << cdf_width) { + dev_err(dev, "Input clock %lu too high\n", rate); + return -EIO; } - dev_err(dev, "there is no best CDF\n"); - return -EIO; + ick = rate / (cdf + 1); -ick_find: /* * it is impossible to calculate large scale * number on u32. separate it @@ -290,6 +291,12 @@ ick_find: * * Calculation result (= SCL) should be less than * bus_speed for hardware safety + * + * We could use something along the lines of + * div = ick / (bus_speed + 1) + 1; + * scgd = (div - 20 - round + 7) / 8; + * scl = ick / (20 + (scgd * 8) + round); + * (not fully verified) but that would get pretty involved */ for (scgd = 0; scgd < 0x40; scgd++) { scl = ick / (20 + (scgd * 8) + round); @@ -306,7 +313,7 @@ scgd_find: /* * keep icccr value */ - priv->icccr = (scgd << (cdf_width) | cdf); + priv->icccr = scgd << cdf_width | cdf; return 0; } @@ -632,6 +639,15 @@ static const struct i2c_algorithm rcar_i2c_algo = { .functionality = rcar_i2c_func, }; +static const struct of_device_id rcar_i2c_dt_ids[] = { + { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 }, + { .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 }, + { .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 }, + { .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 }, + {}, +}; +MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids); + static int rcar_i2c_probe(struct platform_device *pdev) { struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -649,10 +665,15 @@ static int rcar_i2c_probe(struct platform_device *pdev) } bus_speed = 100000; /* default 100 kHz */ - if (pdata && pdata->bus_speed) + ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed); + if (ret < 0 && pdata && pdata->bus_speed) bus_speed = pdata->bus_speed; - priv->devtype = platform_get_device_id(pdev)->driver_data; + if (pdev->dev.of_node) + priv->devtype = (long)of_match_device(rcar_i2c_dt_ids, + dev)->data; + else + priv->devtype = platform_get_device_id(pdev)->driver_data; ret = rcar_i2c_clock_calculate(priv, bus_speed, dev); if (ret < 0) @@ -673,6 +694,7 @@ static int rcar_i2c_probe(struct platform_device *pdev) adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->retries = 3; adap->dev.parent = dev; + adap->dev.of_node = dev->of_node; i2c_set_adapdata(adap, priv); strlcpy(adap->name, pdev->name, sizeof(adap->name)); @@ -709,9 +731,9 @@ static int rcar_i2c_remove(struct platform_device *pdev) } static struct platform_device_id rcar_i2c_id_table[] = { - { "i2c-rcar", I2C_RCAR_H1 }, - { "i2c-rcar_h1", I2C_RCAR_H1 }, - { "i2c-rcar_h2", I2C_RCAR_H2 }, + { "i2c-rcar", I2C_RCAR_GEN1 }, + { "i2c-rcar_gen1", I2C_RCAR_GEN1 }, + { "i2c-rcar_gen2", I2C_RCAR_GEN2 }, {}, }; MODULE_DEVICE_TABLE(platform, rcar_i2c_id_table); @@ -720,6 +742,7 @@ static struct platform_driver rcar_i2c_driver = { .driver = { .name = "i2c-rcar", .owner = THIS_MODULE, + .of_match_table = rcar_i2c_dt_ids, }, .probe = rcar_i2c_probe, .remove = rcar_i2c_remove, diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 3747b9bf67d..bf8fb94ebc5 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -36,6 +36,7 @@ #include <linux/cpufreq.h> #include <linux/slab.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/pinctrl/consumer.h> diff --git 
a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index c447e8d40b7..59923551413 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -223,7 +223,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, goto out; obj = pkg->package.elements + 1; - if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "Invalid argument type")); result = -EIO; goto out; @@ -235,7 +235,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: case I2C_SMBUS_WORD_DATA: - if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "Invalid argument type")); result = -EIO; goto out; @@ -246,7 +246,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, data->byte = obj->integer.value; break; case I2C_SMBUS_BLOCK_DATA: - if (obj == NULL || obj->type != ACPI_TYPE_BUFFER) { + if (obj->type != ACPI_TYPE_BUFFER) { ACPI_ERROR((AE_INFO, "Invalid argument type")); result = -EIO; goto out; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 55110ddbed1..1d79585ba4b 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -235,7 +235,7 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) int offset; /* Get clock rate after clock is enabled */ - clk_enable(pd->clk); + clk_prepare_enable(pd->clk); i2c_clk_khz = clk_get_rate(pd->clk) / 1000; i2c_clk_khz /= pd->clks_per_count; @@ -270,14 +270,14 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) pd->icic &= ~ICIC_ICCHB8; out: - clk_disable(pd->clk); + clk_disable_unprepare(pd->clk); } static void activate_ch(struct sh_mobile_i2c_data *pd) { /* Wake up device and enable clock */ pm_runtime_get_sync(pd->dev); - clk_enable(pd->clk); + clk_prepare_enable(pd->clk); /* Enable channel and configure rx ack */ iic_set_clr(pd, ICCR, ICCR_ICE, 0); @@ -300,7 +300,7 @@ static void deactivate_ch(struct sh_mobile_i2c_data *pd) iic_set_clr(pd, ICCR, 0, ICCR_ICE); /* Disable clock and mark device as idle */ - clk_disable(pd->clk); + clk_disable_unprepare(pd->clk); pm_runtime_put_sync(pd->dev); } diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c new file mode 100644 index 00000000000..9cf715d6955 --- /dev/null +++ b/drivers/i2c/busses/i2c-st.c @@ -0,0 +1,872 @@ +/* + * Copyright (C) 2013 STMicroelectronics + * + * I2C master mode controller driver, used in STMicroelectronics devices. + * + * Author: Maxime Coquelin <maxime.coquelin@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/i2c.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +/* SSC registers */ +#define SSC_BRG 0x000 +#define SSC_TBUF 0x004 +#define SSC_RBUF 0x008 +#define SSC_CTL 0x00C +#define SSC_IEN 0x010 +#define SSC_STA 0x014 +#define SSC_I2C 0x018 +#define SSC_SLAD 0x01C +#define SSC_REP_START_HOLD 0x020 +#define SSC_START_HOLD 0x024 +#define SSC_REP_START_SETUP 0x028 +#define SSC_DATA_SETUP 0x02C +#define SSC_STOP_SETUP 0x030 +#define SSC_BUS_FREE 0x034 +#define SSC_TX_FSTAT 0x038 +#define SSC_RX_FSTAT 0x03C +#define SSC_PRE_SCALER_BRG 0x040 +#define SSC_CLR 0x080 +#define SSC_NOISE_SUPP_WIDTH 0x100 +#define SSC_PRSCALER 0x104 +#define SSC_NOISE_SUPP_WIDTH_DATAOUT 0x108 +#define SSC_PRSCALER_DATAOUT 0x10c + +/* SSC Control */ +#define SSC_CTL_DATA_WIDTH_9 0x8 +#define SSC_CTL_DATA_WIDTH_MSK 0xf +#define SSC_CTL_BM 0xf +#define SSC_CTL_HB BIT(4) +#define SSC_CTL_PH BIT(5) +#define SSC_CTL_PO BIT(6) +#define SSC_CTL_SR BIT(7) +#define SSC_CTL_MS BIT(8) +#define SSC_CTL_EN BIT(9) +#define SSC_CTL_LPB BIT(10) +#define SSC_CTL_EN_TX_FIFO BIT(11) +#define SSC_CTL_EN_RX_FIFO BIT(12) +#define SSC_CTL_EN_CLST_RX BIT(13) + +/* SSC Interrupt Enable */ +#define SSC_IEN_RIEN BIT(0) +#define SSC_IEN_TIEN BIT(1) +#define SSC_IEN_TEEN BIT(2) +#define SSC_IEN_REEN BIT(3) +#define SSC_IEN_PEEN BIT(4) +#define SSC_IEN_AASEN BIT(6) +#define SSC_IEN_STOPEN BIT(7) +#define SSC_IEN_ARBLEN BIT(8) +#define SSC_IEN_NACKEN BIT(10) +#define SSC_IEN_REPSTRTEN BIT(11) +#define SSC_IEN_TX_FIFO_HALF BIT(12) +#define SSC_IEN_RX_FIFO_HALF_FULL BIT(14) + +/* SSC Status */ +#define SSC_STA_RIR BIT(0) +#define SSC_STA_TIR BIT(1) +#define SSC_STA_TE BIT(2) +#define SSC_STA_RE BIT(3) +#define SSC_STA_PE BIT(4) +#define SSC_STA_CLST BIT(5) +#define SSC_STA_AAS BIT(6) +#define SSC_STA_STOP BIT(7) +#define SSC_STA_ARBL BIT(8) +#define SSC_STA_BUSY BIT(9) +#define SSC_STA_NACK BIT(10) +#define SSC_STA_REPSTRT BIT(11) +#define SSC_STA_TX_FIFO_HALF BIT(12) +#define SSC_STA_TX_FIFO_FULL BIT(13) +#define SSC_STA_RX_FIFO_HALF BIT(14) + +/* SSC I2C Control */ +#define SSC_I2C_I2CM BIT(0) +#define SSC_I2C_STRTG BIT(1) +#define SSC_I2C_STOPG BIT(2) +#define SSC_I2C_ACKG BIT(3) +#define SSC_I2C_AD10 BIT(4) +#define SSC_I2C_TXENB BIT(5) +#define SSC_I2C_REPSTRTG BIT(11) +#define SSC_I2C_SLAVE_DISABLE BIT(12) + +/* SSC Tx FIFO Status */ +#define SSC_TX_FSTAT_STATUS 0x07 + +/* SSC Rx FIFO Status */ +#define SSC_RX_FSTAT_STATUS 0x07 + +/* SSC Clear bit operation */ +#define SSC_CLR_SSCAAS BIT(6) +#define SSC_CLR_SSCSTOP BIT(7) +#define SSC_CLR_SSCARBL BIT(8) +#define SSC_CLR_NACK BIT(10) +#define SSC_CLR_REPSTRT BIT(11) + +/* SSC Clock Prescaler */ +#define SSC_PRSC_VALUE 0x0f + + +#define SSC_TXFIFO_SIZE 0x8 +#define SSC_RXFIFO_SIZE 0x8 + +enum st_i2c_mode { + I2C_MODE_STANDARD, + I2C_MODE_FAST, + I2C_MODE_END, +}; + +/** + * struct st_i2c_timings - per-Mode tuning parameters + * @rate: I2C bus rate + * @rep_start_hold: I2C repeated start hold time requirement + * @rep_start_setup: I2C repeated start set up time requirement + * @start_hold: I2C start hold time requirement + * @data_setup_time: I2C data set up time requirement + * @stop_setup_time: I2C stop set up time requirement + * @bus_free_time: I2C bus free time requirement + * @sda_pulse_min_limit: I2C SDA pulse mini width limit + */ +struct 
st_i2c_timings { + u32 rate; + u32 rep_start_hold; + u32 rep_start_setup; + u32 start_hold; + u32 data_setup_time; + u32 stop_setup_time; + u32 bus_free_time; + u32 sda_pulse_min_limit; +}; + +/** + * struct st_i2c_client - client specific data + * @addr: 8-bit slave addr, including r/w bit + * @count: number of bytes to be transfered + * @xfered: number of bytes already transferred + * @buf: data buffer + * @result: result of the transfer + * @stop: last I2C msg to be sent, i.e. STOP to be generated + */ +struct st_i2c_client { + u8 addr; + u32 count; + u32 xfered; + u8 *buf; + int result; + bool stop; +}; + +/** + * struct st_i2c_dev - private data of the controller + * @adap: I2C adapter for this controller + * @dev: device for this controller + * @base: virtual memory area + * @complete: completion of I2C message + * @irq: interrupt line for th controller + * @clk: hw ssc block clock + * @mode: I2C mode of the controller. Standard or Fast only supported + * @scl_min_width_us: SCL line minimum pulse width in us + * @sda_min_width_us: SDA line minimum pulse width in us + * @client: I2C transfert information + * @busy: I2C transfer on-going + */ +struct st_i2c_dev { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + struct completion complete; + int irq; + struct clk *clk; + int mode; + u32 scl_min_width_us; + u32 sda_min_width_us; + struct st_i2c_client client; + bool busy; +}; + +static inline void st_i2c_set_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) | mask, reg); +} + +static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) & ~mask, reg); +} + +/* From I2C Specifications v0.5 */ +static struct st_i2c_timings i2c_timings[] = { + [I2C_MODE_STANDARD] = { + .rate = 100000, + .rep_start_hold = 4000, + .rep_start_setup = 4700, + .start_hold = 4000, + .data_setup_time = 250, + .stop_setup_time = 4000, + .bus_free_time = 4700, + }, + [I2C_MODE_FAST] = { + .rate = 400000, + .rep_start_hold = 600, + .rep_start_setup = 600, + .start_hold = 600, + .data_setup_time = 100, + .stop_setup_time = 600, + .bus_free_time = 1300, + }, +}; + +static void st_i2c_flush_rx_fifo(struct st_i2c_dev *i2c_dev) +{ + int count, i; + + /* + * Counter only counts up to 7 but fifo size is 8... + * When fifo is full, counter is 0 and RIR bit of status register is + * set + */ + if (readl_relaxed(i2c_dev->base + SSC_STA) & SSC_STA_RIR) + count = SSC_RXFIFO_SIZE; + else + count = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT) & + SSC_RX_FSTAT_STATUS; + + for (i = 0; i < count; i++) + readl_relaxed(i2c_dev->base + SSC_RBUF); +} + +static void st_i2c_soft_reset(struct st_i2c_dev *i2c_dev) +{ + /* + * FIFO needs to be emptied before reseting the IP, + * else the controller raises a BUSY error. 
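+ *
+ * The reset itself is performed by setting and then clearing the
+ * SSC_CTL_SR bit in the SSC_CTL register.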
+ */ + st_i2c_flush_rx_fifo(i2c_dev); + + st_i2c_set_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); + st_i2c_clr_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); +} + +/** + * st_i2c_hw_config() - Prepare SSC block, calculate and apply tuning timings + * @i2c_dev: Controller's private data + */ +static void st_i2c_hw_config(struct st_i2c_dev *i2c_dev) +{ + unsigned long rate; + u32 val, ns_per_clk; + struct st_i2c_timings *t = &i2c_timings[i2c_dev->mode]; + + st_i2c_soft_reset(i2c_dev); + + val = SSC_CLR_REPSTRT | SSC_CLR_NACK | SSC_CLR_SSCARBL | + SSC_CLR_SSCAAS | SSC_CLR_SSCSTOP; + writel_relaxed(val, i2c_dev->base + SSC_CLR); + + /* SSC Control register setup */ + val = SSC_CTL_PO | SSC_CTL_PH | SSC_CTL_HB | SSC_CTL_DATA_WIDTH_9; + writel_relaxed(val, i2c_dev->base + SSC_CTL); + + rate = clk_get_rate(i2c_dev->clk); + ns_per_clk = 1000000000 / rate; + + /* Baudrate */ + val = rate / (2 * t->rate); + writel_relaxed(val, i2c_dev->base + SSC_BRG); + + /* Pre-scaler baudrate */ + writel_relaxed(1, i2c_dev->base + SSC_PRE_SCALER_BRG); + + /* Enable I2C mode */ + writel_relaxed(SSC_I2C_I2CM, i2c_dev->base + SSC_I2C); + + /* Repeated start hold time */ + val = t->rep_start_hold / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_REP_START_HOLD); + + /* Repeated start set up time */ + val = t->rep_start_setup / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_REP_START_SETUP); + + /* Start hold time */ + val = t->start_hold / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_START_HOLD); + + /* Data set up time */ + val = t->data_setup_time / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_DATA_SETUP); + + /* Stop set up time */ + val = t->stop_setup_time / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_STOP_SETUP); + + /* Bus free time */ + val = t->bus_free_time / ns_per_clk; + writel_relaxed(val, i2c_dev->base + SSC_BUS_FREE); + + /* Prescalers set up */ + val = rate / 10000000; + writel_relaxed(val, i2c_dev->base + SSC_PRSCALER); + writel_relaxed(val, i2c_dev->base + SSC_PRSCALER_DATAOUT); + + /* Noise suppression witdh */ + val = i2c_dev->scl_min_width_us * rate / 100000000; + writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH); + + /* Noise suppression max output data delay width */ + val = i2c_dev->sda_min_width_us * rate / 100000000; + writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH_DATAOUT); +} + +static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev) +{ + u32 sta; + int i; + + for (i = 0; i < 10; i++) { + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (!(sta & SSC_STA_BUSY)) + return 0; + + usleep_range(2000, 4000); + } + + dev_err(i2c_dev->dev, "bus not free (status = 0x%08x)\n", sta); + + return -EBUSY; +} + +/** + * st_i2c_write_tx_fifo() - Write a byte in the Tx FIFO + * @i2c_dev: Controller's private data + * @byte: Data to write in the Tx FIFO + */ +static inline void st_i2c_write_tx_fifo(struct st_i2c_dev *i2c_dev, u8 byte) +{ + u16 tbuf = byte << 1; + + writel_relaxed(tbuf | 1, i2c_dev->base + SSC_TBUF); +} + +/** + * st_i2c_wr_fill_tx_fifo() - Fill the Tx FIFO in write mode + * @i2c_dev: Controller's private data + * + * This functions fills the Tx FIFO with I2C transfert buffer when + * in write mode. 
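+ *
+ * At most SSC_TXFIFO_SIZE minus the current fill level (as reported
+ * by the SSC_TX_FSTAT register) bytes are written, so the FIFO is
+ * never overrun.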
+ */ +static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 tx_fstat, sta; + int i; + + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (sta & SSC_STA_TX_FIFO_FULL) + return; + + tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT); + tx_fstat &= SSC_TX_FSTAT_STATUS; + + if (c->count < (SSC_TXFIFO_SIZE - tx_fstat)) + i = c->count; + else + i = SSC_TXFIFO_SIZE - tx_fstat; + + for (; i > 0; i--, c->count--, c->buf++) + st_i2c_write_tx_fifo(i2c_dev, *c->buf); +} + +/** + * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode + * @i2c_dev: Controller's private data + * + * This functions fills the Tx FIFO with fixed pattern when + * in read mode to trigger clock. + */ +static void st_i2c_rd_fill_tx_fifo(struct st_i2c_dev *i2c_dev, int max) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 tx_fstat, sta; + int i; + + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (sta & SSC_STA_TX_FIFO_FULL) + return; + + tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT); + tx_fstat &= SSC_TX_FSTAT_STATUS; + + if (max < (SSC_TXFIFO_SIZE - tx_fstat)) + i = max; + else + i = SSC_TXFIFO_SIZE - tx_fstat; + + for (; i > 0; i--, c->xfered++) + st_i2c_write_tx_fifo(i2c_dev, 0xff); +} + +static void st_i2c_read_rx_fifo(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 i, sta; + u16 rbuf; + + sta = readl_relaxed(i2c_dev->base + SSC_STA); + if (sta & SSC_STA_RIR) { + i = SSC_RXFIFO_SIZE; + } else { + i = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT); + i &= SSC_RX_FSTAT_STATUS; + } + + for (; (i > 0) && (c->count > 0); i--, c->count--) { + rbuf = readl_relaxed(i2c_dev->base + SSC_RBUF) >> 1; + *c->buf++ = (u8)rbuf & 0xff; + } + + if (i) { + dev_err(i2c_dev->dev, "Unexpected %d bytes in rx fifo\n", i); + st_i2c_flush_rx_fifo(i2c_dev); + } +} + +/** + * st_i2c_terminate_xfer() - Send either STOP or REPSTART condition + * @i2c_dev: Controller's private data + */ +static void st_i2c_terminate_xfer(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + + st_i2c_clr_bits(i2c_dev->base + SSC_IEN, SSC_IEN_TEEN); + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG); + + if (c->stop) { + st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_STOPEN); + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); + } else { + st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_REPSTRTEN); + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_REPSTRTG); + } +} + +/** + * st_i2c_handle_write() - Handle FIFO empty interrupt in case of write + * @i2c_dev: Controller's private data + */ +static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + + st_i2c_flush_rx_fifo(i2c_dev); + + if (!c->count) + /* End of xfer, send stop or repstart */ + st_i2c_terminate_xfer(i2c_dev); + else + st_i2c_wr_fill_tx_fifo(i2c_dev); +} + +/** + * st_i2c_handle_write() - Handle FIFO enmpty interrupt in case of read + * @i2c_dev: Controller's private data + */ +static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 ien; + + /* Trash the address read back */ + if (!c->xfered) { + readl_relaxed(i2c_dev->base + SSC_RBUF); + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_TXENB); + } else { + st_i2c_read_rx_fifo(i2c_dev); + } + + if (!c->count) { + /* End of xfer, send stop or repstart */ + st_i2c_terminate_xfer(i2c_dev); + } else if (c->count == 1) { + /* Penultimate byte to xfer, disable ACK gen. 
*/ + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_ACKG); + + /* Last received byte is to be handled by NACK interrupt */ + ien = SSC_IEN_NACKEN | SSC_IEN_ARBLEN; + writel_relaxed(ien, i2c_dev->base + SSC_IEN); + + st_i2c_rd_fill_tx_fifo(i2c_dev, c->count); + } else { + st_i2c_rd_fill_tx_fifo(i2c_dev, c->count - 1); + } +} + +/** + * st_i2c_isr() - Interrupt routine + * @irq: interrupt number + * @data: Controller's private data + */ +static irqreturn_t st_i2c_isr_thread(int irq, void *data) +{ + struct st_i2c_dev *i2c_dev = data; + struct st_i2c_client *c = &i2c_dev->client; + u32 sta, ien; + int it; + + ien = readl_relaxed(i2c_dev->base + SSC_IEN); + sta = readl_relaxed(i2c_dev->base + SSC_STA); + + /* Use __fls() to check error bits first */ + it = __fls(sta & ien); + if (it < 0) { + dev_dbg(i2c_dev->dev, "spurious it (sta=0x%04x, ien=0x%04x)\n", + sta, ien); + return IRQ_NONE; + } + + switch (1 << it) { + case SSC_STA_TE: + if (c->addr & I2C_M_RD) + st_i2c_handle_read(i2c_dev); + else + st_i2c_handle_write(i2c_dev); + break; + + case SSC_STA_STOP: + case SSC_STA_REPSTRT: + writel_relaxed(0, i2c_dev->base + SSC_IEN); + complete(&i2c_dev->complete); + break; + + case SSC_STA_NACK: + writel_relaxed(SSC_CLR_NACK, i2c_dev->base + SSC_CLR); + + /* Last received byte handled by NACK interrupt */ + if ((c->addr & I2C_M_RD) && (c->count == 1) && (c->xfered)) { + st_i2c_handle_read(i2c_dev); + break; + } + + it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN; + writel_relaxed(it, i2c_dev->base + SSC_IEN); + + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); + c->result = -EIO; + break; + + case SSC_STA_ARBL: + writel_relaxed(SSC_CLR_SSCARBL, i2c_dev->base + SSC_CLR); + + it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN; + writel_relaxed(it, i2c_dev->base + SSC_IEN); + + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); + c->result = -EIO; + break; + + default: + dev_err(i2c_dev->dev, + "it %d unhandled (sta=0x%04x)\n", it, sta); + } + + /* + * Read IEN register to ensure interrupt mask write is effective + * before re-enabling interrupt at GIC level, and thus avoid spurious + * interrupts. 
+ */ + readl(i2c_dev->base + SSC_IEN); + + return IRQ_HANDLED; +} + +/** + * st_i2c_xfer_msg() - Transfer a single I2C message + * @i2c_dev: Controller's private data + * @msg: I2C message to transfer + * @is_first: first message of the sequence + * @is_last: last message of the sequence + */ +static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg, + bool is_first, bool is_last) +{ + struct st_i2c_client *c = &i2c_dev->client; + u32 ctl, i2c, it; + unsigned long timeout; + int ret; + + c->addr = (u8)(msg->addr << 1); + c->addr |= (msg->flags & I2C_M_RD); + c->buf = msg->buf; + c->count = msg->len; + c->xfered = 0; + c->result = 0; + c->stop = is_last; + + reinit_completion(&i2c_dev->complete); + + ctl = SSC_CTL_EN | SSC_CTL_MS | SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO; + st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl); + + i2c = SSC_I2C_TXENB; + if (c->addr & I2C_M_RD) + i2c |= SSC_I2C_ACKG; + st_i2c_set_bits(i2c_dev->base + SSC_I2C, i2c); + + /* Write slave address */ + st_i2c_write_tx_fifo(i2c_dev, c->addr); + + /* Pre-fill Tx fifo with data in case of write */ + if (!(c->addr & I2C_M_RD)) + st_i2c_wr_fill_tx_fifo(i2c_dev); + + it = SSC_IEN_NACKEN | SSC_IEN_TEEN | SSC_IEN_ARBLEN; + writel_relaxed(it, i2c_dev->base + SSC_IEN); + + if (is_first) { + ret = st_i2c_wait_free_bus(i2c_dev); + if (ret) + return ret; + + st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG); + } + + timeout = wait_for_completion_timeout(&i2c_dev->complete, + i2c_dev->adap.timeout); + ret = c->result; + + if (!timeout) { + dev_err(i2c_dev->dev, "Write to slave 0x%x timed out\n", + c->addr); + ret = -ETIMEDOUT; + } + + i2c = SSC_I2C_STOPG | SSC_I2C_REPSTRTG; + st_i2c_clr_bits(i2c_dev->base + SSC_I2C, i2c); + + writel_relaxed(SSC_CLR_SSCSTOP | SSC_CLR_REPSTRT, + i2c_dev->base + SSC_CLR); + + return ret; +} + +/** + * st_i2c_xfer() - Transfer a single I2C message + * @i2c_adap: Adapter pointer to the controller + * @msgs: Pointer to data to be written. + * @num: Number of messages to be executed + */ +static int st_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) +{ + struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); + int ret, i; + + i2c_dev->busy = true; + + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) { + dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n"); + return ret; + } + + pinctrl_pm_select_default_state(i2c_dev->dev); + + st_i2c_hw_config(i2c_dev); + + for (i = 0; (i < num) && !ret; i++) + ret = st_i2c_xfer_msg(i2c_dev, &msgs[i], i == 0, i == num - 1); + + pinctrl_pm_select_idle_state(i2c_dev->dev); + + clk_disable_unprepare(i2c_dev->clk); + + i2c_dev->busy = false; + + return (ret < 0) ? 
ret : i; +} + +#ifdef CONFIG_PM_SLEEP +static int st_i2c_suspend(struct device *dev) +{ + struct platform_device *pdev = + container_of(dev, struct platform_device, dev); + struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + if (i2c_dev->busy) + return -EBUSY; + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int st_i2c_resume(struct device *dev) +{ + pinctrl_pm_select_default_state(dev); + /* Go in idle state if available */ + pinctrl_pm_select_idle_state(dev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(st_i2c_pm, st_i2c_suspend, st_i2c_resume); +#define ST_I2C_PM (&st_i2c_pm) +#else +#define ST_I2C_PM NULL +#endif + +static u32 st_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm st_i2c_algo = { + .master_xfer = st_i2c_xfer, + .functionality = st_i2c_func, +}; + +static int st_i2c_of_get_deglitch(struct device_node *np, + struct st_i2c_dev *i2c_dev) +{ + int ret; + + ret = of_property_read_u32(np, "st,i2c-min-scl-pulse-width-us", + &i2c_dev->scl_min_width_us); + if ((ret == -ENODATA) || (ret == -EOVERFLOW)) { + dev_err(i2c_dev->dev, "st,i2c-min-scl-pulse-width-us invalid\n"); + return ret; + } + + ret = of_property_read_u32(np, "st,i2c-min-sda-pulse-width-us", + &i2c_dev->sda_min_width_us); + if ((ret == -ENODATA) || (ret == -EOVERFLOW)) { + dev_err(i2c_dev->dev, "st,i2c-min-sda-pulse-width-us invalid\n"); + return ret; + } + + return 0; +} + +static int st_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct st_i2c_dev *i2c_dev; + struct resource *res; + u32 clk_rate; + struct i2c_adapter *adap; + int ret; + + i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); + if (!i2c_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c_dev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c_dev->base)) + return PTR_ERR(i2c_dev->base); + + i2c_dev->irq = irq_of_parse_and_map(np, 0); + if (!i2c_dev->irq) { + dev_err(&pdev->dev, "IRQ missing or invalid\n"); + return -EINVAL; + } + + i2c_dev->clk = of_clk_get_by_name(np, "ssc"); + if (IS_ERR(i2c_dev->clk)) { + dev_err(&pdev->dev, "Unable to request clock\n"); + return PTR_ERR(i2c_dev->clk); + } + + i2c_dev->mode = I2C_MODE_STANDARD; + ret = of_property_read_u32(np, "clock-frequency", &clk_rate); + if ((!ret) && (clk_rate == 400000)) + i2c_dev->mode = I2C_MODE_FAST; + + i2c_dev->dev = &pdev->dev; + + ret = devm_request_threaded_irq(&pdev->dev, i2c_dev->irq, + NULL, st_i2c_isr_thread, + IRQF_ONESHOT, pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); + return ret; + } + + pinctrl_pm_select_default_state(i2c_dev->dev); + /* In case idle state available, select it */ + pinctrl_pm_select_idle_state(i2c_dev->dev); + + ret = st_i2c_of_get_deglitch(np, i2c_dev); + if (ret) + return ret; + + adap = &i2c_dev->adap; + i2c_set_adapdata(adap, i2c_dev); + snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%x)", res->start); + adap->owner = THIS_MODULE; + adap->timeout = 2 * HZ; + adap->retries = 0; + adap->algo = &st_i2c_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + init_completion(&i2c_dev->complete); + + ret = i2c_add_adapter(adap); + if (ret) { + dev_err(&pdev->dev, "Failed to add adapter\n"); + return ret; + } + + platform_set_drvdata(pdev, i2c_dev); + + dev_info(i2c_dev->dev, "%s initialized\n", adap->name); + + return 0; +} + +static int st_i2c_remove(struct platform_device 
*pdev) +{ + struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c_dev->adap); + + return 0; +} + +static struct of_device_id st_i2c_match[] = { + { .compatible = "st,comms-ssc-i2c", }, + { .compatible = "st,comms-ssc4-i2c", }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_i2c_match); + +static struct platform_driver st_i2c_driver = { + .driver = { + .name = "st-i2c", + .owner = THIS_MODULE, + .of_match_table = st_i2c_match, + .pm = ST_I2C_PM, + }, + .probe = st_i2c_probe, + .remove = st_i2c_remove, +}; + +module_platform_driver(st_i2c_driver); + +MODULE_AUTHOR("Maxime Coquelin <maxime.coquelin@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics I2C driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c index 31395fa8121..2c8a3e4f900 100644 --- a/drivers/i2c/busses/i2c-wmt.c +++ b/drivers/i2c/busses/i2c-wmt.c @@ -349,6 +349,7 @@ static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev) err = clk_set_rate(i2c_dev->clk, 20000000); if (err) { dev_err(i2c_dev->dev, "failed to set clock = 20Mhz\n"); + clk_disable_unprepare(i2c_dev->clk); return err; } diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 4c8b368d463..fc2716afdfd 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -40,6 +40,7 @@ #include <linux/i2c-xiic.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/of.h> #define DRIVER_NAME "xiic-i2c" @@ -702,7 +703,7 @@ static int xiic_i2c_probe(struct platform_device *pdev) if (irq < 0) goto resource_missing; - pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev); + pdata = dev_get_platdata(&pdev->dev); i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); if (!i2c) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 75ba8608383..d74c0b34248 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -248,7 +248,7 @@ static int i2c_device_probe(struct device *dev) driver = to_i2c_driver(dev->driver); if (!driver->probe || !driver->id_table) return -ENODEV; - client->driver = driver; + if (!device_can_wakeup(&client->dev)) device_init_wakeup(&client->dev, client->flags & I2C_CLIENT_WAKE); @@ -257,7 +257,6 @@ static int i2c_device_probe(struct device *dev) acpi_dev_pm_attach(&client->dev, true); status = driver->probe(client, i2c_match_id(driver->id_table, client)); if (status) { - client->driver = NULL; i2c_set_clientdata(client, NULL); acpi_dev_pm_detach(&client->dev, true); } @@ -281,10 +280,8 @@ static int i2c_device_remove(struct device *dev) dev->driver = NULL; status = 0; } - if (status == 0) { - client->driver = NULL; + if (status == 0) i2c_set_clientdata(client, NULL); - } acpi_dev_pm_detach(&client->dev, true); return status; } @@ -618,6 +615,22 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter) } EXPORT_SYMBOL_GPL(i2c_unlock_adapter); +static void i2c_dev_set_name(struct i2c_adapter *adap, + struct i2c_client *client) +{ + struct acpi_device *adev = ACPI_COMPANION(&client->dev); + + if (adev) { + dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev)); + return; + } + + /* For 10-bit clients, add an arbitrary offset to avoid collisions */ + dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), + client->addr | ((client->flags & I2C_CLIENT_TEN) + ? 
0xa000 : 0)); +} + /** * i2c_new_device - instantiate an i2c device * @adap: the adapter managing the device @@ -674,12 +687,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; client->dev.of_node = info->of_node; - ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle); + ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion); - /* For 10-bit clients, add an arbitrary offset to avoid collisions */ - dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), - client->addr | ((client->flags & I2C_CLIENT_TEN) - ? 0xa000 : 0)); + i2c_dev_set_name(adap, client); status = device_register(&client->dev); if (status) goto out_err; @@ -1103,7 +1113,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, return AE_OK; memset(&info, 0, sizeof(info)); - info.acpi_node.handle = handle; + info.acpi_node.companion = adev; info.irq = -1; INIT_LIST_HEAD(&resource_list); @@ -1614,9 +1624,14 @@ static int i2c_cmd(struct device *dev, void *_arg) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_cmd_arg *arg = _arg; + struct i2c_driver *driver; + + if (!client || !client->dev.driver) + return 0; - if (client && client->driver && client->driver->command) - client->driver->command(client, arg->cmd, arg->arg); + driver = to_i2c_driver(client->dev.driver); + if (driver->command) + driver->command(client, arg->cmd, arg->arg); return 0; } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index c3ccdea3d18..80b47e8ce03 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -102,8 +102,8 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev) kfree(i2c_dev); } -static ssize_t show_adapter_name(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt)); @@ -111,7 +111,13 @@ static ssize_t show_adapter_name(struct device *dev, return -ENODEV; return sprintf(buf, "%s\n", i2c_dev->adap->name); } -static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL); +static DEVICE_ATTR_RO(name); + +static struct attribute *i2c_attrs[] = { + &dev_attr_name.attr, + NULL, +}; +ATTRIBUTE_GROUPS(i2c); /* ------------------------------------------------------------------------- */ @@ -562,15 +568,10 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) res = PTR_ERR(i2c_dev->dev); goto error; } - res = device_create_file(i2c_dev->dev, &dev_attr_name); - if (res) - goto error_destroy; pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; -error_destroy: - device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); error: return_i2c_dev(i2c_dev); return res; @@ -589,7 +590,6 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) if (!i2c_dev) /* attach_adapter must have failed */ return 0; - device_remove_file(i2c_dev->dev, &dev_attr_name); return_i2c_dev(i2c_dev); device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); @@ -637,6 +637,7 @@ static int __init i2c_dev_init(void) res = PTR_ERR(i2c_dev_class); goto out_unreg_chrdev; } + i2c_dev_class->dev_groups = i2c_groups; /* Keep track of adapters which will be added or removed later */ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier); diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 44d4c6071c1..c99b2298736 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ 
-46,6 +46,7 @@ static int smbus_do_alert(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); struct alert_data *data = addrp; + struct i2c_driver *driver; if (!client || client->addr != data->addr) return 0; @@ -54,12 +55,13 @@ static int smbus_do_alert(struct device *dev, void *addrp) /* * Drivers should either disable alerts, or provide at least - * a minimal handler. Lock so client->driver won't change. + * a minimal handler. Lock so the driver won't change. */ device_lock(dev); - if (client->driver) { - if (client->driver->alert) - client->driver->alert(client, data->flag); + if (client->dev.driver) { + driver = to_i2c_driver(client->dev.driver); + if (driver->alert) + driver->alert(client, data->flag); else dev_warn(&client->dev, "no driver alert()!\n"); } else diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c index 928656e241d..c58e093b603 100644 --- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c +++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c @@ -238,7 +238,7 @@ static struct platform_driver i2c_arbitrator_driver = { .driver = { .owner = THIS_MODULE, .name = "i2c-arb-gpio-challenge", - .of_match_table = of_match_ptr(i2c_arbitrator_of_match), + .of_match_table = i2c_arbitrator_of_match, }, }; diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index a764da777f0..8a8c56f4b02 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c @@ -30,15 +30,15 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val) int i; for (i = 0; i < mux->data.n_gpios; i++) - gpio_set_value(mux->gpio_base + mux->data.gpios[i], - val & (1 << i)); + gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i], + val & (1 << i)); } static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan) { struct gpiomux *mux = data; - i2c_mux_gpio_set(mux, mux->data.values[chan]); + i2c_mux_gpio_set(mux, chan); return 0; } @@ -228,7 +228,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev) unsigned int class = mux->data.classes ? 
mux->data.classes[i] : 0; mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, - i, class, + mux->data.values[i], class, i2c_mux_gpio_select, deselect); if (!mux->adap[i]) { ret = -ENODEV; @@ -283,7 +283,7 @@ static struct platform_driver i2c_mux_gpio_driver = { .driver = { .owner = THIS_MODULE, .name = "i2c-mux-gpio", - .of_match_table = of_match_ptr(i2c_mux_gpio_of_match), + .of_match_table = i2c_mux_gpio_of_match, }, }; diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 68a37157377..d7978dc4ad0 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -24,6 +24,7 @@ #include <linux/i2c-mux-pinctrl.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/of.h> struct i2c_mux_pinctrl { struct device *dev; diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 140c8ef5052..d9e1f7ccfe6 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -7,6 +7,7 @@ * Copyright (C) 2006 Hannes Reinecke */ +#include <linux/acpi.h> #include <linux/ata.h> #include <linux/delay.h> #include <linux/device.h> @@ -19,8 +20,6 @@ #include <linux/dmi.h> #include <linux/module.h> -#include <acpi/acpi_bus.h> - #define REGS_PER_GTF 7 struct GTM_buffer { @@ -128,7 +127,7 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); - dev_handle = DEVICE_ACPI_HANDLE(dev); + dev_handle = ACPI_HANDLE(dev); if (!dev_handle) { DEBPRINT("no acpi handle for device\n"); goto err; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3226ce98fb1..cbd4e9abc47 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1,7 +1,7 @@ /* * intel_idle.c - native hardware idle loop for modern Intel processors * - * Copyright (c) 2010, Intel Corporation. + * Copyright (c) 2013, Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it @@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[] __initdata = { { .enter = NULL } }; +static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = { + { + .name = "C1-AVN", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 2, + .target_residency = 2, + .enter = &intel_idle }, + { + .name = "C6-AVN", + .desc = "MWAIT 0x51", + .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 15, + .target_residency = 45, + .enter = &intel_idle }, +}; /** * intel_idle @@ -462,6 +478,11 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_avn = { + .state_table = avn_cstates, + .disable_promotion_to_c1e = true, +}; + #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } @@ -483,6 +504,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x3f, idle_cpu_hsw), ICPU(0x45, idle_cpu_hsw), ICPU(0x46, idle_cpu_hsw), + ICPU(0x4D, idle_cpu_avn), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index b84791f03a2..5ceda710f51 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -31,17 +31,6 @@ config INFINIBAND_USER_ACCESS libibverbs, libibcm and a hardware driver library from <http://www.openfabrics.org/git/>. 
-config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - bool "Experimental and unstable ABI for userspace access to flow steering verbs" - depends on INFINIBAND_USER_ACCESS - depends on STAGING - ---help--- - The final ABI for userspace access to flow steering verbs - has not been defined. To use the current ABI, *WHICH WILL - CHANGE IN THE FUTURE*, say Y here. - - If unsure, say N. - config INFINIBAND_USER_MEM bool depends on INFINIBAND_USER_ACCESS != n diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 784b97cb05b..f2ef7ef0f36 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -383,14 +383,11 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv) { unsigned long flags; int id; - static int next_id; idr_preload(GFP_KERNEL); spin_lock_irqsave(&cm.lock, flags); - id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT); - if (id >= 0) - next_id = max(id + 1, 0); + id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT); spin_unlock_irqrestore(&cm.lock, flags); idr_preload_end(); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d2172e71f98..8e49db690f3 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -328,28 +328,6 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) return ret; } -static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num) -{ - int i; - int err; - struct ib_port_attr props; - union ib_gid tmp; - - err = ib_query_port(device, port_num, &props); - if (err) - return err; - - for (i = 0; i < props.gid_tbl_len; ++i) { - err = ib_query_gid(device, port_num, i, &tmp); - if (err) - return err; - if (!memcmp(&tmp, gid, sizeof tmp)) - return 0; - } - - return -EADDRNOTAVAIL; -} - static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) { dev_addr->dev_type = ARPHRD_INFINIBAND; @@ -371,13 +349,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a return ret; } -static int cma_acquire_dev(struct rdma_id_private *id_priv) +static int cma_acquire_dev(struct rdma_id_private *id_priv, + struct rdma_id_private *listen_id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct cma_device *cma_dev; union ib_gid gid, iboe_gid; int ret = -ENODEV; - u8 port; + u8 port, found_port; enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ? 
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; @@ -389,17 +368,39 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv) iboe_addr_get_sgid(dev_addr, &iboe_gid); memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof gid); + if (listen_id_priv && + rdma_port_get_link_layer(listen_id_priv->id.device, + listen_id_priv->id.port_num) == dev_ll) { + cma_dev = listen_id_priv->cma_dev; + port = listen_id_priv->id.port_num; + if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && + rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) + ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, + &found_port, NULL); + else + ret = ib_find_cached_gid(cma_dev->device, &gid, + &found_port, NULL); + + if (!ret && (port == found_port)) { + id_priv->id.port_num = found_port; + goto out; + } + } list_for_each_entry(cma_dev, &dev_list, list) { for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { + if (listen_id_priv && + listen_id_priv->cma_dev == cma_dev && + listen_id_priv->id.port_num == port) + continue; if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) { if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) - ret = find_gid_port(cma_dev->device, &iboe_gid, port); + ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL); else - ret = find_gid_port(cma_dev->device, &gid, port); + ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL); - if (!ret) { - id_priv->id.port_num = port; + if (!ret && (port == found_port)) { + id_priv->id.port_num = found_port; goto out; } } @@ -1292,7 +1293,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) } mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); - ret = cma_acquire_dev(conn_id); + ret = cma_acquire_dev(conn_id, listen_id); if (ret) goto err2; @@ -1451,7 +1452,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, { struct rdma_cm_id *new_cm_id; struct rdma_id_private *listen_id, *conn_id; - struct net_device *dev = NULL; struct rdma_cm_event event; int ret; struct ib_device_attr attr; @@ -1481,7 +1481,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, goto out; } - ret = cma_acquire_dev(conn_id); + ret = cma_acquire_dev(conn_id, listen_id); if (ret) { mutex_unlock(&conn_id->handler_mutex); rdma_destroy_id(new_cm_id); @@ -1529,8 +1529,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, cma_deref_id(conn_id); out: - if (dev) - dev_put(dev); mutex_unlock(&listen_id->handler_mutex); return ret; } @@ -2066,7 +2064,7 @@ static void addr_handler(int status, struct sockaddr *src_addr, goto out; if (!status && !id_priv->cma_dev) - status = cma_acquire_dev(id_priv); + status = cma_acquire_dev(id_priv, NULL); if (status) { if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, @@ -2563,7 +2561,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) if (ret) goto err1; - ret = cma_acquire_dev(id_priv); + ret = cma_acquire_dev(id_priv, NULL); if (ret) goto err1; } diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index da06abde9e0..a1e9cba8494 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) list_for_each_entry(client, &client_list, list) { if (client->index == index) { if (op < 0 || op >= client->nops || - 
!client->cb_table[RDMA_NL_GET_OP(op)].dump) + !client->cb_table[op].dump) return -EINVAL; { diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index cde1e7b5b85..faad2caf22b 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -612,6 +612,7 @@ static ssize_t show_node_type(struct device *device, switch (dev->node_type) { case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type); case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type); + case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type); case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index b0f189be543..ab8b1c30b36 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -57,7 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL"); static unsigned int max_backlog = 1024; static struct ctl_table_header *ucma_ctl_table_hdr; -static ctl_table ucma_ctl_table[] = { +static struct ctl_table ucma_ctl_table[] = { { .procname = "max_backlog", .data = &max_backlog, @@ -271,7 +271,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, goto out; } ctx->backlog--; - } else if (!ctx->uid) { + } else if (!ctx->uid || ctx->cm_id != cm_id) { /* * We ignore events for new connections until userspace has set * their context. This can only happen if an error occurs on a diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index d8f9c6c272d..bdc842e9fae 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -47,6 +47,14 @@ #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> +#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ + do { \ + (udata)->inbuf = (void __user *) (ibuf); \ + (udata)->outbuf = (void __user *) (obuf); \ + (udata)->inlen = (ilen); \ + (udata)->outlen = (olen); \ + } while (0) + /* * Our lifetime rules for these structs are the following: * @@ -178,6 +186,22 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler, struct ib_event *event); void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd); +struct ib_uverbs_flow_spec { + union { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + struct ib_uverbs_flow_spec_eth eth; + struct ib_uverbs_flow_spec_ipv4 ipv4; + struct ib_uverbs_flow_spec_tcp_udp tcp_udp; + }; +}; + #define IB_UVERBS_DECLARE_CMD(name) \ ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ const char __user *buf, int in_len, \ @@ -217,9 +241,13 @@ IB_UVERBS_DECLARE_CMD(destroy_srq); IB_UVERBS_DECLARE_CMD(create_xsrq); IB_UVERBS_DECLARE_CMD(open_xrcd); IB_UVERBS_DECLARE_CMD(close_xrcd); -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING -IB_UVERBS_DECLARE_CMD(create_flow); -IB_UVERBS_DECLARE_CMD(destroy_flow); -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + +#define IB_UVERBS_DECLARE_EX_CMD(name) \ + int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \ + struct ib_udata *ucore, \ + struct ib_udata *uhw) + +IB_UVERBS_DECLARE_EX_CMD(create_flow); +IB_UVERBS_DECLARE_EX_CMD(destroy_flow); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 2f0f01b70e3..65f6e7dc380 100644 --- 
a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -54,17 +54,7 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" }; static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ - -#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ - do { \ - (udata)->inbuf = (void __user *) (ibuf); \ - (udata)->outbuf = (void __user *) (obuf); \ - (udata)->inlen = (ilen); \ - (udata)->outlen = (olen); \ - } while (0) /* * The ib_uobject locking scheme is as follows: @@ -939,13 +929,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) return -EINVAL; - /* - * Local write permission is required if remote write or - * remote atomic permission is also requested. - */ - if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && - !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) - return -EINVAL; + ret = ib_check_mr_access(cmd.access_flags); + if (ret) + return ret; uobj = kmalloc(sizeof *uobj, GFP_KERNEL); if (!uobj) @@ -2128,6 +2114,9 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, } next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn; next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; + if (next->opcode == IB_WR_SEND_WITH_IMM) + next->ex.imm_data = + (__be32 __force) user_wr->ex.imm_data; } else { switch (next->opcode) { case IB_WR_RDMA_WRITE_WITH_IMM: @@ -2601,8 +2590,7 @@ out_put: return ret ? 
ret : in_len; } -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING -static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, +static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec) { ib_spec->type = kern_spec->type; @@ -2642,28 +2630,31 @@ static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, return 0; } -ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) { struct ib_uverbs_create_flow cmd; struct ib_uverbs_create_flow_resp resp; struct ib_uobject *uobj; struct ib_flow *flow_id; - struct ib_kern_flow_attr *kern_flow_attr; + struct ib_uverbs_flow_attr *kern_flow_attr; struct ib_flow_attr *flow_attr; struct ib_qp *qp; int err = 0; void *kern_spec; void *ib_spec; int i; - int kern_attr_size; - if (out_len < sizeof(resp)) + if (ucore->outlen < sizeof(resp)) return -ENOSPC; - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; + err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + if (err) + return err; + + ucore->inbuf += sizeof(cmd); + ucore->inlen -= sizeof(cmd); if (cmd.comp_mask) return -EINVAL; @@ -2672,32 +2663,27 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW)) return -EPERM; - if (cmd.flow_attr.num_of_specs < 0 || - cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) + if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) return -EINVAL; - kern_attr_size = cmd.flow_attr.size - sizeof(cmd) - - sizeof(struct ib_uverbs_cmd_hdr_ex); - - if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len || - kern_attr_size < 0 || kern_attr_size > - (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec))) + if (cmd.flow_attr.size > ucore->inlen || + cmd.flow_attr.size > + (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) return -EINVAL; if (cmd.flow_attr.num_of_specs) { - kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL); + kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, + GFP_KERNEL); if (!kern_flow_attr) return -ENOMEM; memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); - if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd), - kern_attr_size)) { - err = -EFAULT; + err = ib_copy_from_udata(kern_flow_attr + 1, ucore, + cmd.flow_attr.size); + if (err) goto err_free_attr; - } } else { kern_flow_attr = &cmd.flow_attr; - kern_attr_size = sizeof(cmd.flow_attr); } uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); @@ -2714,7 +2700,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, goto err_uobj; } - flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL); + flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL); if (!flow_attr) { err = -ENOMEM; goto err_put; @@ -2729,19 +2715,22 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, kern_spec = kern_flow_attr + 1; ib_spec = flow_attr + 1; - for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) { + for (i = 0; i < flow_attr->num_of_specs && + cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && + cmd.flow_attr.size >= + ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { err = kern_spec_to_ib_spec(kern_spec, ib_spec); if (err) goto err_free; flow_attr->size += ((union ib_flow_spec *) ib_spec)->size; - kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size; - kern_spec += ((struct ib_kern_spec *) 
kern_spec)->size; + cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; + kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; ib_spec += ((union ib_flow_spec *) ib_spec)->size; } - if (kern_attr_size) { - pr_warn("create flow failed, %d bytes left from uverb cmd\n", - kern_attr_size); + if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { + pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", + i, cmd.flow_attr.size); goto err_free; } flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); @@ -2760,11 +2749,10 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, memset(&resp, 0, sizeof(resp)); resp.flow_handle = uobj->id; - if (copy_to_user((void __user *)(unsigned long) cmd.response, - &resp, sizeof(resp))) { - err = -EFAULT; + err = ib_copy_to_udata(ucore, + &resp, sizeof(resp)); + if (err) goto err_copy; - } put_qp_read(qp); mutex_lock(&file->mutex); @@ -2777,7 +2765,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file, kfree(flow_attr); if (cmd.flow_attr.num_of_specs) kfree(kern_flow_attr); - return in_len; + return 0; err_copy: idr_remove_uobj(&ib_uverbs_rule_idr, uobj); destroy_flow: @@ -2794,16 +2782,18 @@ err_free_attr: return err; } -ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) { +int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) +{ struct ib_uverbs_destroy_flow cmd; struct ib_flow *flow_id; struct ib_uobject *uobj; int ret; - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; + ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + if (ret) + return ret; uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, file->ucontext); @@ -2825,9 +2815,8 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file, put_uobj(uobj); - return ret ? 
ret : in_len; + return ret; } -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, struct ib_uverbs_create_xsrq *cmd, diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 2df31f68ea0..34386943ebc 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -115,10 +115,13 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp, -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow, - [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ +}; + +static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) = { + [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, + [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow }; static void ib_uverbs_add_one(struct ib_device *device); @@ -589,6 +592,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, { struct ib_uverbs_file *file = filp->private_data; struct ib_uverbs_cmd_hdr hdr; + __u32 flags; if (count < sizeof hdr) return -EINVAL; @@ -596,45 +600,105 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if (copy_from_user(&hdr, buf, sizeof hdr)) return -EFAULT; - if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || - !uverbs_cmd_table[hdr.command]) - return -EINVAL; + flags = (hdr.command & + IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; - if (!file->ucontext && - hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) - return -EINVAL; + if (!flags) { + __u32 command; - if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) - return -ENOSYS; + if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK | + IB_USER_VERBS_CMD_COMMAND_MASK)) + return -EINVAL; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) { - struct ib_uverbs_cmd_hdr_ex hdr_ex; + command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; - if (copy_from_user(&hdr_ex, buf, sizeof(hdr_ex))) - return -EFAULT; + if (command >= ARRAY_SIZE(uverbs_cmd_table) || + !uverbs_cmd_table[command]) + return -EINVAL; - if (((hdr_ex.in_words + hdr_ex.provider_in_words) * 4) != count) + if (!file->ucontext && + command != IB_USER_VERBS_CMD_GET_CONTEXT) return -EINVAL; - return uverbs_cmd_table[hdr.command](file, - buf + sizeof(hdr_ex), - (hdr_ex.in_words + - hdr_ex.provider_in_words) * 4, - (hdr_ex.out_words + - hdr_ex.provider_out_words) * 4); - } else { -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << command))) + return -ENOSYS; + if (hdr.in_words * 4 != count) return -EINVAL; - return uverbs_cmd_table[hdr.command](file, - buf + sizeof(hdr), - hdr.in_words * 4, - hdr.out_words * 4); -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING + return uverbs_cmd_table[command](file, + buf + sizeof(hdr), + hdr.in_words * 4, + hdr.out_words * 4); + + } else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) { + __u32 command; + + struct ib_uverbs_ex_cmd_hdr ex_hdr; + struct ib_udata ucore; + struct ib_udata uhw; + int err; + size_t written_count = count; + + if (hdr.command & 
~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK | + IB_USER_VERBS_CMD_COMMAND_MASK)) + return -EINVAL; + + command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; + + if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || + !uverbs_ex_cmd_table[command]) + return -ENOSYS; + + if (!file->ucontext) + return -EINVAL; + + if (!(file->device->ib_dev->uverbs_ex_cmd_mask & (1ull << command))) + return -ENOSYS; + + if (count < (sizeof(hdr) + sizeof(ex_hdr))) + return -EINVAL; + + if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) + return -EFAULT; + + count -= sizeof(hdr) + sizeof(ex_hdr); + buf += sizeof(hdr) + sizeof(ex_hdr); + + if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) + return -EINVAL; + + if (ex_hdr.response) { + if (!hdr.out_words && !ex_hdr.provider_out_words) + return -EINVAL; + } else { + if (hdr.out_words || ex_hdr.provider_out_words) + return -EINVAL; + } + + INIT_UDATA(&ucore, + (hdr.in_words) ? buf : 0, + (unsigned long)ex_hdr.response, + hdr.in_words * 8, + hdr.out_words * 8); + + INIT_UDATA(&uhw, + (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0, + (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0, + ex_hdr.provider_in_words * 8, + ex_hdr.provider_out_words * 8); + + err = uverbs_ex_cmd_table[command](file, + &ucore, + &uhw); + + if (err) + return err; + + return written_count; } -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + + return -ENOSYS; } static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a321df28bab..d4f6ddf72ff 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -114,6 +114,8 @@ rdma_node_get_transport(enum rdma_node_type node_type) return RDMA_TRANSPORT_IB; case RDMA_NODE_RNIC: return RDMA_TRANSPORT_IWARP; + case RDMA_NODE_USNIC: + return RDMA_TRANSPORT_USNIC; default: BUG(); return 0; @@ -130,6 +132,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_ case RDMA_TRANSPORT_IB: return IB_LINK_LAYER_INFINIBAND; case RDMA_TRANSPORT_IWARP: + case RDMA_TRANSPORT_USNIC: return IB_LINK_LAYER_ETHERNET; default: return IB_LINK_LAYER_UNSPECIFIED; @@ -958,6 +961,11 @@ EXPORT_SYMBOL(ib_resize_cq); struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) { struct ib_mr *mr; + int err; + + err = ib_check_mr_access(mr_access_flags); + if (err) + return ERR_PTR(err); mr = pd->device->get_dma_mr(pd, mr_access_flags); @@ -980,6 +988,11 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, u64 *iova_start) { struct ib_mr *mr; + int err; + + err = ib_check_mr_access(mr_access_flags); + if (err) + return ERR_PTR(err); if (!pd->device->reg_phys_mr) return ERR_PTR(-ENOSYS); @@ -1010,6 +1023,10 @@ int ib_rereg_phys_mr(struct ib_mr *mr, struct ib_pd *old_pd; int ret; + ret = ib_check_mr_access(mr_access_flags); + if (ret) + return ret; + if (!mr->device->rereg_phys_mr) return -ENOSYS; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 33d2cc6ab56..4a033853312 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -602,10 +602,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size); - PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " + PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " "qpmask 0x%x cqshift %lu cqmask 0x%x\n", 
(unsigned)pci_resource_len(rdev->lldi.pdev, 2), - (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2), + (u64)pci_resource_start(rdev->lldi.pdev, 2), rdev->lldi.db_reg, rdev->lldi.gts_reg, rdev->qpshift, rdev->qpmask, diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c index f5cb13b2144..cc04b7ba348 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c @@ -280,9 +280,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, int j; int ret; - ret = get_user_pages(current, current->mm, addr, - npages, 0, 1, pages, NULL); - + ret = get_user_pages_fast(addr, npages, 0, pages); if (ret != npages) { int i; @@ -811,10 +809,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd, while (dim) { const int mxp = 8; - down_write(&current->mm->mmap_sem); ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp); - up_write(&current->mm->mmap_sem); - if (ret <= 0) goto done_unlock; else { diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index d5e60f44ba5..66dbf806237 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -324,7 +324,7 @@ static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) u32 i; i = cq->mcq.cons_index; - while (get_sw_cqe(cq, i & cq->ibcq.cqe)) + while (get_sw_cqe(cq, i)) ++i; return i - cq->mcq.cons_index; @@ -365,7 +365,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mutex_lock(&cq->resize_mutex); - if (entries < 1 || entries > dev->dev->caps.max_cqes) { + if (entries < 1) { err = -EINVAL; goto out; } @@ -376,6 +376,11 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) goto out; } + if (entries > dev->dev->caps.max_cqes) { + err = -EINVAL; + goto out; + } + if (ibcq->uobject) { err = mlx4_alloc_resize_umem(dev, cq, entries, udata); if (err) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 6a0a0d29660..1958c5ca792 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1685,11 +1685,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.create_flow = mlx4_ib_create_flow; ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - ibdev->ib_dev.uverbs_cmd_mask |= - (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) | - (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW); -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ + ibdev->ib_dev.uverbs_ex_cmd_mask |= + (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); } mlx4_ib_alloc_eqs(dev, ibdev); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 344ab03948a..b7262742974 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -556,7 +556,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, goto err_db; } mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); - (*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; *index = to_mucontext(context)->uuari.uars[0].index; @@ -620,7 +620,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, } mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); - (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT; *index = dev->mdev.priv.uuari.uars[0].index; return 0; @@ -653,8 +653,11 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, int eqn; int err; + if (entries < 0) + return ERR_PTR(-EINVAL); + entries = roundup_pow_of_two(entries + 1); - if (entries < 1 || entries > dev->mdev.caps.max_cqes) + if (entries > dev->mdev.caps.max_cqes) return ERR_PTR(-EINVAL); cq = kzalloc(sizeof(*cq), GFP_KERNEL); @@ -747,17 +750,9 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq) return 0; } -static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq, - u32 rsn) +static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn) { - u32 lrsn; - - if (srq) - lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff; - else - lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff; - - return rsn == lrsn; + return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff); } void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) @@ -787,8 +782,8 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; - if (is_equal_rsn(cqe64, srq, rsn)) { - if (srq) + if (is_equal_rsn(cqe64, rsn)) { + if (srq && (ntohl(cqe64->srqn) & 0xffffff)) mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter)); ++nfreed; } else if (nfreed) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index b1a6cb3a280..30653410962 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -745,7 +745,8 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); seg->start_addr = 0; - err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in)); + err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in), + NULL, NULL, NULL); if (err) { mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); goto err_in; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 836be915724..4c134d93d4f 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -262,6 +262,9 @@ struct mlx5_ib_mr { int npages; struct completion done; enum ib_wc_status status; + struct mlx5_ib_dev *dev; + struct mlx5_create_mkey_mbox_out out; + unsigned long start; }; struct mlx5_ib_fast_reg_page_list { @@ -323,6 +326,7 @@ struct mlx5_cache_ent { struct mlx5_ib_dev *dev; struct work_struct work; struct delayed_work dwork; + int pending; }; struct mlx5_mr_cache { @@ -358,6 +362,8 @@ struct mlx5_ib_dev { spinlock_t mr_lock; struct mlx5_ib_resources devr; struct mlx5_mr_cache cache; + struct timer_list delay_timer; + int fill_delay; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 3453580b1eb..039c3e40fcb 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -35,11 +35,12 @@ #include <linux/random.h> #include <linux/debugfs.h> #include <linux/export.h> +#include <linux/delay.h> #include <rdma/ib_umem.h> #include "mlx5_ib.h" enum { - DEF_CACHE_SIZE = 10, + MAX_PENDING_REG_MR = 8, }; enum { @@ -63,6 +64,51 @@ static int order2idx(struct mlx5_ib_dev *dev, int order) return order - cache->ent[0].order; } +static void reg_mr_callback(int status, void *context) +{ + struct mlx5_ib_mr *mr = context; + struct mlx5_ib_dev *dev = mr->dev; + 
struct mlx5_mr_cache *cache = &dev->cache; + int c = order2idx(dev, mr->order); + struct mlx5_cache_ent *ent = &cache->ent[c]; + u8 key; + unsigned long flags; + + spin_lock_irqsave(&ent->lock, flags); + ent->pending--; + spin_unlock_irqrestore(&ent->lock, flags); + if (status) { + mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); + kfree(mr); + dev->fill_delay = 1; + mod_timer(&dev->delay_timer, jiffies + HZ); + return; + } + + if (mr->out.hdr.status) { + mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n", + mr->out.hdr.status, + be32_to_cpu(mr->out.hdr.syndrome)); + kfree(mr); + dev->fill_delay = 1; + mod_timer(&dev->delay_timer, jiffies + HZ); + return; + } + + spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags); + key = dev->mdev.priv.mkey_key++; + spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags); + mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key; + + cache->last_add = jiffies; + + spin_lock_irqsave(&ent->lock, flags); + list_add_tail(&mr->list, &ent->head); + ent->cur++; + ent->size++; + spin_unlock_irqrestore(&ent->lock, flags); +} + static int add_keys(struct mlx5_ib_dev *dev, int c, int num) { struct mlx5_mr_cache *cache = &dev->cache; @@ -78,36 +124,39 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) return -ENOMEM; for (i = 0; i < num; i++) { + if (ent->pending >= MAX_PENDING_REG_MR) { + err = -EAGAIN; + break; + } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { err = -ENOMEM; - goto out; + break; } mr->order = ent->order; mr->umred = 1; + mr->dev = dev; in->seg.status = 1 << 6; in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN; in->seg.log2_page_size = 12; + spin_lock_irq(&ent->lock); + ent->pending++; + spin_unlock_irq(&ent->lock); + mr->start = jiffies; err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, - sizeof(*in)); + sizeof(*in), reg_mr_callback, + mr, &mr->out); if (err) { mlx5_ib_warn(dev, "create mkey failed %d\n", err); kfree(mr); - goto out; + break; } - cache->last_add = jiffies; - - spin_lock(&ent->lock); - list_add_tail(&mr->list, &ent->head); - ent->cur++; - ent->size++; - spin_unlock(&ent->lock); } -out: kfree(in); return err; } @@ -121,16 +170,16 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) int i; for (i = 0; i < num; i++) { - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (list_empty(&ent->head)) { - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); return; } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); ent->cur--; ent->size--; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); if (err) mlx5_ib_warn(dev, "failed destroy mkey\n"); @@ -162,9 +211,13 @@ static ssize_t size_write(struct file *filp, const char __user *buf, return -EINVAL; if (var > ent->size) { - err = add_keys(dev, c, var - ent->size); - if (err) - return err; + do { + err = add_keys(dev, c, var - ent->size); + if (err && err != -EAGAIN) + return err; + + usleep_range(3000, 5000); + } while (err); } else if (var < ent->size) { remove_keys(dev, c, ent->size - var); } @@ -280,23 +333,37 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) struct mlx5_ib_dev *dev = ent->dev; struct mlx5_mr_cache *cache = &dev->cache; int i = order2idx(dev, ent->order); + int err; if (cache->stopped) return; ent = &dev->cache.ent[i]; - if (ent->cur < 2 * ent->limit) { - add_keys(dev, i, 1); - if 
(ent->cur < 2 * ent->limit) - queue_work(cache->wq, &ent->work); + if (ent->cur < 2 * ent->limit && !dev->fill_delay) { + err = add_keys(dev, i, 1); + if (ent->cur < 2 * ent->limit) { + if (err == -EAGAIN) { + mlx5_ib_dbg(dev, "returned eagain, order %d\n", + i + 2); + queue_delayed_work(cache->wq, &ent->dwork, + msecs_to_jiffies(3)); + } else if (err) { + mlx5_ib_warn(dev, "command failed order %d, err %d\n", + i + 2, err); + queue_delayed_work(cache->wq, &ent->dwork, + msecs_to_jiffies(1000)); + } else { + queue_work(cache->wq, &ent->work); + } + } } else if (ent->cur > 2 * ent->limit) { if (!someone_adding(cache) && - time_after(jiffies, cache->last_add + 60 * HZ)) { + time_after(jiffies, cache->last_add + 300 * HZ)) { remove_keys(dev, i, 1); if (ent->cur > ent->limit) queue_work(cache->wq, &ent->work); } else { - queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ); + queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); } } } @@ -336,18 +403,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); ent->cur--; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); if (ent->cur < ent->limit) queue_work(cache->wq, &ent->work); break; } - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); queue_work(cache->wq, &ent->work); @@ -374,12 +441,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return; } ent = &cache->ent[c]; - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); list_add_tail(&mr->list, &ent->head); ent->cur++; if (ent->cur > 2 * ent->limit) shrink = 1; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); if (shrink) queue_work(cache->wq, &ent->work); @@ -394,16 +461,16 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) cancel_delayed_work(&ent->dwork); while (1) { - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (list_empty(&ent->head)) { - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); return; } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); ent->cur--; ent->size--; - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); if (err) mlx5_ib_warn(dev, "failed destroy mkey\n"); @@ -464,12 +531,18 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) debugfs_remove_recursive(dev->cache.root); } +static void delay_time_func(unsigned long ctx) +{ + struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx; + + dev->fill_delay = 0; +} + int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) { struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_cache_ent *ent; int limit; - int size; int err; int i; @@ -479,6 +552,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) return -ENOMEM; } + setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev); for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { INIT_LIST_HEAD(&cache->ent[i].head); spin_lock_init(&cache->ent[i].lock); @@ -489,13 +563,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->order = i + 2; ent->dev = dev; - if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) { - size = dev->mdev.profile->mr_cache[i].size; + if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) limit = dev->mdev.profile->mr_cache[i].limit; - } else { - size = DEF_CACHE_SIZE; + else limit = 0; - } + INIT_WORK(&ent->work, cache_work_func); 
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); ent->limit = limit; @@ -522,6 +594,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) clean_keys(dev, i); destroy_workqueue(dev->cache.wq); + del_timer_sync(&dev->delay_timer); return 0; } @@ -551,7 +624,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); seg->start_addr = 0; - err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in)); + err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL, + NULL); if (err) goto err_in; @@ -660,14 +734,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, int err; int i; - for (i = 0; i < 10; i++) { + for (i = 0; i < 1; i++) { mr = alloc_cached_mr(dev, order); if (mr) break; err = add_keys(dev, order2idx(dev, order), 1); - if (err) { - mlx5_ib_warn(dev, "add_keys failed\n"); + if (err && err != -EAGAIN) { + mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); break; } } @@ -759,8 +833,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); in->seg.log2_page_size = page_shift; in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen); + in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, + 1 << page_shift)); + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL, + NULL, NULL); if (err) { mlx5_ib_warn(dev, "create mkey failed\n"); goto err_2; @@ -944,7 +1020,8 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, * TBD not needed - issue 197292 */ in->seg.log2_page_size = PAGE_SHIFT; - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in)); + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL, + NULL, NULL); kfree(in); if (err) goto err_free; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 5659ea88074..7c6b4ba49be 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -551,7 +551,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, } mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); (*in)->ctx.log_pg_sz_remote_qpn = - cpu_to_be32((page_shift - PAGE_SHIFT) << 24); + cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); (*in)->ctx.params2 = cpu_to_be32(offset << 6); (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); @@ -648,7 +648,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, goto err_buf; } (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); - (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24); + (*in)->ctx.log_pg_sz_remote_qpn = + cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); /* Set "fast registration enabled" for all kernel QPs */ (*in)->ctx.params1 |= cpu_to_be32(1 << 11); (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); @@ -1317,9 +1318,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RNR_TIMEOUT | - MLX5_QP_OPTPAR_PM_STATE, + MLX5_QP_OPTPAR_PM_STATE | + MLX5_QP_OPTPAR_ALT_ADDR_PATH, [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | - MLX5_QP_OPTPAR_PM_STATE, + MLX5_QP_OPTPAR_PM_STATE | + MLX5_QP_OPTPAR_ALT_ADDR_PATH, [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | MLX5_QP_OPTPAR_SRQN | 
MLX5_QP_OPTPAR_CQN_RCV, @@ -1550,7 +1553,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, mlx5_cur = to_mlx5_state(cur_state); mlx5_new = to_mlx5_state(new_state); mlx5_st = to_mlx5_st(ibqp->qp_type); - if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0) + if (mlx5_st < 0) goto out; optpar = ib_mask_to_mlx5_opt(attr_mask); @@ -1744,6 +1747,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, MLX5_MKEY_MASK_PD | MLX5_MKEY_MASK_LR | MLX5_MKEY_MASK_LW | + MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_RR | MLX5_MKEY_MASK_RW | MLX5_MKEY_MASK_A | @@ -1800,7 +1804,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); seg->len = cpu_to_be64(wr->wr.fast_reg.length); seg->log2_page_size = wr->wr.fast_reg.page_shift; - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | + mlx5_mkey_variant(wr->wr.fast_reg.rkey)); } static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, @@ -1913,6 +1918,10 @@ static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); if (!li) { + if (unlikely(wr->wr.fast_reg.page_list_len > + wr->wr.fast_reg.page_list->max_page_list_len)) + return -ENOMEM; + set_frwr_pages(*seg, wr, mdev, pd, writ); *seg += sizeof(struct mlx5_wqe_data_seg); *size += (sizeof(struct mlx5_wqe_data_seg) / 16); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 0aa478bc291..210b3eaf188 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -123,7 +123,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, goto err_in; } - (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); return 0; @@ -192,7 +192,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, } srq->wq_sig = !!srq_signature; - (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; return 0; @@ -390,9 +390,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq) mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); ib_umem_release(msrq->umem); } else { - kfree(msrq->wrid); - mlx5_buf_free(&dev->mdev, &msrq->buf); - mlx5_db_free(&dev->mdev, &msrq->db); + destroy_srq_kernel(dev, msrq); } kfree(srq); diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 5b53ca5a228..8308e363476 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -2834,7 +2834,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->qp_context = nesqp->ibqp.qp_context; init_attr->send_cq = nesqp->ibqp.send_cq; init_attr->recv_cq = nesqp->ibqp.recv_cq; - init_attr->srq = nesqp->ibqp.srq = nesqp->ibqp.srq; + init_attr->srq = nesqp->ibqp.srq; init_attr->cap = attr->cap; return 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index adc11d14f87..294dd27b601 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -122,6 +122,32 @@ struct mqe_ctx { bool cmd_done; }; +struct ocrdma_hw_mr { + u32 lkey; + u8 fr_mr; + u8 remote_atomic; + u8 remote_rd; + u8 remote_wr; + u8 local_rd; + u8 local_wr; + u8 mw_bind; + u8 rsvd; + u64 len; + struct ocrdma_pbl *pbl_table; + u32 num_pbls; + 
u32 num_pbes; + u32 pbl_size; + u32 pbe_size; + u64 fbo; + u64 va; +}; + +struct ocrdma_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + struct ocrdma_hw_mr hwmr; +}; + struct ocrdma_dev { struct ib_device ibdev; struct ocrdma_dev_attr attr; @@ -169,7 +195,7 @@ struct ocrdma_dev { struct list_head entry; struct rcu_head rcu; int id; - u64 stag_arr[OCRDMA_MAX_STAG]; + struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG]; u16 pvid; }; @@ -294,31 +320,6 @@ struct ocrdma_qp { u16 db_cache; }; -struct ocrdma_hw_mr { - u32 lkey; - u8 fr_mr; - u8 remote_atomic; - u8 remote_rd; - u8 remote_wr; - u8 local_rd; - u8 local_wr; - u8 mw_bind; - u8 rsvd; - u64 len; - struct ocrdma_pbl *pbl_table; - u32 num_pbls; - u32 num_pbes; - u32 pbl_size; - u32 pbe_size; - u64 fbo; - u64 va; -}; - -struct ocrdma_mr { - struct ib_mr ibmr; - struct ib_umem *umem; - struct ocrdma_hw_mr hwmr; -}; struct ocrdma_ucontext { struct ib_ucontext ibucontext; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 50219ab2279..56bf32fcb62 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1783,7 +1783,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, u32 max_sges = attrs->cap.max_send_sge; /* QP1 may exceed 127 */ - max_wqe_allocated = min_t(int, attrs->cap.max_send_wr + 1, + max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1, dev->attr.max_wqe); status = ocrdma_build_q_conf(&max_wqe_allocated, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 0ce7674621e..91443bcb9e0 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -452,9 +452,6 @@ static void ocrdma_remove_free(struct rcu_head *rcu) { struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu); - ocrdma_free_resources(dev); - ocrdma_cleanup_hw(dev); - idr_remove(&ocrdma_dev_id, dev->id); kfree(dev->mbx_cmd); ib_dealloc_device(&dev->ibdev); @@ -470,6 +467,10 @@ static void ocrdma_remove(struct ocrdma_dev *dev) spin_lock(&ocrdma_devlist_lock); list_del_rcu(&dev->entry); spin_unlock(&ocrdma_devlist_lock); + + ocrdma_free_resources(dev); + ocrdma_cleanup_hw(dev); + call_rcu(&dev->rcu, ocrdma_remove_free); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 69f1d1221a6..7686dceadd2 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1981,9 +1981,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); - if ((wr->wr.fast_reg.page_list_len > - qp->dev->attr.max_pages_per_frmr) || - (wr->wr.fast_reg.length > 0xffffffffULL)) + if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) return -EINVAL; hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); @@ -2839,7 +2837,7 @@ struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len) goto mbx_err; mr->ibmr.rkey = mr->hwmr.lkey; mr->ibmr.lkey = mr->hwmr.lkey; - dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (unsigned long) mr; + dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = mr; return &mr->ibmr; mbx_err: ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 016e7429adf..5bfc02f450e 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ 
b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6190,21 +6190,20 @@ static int setup_txselect(const char *str, struct kernel_param *kp) { struct qib_devdata *dd; unsigned long val; - int ret; - + char *n; if (strlen(str) >= MAX_ATTEN_LEN) { pr_info("txselect_values string too long\n"); return -ENOSPC; } - ret = kstrtoul(str, 0, &val); - if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + + val = simple_strtoul(str, &n, 0); + if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ)) { pr_info("txselect_values must start with a number < %d\n", TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); - return ret ? ret : -EINVAL; + return -EINVAL; } - strcpy(txselect_list, str); + list_for_each_entry(dd, &qib_dev_list, list) if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) set_no_qsfp_atten(dd, 1); diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h index 28874f8606f..941d4d50d8e 100644 --- a/drivers/infiniband/hw/qib/qib_mad.h +++ b/drivers/infiniband/hw/qib/qib_mad.h @@ -54,7 +54,7 @@ struct ib_node_info { __be32 revision; u8 local_port_num; u8 vendor_id[3]; -} __attribute__ ((packed)); +} __packed; struct ib_mad_notice_attr { u8 generic_type; @@ -73,7 +73,7 @@ struct ib_mad_notice_attr { __be16 reserved; __be16 lid; /* where violation happened */ u8 port_num; /* where violation happened */ - } __attribute__ ((packed)) ntc_129_131; + } __packed ntc_129_131; struct { __be16 reserved; @@ -83,14 +83,14 @@ struct ib_mad_notice_attr { __be32 new_cap_mask; /* new capability mask */ u8 reserved3; u8 change_flags; /* low 3 bits only */ - } __attribute__ ((packed)) ntc_144; + } __packed ntc_144; struct { __be16 reserved; __be16 lid; /* lid where sys guid changed */ __be16 reserved2; __be64 new_sys_guid; - } __attribute__ ((packed)) ntc_145; + } __packed ntc_145; struct { __be16 reserved; @@ -104,7 +104,7 @@ struct ib_mad_notice_attr { u8 reserved3; u8 dr_trunc_hop; u8 dr_rtn_path[30]; - } __attribute__ ((packed)) ntc_256; + } __packed ntc_256; struct { __be16 reserved; @@ -115,7 +115,7 @@ struct ib_mad_notice_attr { __be32 qp2; /* high 8 bits reserved */ union ib_gid gid1; union ib_gid gid2; - } __attribute__ ((packed)) ntc_257_258; + } __packed ntc_257_258; } details; }; @@ -209,7 +209,7 @@ struct ib_pma_portcounters_cong { __be64 port_rcv_packets; __be64 port_xmit_wait; __be64 port_adr_events; -} __attribute__ ((packed)); +} __packed; #define IB_PMA_CONG_HW_CONTROL_TIMER 0x00 #define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01 diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index d0a0ea0c14d..165aee2ca8a 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -594,8 +594,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd, else j = npages; - ret = get_user_pages(current, current->mm, addr, - j, 0, 1, pages, NULL); + ret = get_user_pages_fast(addr, j, 0, pages); if (ret != j) { i = 0; j = ret; @@ -1294,11 +1293,8 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd, int mxp = 8; int ndesc = 0; - down_write(&current->mm->mmap_sem); ret = qib_user_sdma_queue_pkts(dd, ppd, pq, iov, dim, &list, &mxp, &ndesc); - up_write(&current->mm->mmap_sem); - if (ret < 0) goto done_unlock; else { diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 012e2c7575a..a01c7d2cf54 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -150,14 +150,14 @@ struct ib_reth { __be64 vaddr; __be32 rkey;
__be32 length; -} __attribute__ ((packed)); +} __packed; struct ib_atomic_eth { __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */ __be32 rkey; __be64 swap_data; __be64 compare_data; -} __attribute__ ((packed)); +} __packed; struct qib_other_headers { __be32 bth[3]; @@ -178,7 +178,7 @@ struct qib_other_headers { __be32 aeth; struct ib_atomic_eth atomic_eth; } u; -} __attribute__ ((packed)); +} __packed; /* * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes @@ -195,12 +195,12 @@ struct qib_ib_header { } l; struct qib_other_headers oth; } u; -} __attribute__ ((packed)); +} __packed; struct qib_pio_header { __le32 pbc[2]; struct qib_ib_header hdr; -} __attribute__ ((packed)); +} __packed; /* * There is one struct qib_mcast for each multicast GID. diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index eb71aaa26a9..c639f90cfda 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -101,6 +101,7 @@ enum { IPOIB_MCAST_FLAG_SENDONLY = 1, IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ IPOIB_MCAST_FLAG_ATTACHED = 3, + IPOIB_MCAST_JOIN_STARTED = 4, MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, @@ -151,6 +152,7 @@ struct ipoib_mcast { struct sk_buff_head pkt_queue; struct net_device *dev; + struct completion done; }; struct ipoib_rx_buf { @@ -299,7 +301,7 @@ struct ipoib_dev_priv { unsigned long flags; - struct mutex vlan_mutex; + struct rw_semaphore vlan_rwsem; struct rb_root path_tree; struct list_head path_list; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 7a3175400b2..1377f85911c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -140,7 +140,8 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev, static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, struct ipoib_cm_rx_buf *rx_ring, int id, int frags, - u64 mapping[IPOIB_CM_RX_SG]) + u64 mapping[IPOIB_CM_RX_SG], + gfp_t gfp) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; @@ -164,7 +165,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, } for (i = 0; i < frags; i++) { - struct page *page = alloc_page(GFP_ATOMIC); + struct page *page = alloc_page(gfp); if (!page) goto partial_error; @@ -382,7 +383,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1, - rx->rx_ring[i].mapping)) { + rx->rx_ring[i].mapping, + GFP_KERNEL)) { ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); ret = -ENOMEM; goto err_count; @@ -639,7 +641,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len, (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE; - newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping); + newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, + mapping, GFP_ATOMIC); if (unlikely(!newskb)) { /* * If we can't allocate a new RX buffer, dump @@ -1556,7 +1559,8 @@ int ipoib_cm_dev_init(struct net_device *dev) for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, priv->cm.num_frags - 1, - priv->cm.srq_ring[i].mapping)) { + priv->cm.srq_ring[i].mapping, + GFP_KERNEL)) { ipoib_warn(priv, "failed to allocate " "receive buffer %d\n", i); ipoib_cm_dev_cleanup(dev); diff --git 
a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 196b1d13cbc..6a7003ddb0b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -685,15 +685,13 @@ int ipoib_ib_dev_open(struct net_device *dev) ret = ipoib_ib_post_receives(dev); if (ret) { ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret); - ipoib_ib_dev_stop(dev, 1); - return -1; + goto dev_stop; } ret = ipoib_cm_dev_open(dev); if (ret) { ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret); - ipoib_ib_dev_stop(dev, 1); - return -1; + goto dev_stop; } clear_bit(IPOIB_STOP_REAPER, &priv->flags); @@ -704,6 +702,11 @@ int ipoib_ib_dev_open(struct net_device *dev) napi_enable(&priv->napi); return 0; +dev_stop: + if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) + napi_enable(&priv->napi); + ipoib_ib_dev_stop(dev, 1); + return -1; } static void ipoib_pkey_dev_check_presence(struct net_device *dev) @@ -746,10 +749,8 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush) if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { mutex_lock(&pkey_mutex); set_bit(IPOIB_PKEY_STOP, &priv->flags); - cancel_delayed_work(&priv->pkey_poll_task); + cancel_delayed_work_sync(&priv->pkey_poll_task); mutex_unlock(&pkey_mutex); - if (flush) - flush_workqueue(ipoib_workqueue); } ipoib_mcast_stop_thread(dev, flush); @@ -974,7 +975,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, u16 new_index; int result; - mutex_lock(&priv->vlan_mutex); + down_read(&priv->vlan_rwsem); /* * Flush any child interfaces too -- they might be up even if @@ -983,7 +984,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, list_for_each_entry(cpriv, &priv->child_intfs, list) __ipoib_ib_dev_flush(cpriv, level); - mutex_unlock(&priv->vlan_mutex); + up_read(&priv->vlan_rwsem); if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { /* for non-child devices must check/update the pkey value here */ @@ -1081,6 +1082,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "cleaning up ib_dev\n"); + /* + * We must make sure there are no more (path) completions + * that may wish to touch priv fields that are no longer valid + */ + ipoib_flush_paths(dev); ipoib_mcast_stop_thread(dev, 1); ipoib_mcast_dev_flush(dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 82cec1af902..d64ed05fb08 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -119,7 +119,7 @@ int ipoib_open(struct net_device *dev) struct ipoib_dev_priv *cpriv; /* Bring up any child interfaces too */ - mutex_lock(&priv->vlan_mutex); + down_read(&priv->vlan_rwsem); list_for_each_entry(cpriv, &priv->child_intfs, list) { int flags; @@ -129,7 +129,7 @@ int ipoib_open(struct net_device *dev) dev_change_flags(cpriv->dev, flags | IFF_UP); } - mutex_unlock(&priv->vlan_mutex); + up_read(&priv->vlan_rwsem); } netif_start_queue(dev); @@ -162,7 +162,7 @@ static int ipoib_stop(struct net_device *dev) struct ipoib_dev_priv *cpriv; /* Bring down any child interfaces too */ - mutex_lock(&priv->vlan_mutex); + down_read(&priv->vlan_rwsem); list_for_each_entry(cpriv, &priv->child_intfs, list) { int flags; @@ -172,7 +172,7 @@ static int ipoib_stop(struct net_device *dev) dev_change_flags(cpriv->dev, flags & ~IFF_UP); } - mutex_unlock(&priv->vlan_mutex); + up_read(&priv->vlan_rwsem); } return 0; @@ -1350,7 +1350,7 @@ void ipoib_setup(struct 
net_device *dev) ipoib_set_ethtool_ops(dev); - netif_napi_add(dev, &priv->napi, ipoib_poll, 100); + netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT); dev->watchdog_timeo = HZ; @@ -1372,7 +1372,7 @@ void ipoib_setup(struct net_device *dev) spin_lock_init(&priv->lock); - mutex_init(&priv->vlan_mutex); + init_rwsem(&priv->vlan_rwsem); INIT_LIST_HEAD(&priv->path_list); INIT_LIST_HEAD(&priv->child_intfs); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index cecb98a4c66..d4e005720d0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -386,8 +386,10 @@ static int ipoib_mcast_join_complete(int status, mcast->mcmember.mgid.raw, status); /* We trap for port events ourselves. */ - if (status == -ENETRESET) - return 0; + if (status == -ENETRESET) { + status = 0; + goto out; + } if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); @@ -407,7 +409,8 @@ static int ipoib_mcast_join_complete(int status, if (mcast == priv->broadcast) queue_work(ipoib_workqueue, &priv->carrier_on_task); - return 0; + status = 0; + goto out; } if (mcast->logcount++ < 20) { @@ -434,7 +437,8 @@ static int ipoib_mcast_join_complete(int status, mcast->backoff * HZ); spin_unlock_irq(&priv->lock); mutex_unlock(&mcast_mutex); - +out: + complete(&mcast->done); return status; } @@ -484,11 +488,15 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, } set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags); + mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, comp_mask, GFP_KERNEL, ipoib_mcast_join_complete, mcast); if (IS_ERR(mcast->mc)) { clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + complete(&mcast->done); ret = PTR_ERR(mcast->mc); ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); @@ -510,10 +518,18 @@ void ipoib_mcast_join_task(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, mcast_task.work); struct net_device *dev = priv->dev; + struct ib_port_attr port_attr; if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) return; + if (ib_query_port(priv->ca, priv->port, &port_attr) || + port_attr.state != IB_PORT_ACTIVE) { + ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n", + port_attr.state); + return; + } + if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) ipoib_warn(priv, "ib_query_gid() failed\n"); else @@ -751,6 +767,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); + /* seperate between the wait to the leave*/ + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) + if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags)) + wait_for_completion(&mcast->done); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(dev, mcast); ipoib_mcast_free(mcast); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index f81abe16cf0..c29b5c83883 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -142,10 +142,10 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head priv = netdev_priv(dev); ppriv = netdev_priv(priv->parent); - mutex_lock(&ppriv->vlan_mutex); + down_write(&ppriv->vlan_rwsem); unregister_netdevice_queue(dev, head); 
list_del(&priv->list); - mutex_unlock(&ppriv->vlan_mutex); + up_write(&ppriv->vlan_rwsem); } static size_t ipoib_get_size(const struct net_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 8292554bccb..9fad7b5ac8b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -140,7 +140,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) if (!rtnl_trylock()) return restart_syscall(); - mutex_lock(&ppriv->vlan_mutex); + down_write(&ppriv->vlan_rwsem); /* * First ensure this isn't a duplicate. We check the parent device and @@ -163,7 +163,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD); out: - mutex_unlock(&ppriv->vlan_mutex); + up_write(&ppriv->vlan_rwsem); if (result) free_netdev(priv->dev); @@ -185,7 +185,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) if (!rtnl_trylock()) return restart_syscall(); - mutex_lock(&ppriv->vlan_mutex); + + down_write(&ppriv->vlan_rwsem); list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { if (priv->pkey == pkey && priv->child_type == IPOIB_LEGACY_CHILD) { @@ -195,7 +196,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) break; } } - mutex_unlock(&ppriv->vlan_mutex); + up_write(&ppriv->vlan_rwsem); + rtnl_unlock(); if (dev) { diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index f93baf8254c..a88631918e8 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -46,6 +46,7 @@ #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_dbg.h> +#include <scsi/scsi_tcq.h> #include <scsi/srp.h> #include <scsi/scsi_transport_srp.h> @@ -86,6 +87,32 @@ module_param(topspin_workarounds, int, 0444); MODULE_PARM_DESC(topspin_workarounds, "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); +static struct kernel_param_ops srp_tmo_ops; + +static int srp_reconnect_delay = 10; +module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts"); + +static int srp_fast_io_fail_tmo = 15; +module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_io_fail_tmo, + "Number of seconds between the observation of a transport" + " layer error and failing all I/O. \"off\" means that this" + " functionality is disabled."); + +static int srp_dev_loss_tmo = 600; +module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dev_loss_tmo, + "Maximum number of seconds that the SRP transport should" + " insulate transport layer errors. After this time has been" + " exceeded the SCSI host is removed. Should be" + " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + " if fast_io_fail_tmo has not been set. 
\"off\" means that" + " this functionality is disabled."); + static void srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device); static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); @@ -102,6 +129,48 @@ static struct ib_client srp_client = { static struct ib_sa_client srp_sa_client; +static int srp_tmo_get(char *buffer, const struct kernel_param *kp) +{ + int tmo = *(int *)kp->arg; + + if (tmo >= 0) + return sprintf(buffer, "%d", tmo); + else + return sprintf(buffer, "off"); +} + +static int srp_tmo_set(const char *val, const struct kernel_param *kp) +{ + int tmo, res; + + if (strncmp(val, "off", 3) != 0) { + res = kstrtoint(val, 0, &tmo); + if (res) + goto out; + } else { + tmo = -1; + } + if (kp->arg == &srp_reconnect_delay) + res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, + srp_dev_loss_tmo); + else if (kp->arg == &srp_fast_io_fail_tmo) + res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo); + else + res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo, + tmo); + if (res) + goto out; + *(int *)kp->arg = tmo; + +out: + return res; +} + +static struct kernel_param_ops srp_tmo_ops = { + .get = srp_tmo_get, + .set = srp_tmo_set, +}; + static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) { return (struct srp_target_port *) host->hostdata; @@ -231,16 +300,16 @@ static int srp_create_target_ib(struct srp_target_port *target) return -ENOMEM; recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, - srp_recv_completion, NULL, target, SRP_RQ_SIZE, - target->comp_vector); + srp_recv_completion, NULL, target, + target->queue_size, target->comp_vector); if (IS_ERR(recv_cq)) { ret = PTR_ERR(recv_cq); goto err; } send_cq = ib_create_cq(target->srp_host->srp_dev->dev, - srp_send_completion, NULL, target, SRP_SQ_SIZE, - target->comp_vector); + srp_send_completion, NULL, target, + target->queue_size, target->comp_vector); if (IS_ERR(send_cq)) { ret = PTR_ERR(send_cq); goto err_recv_cq; @@ -249,8 +318,8 @@ static int srp_create_target_ib(struct srp_target_port *target) ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP); init_attr->event_handler = srp_qp_event; - init_attr->cap.max_send_wr = SRP_SQ_SIZE; - init_attr->cap.max_recv_wr = SRP_RQ_SIZE; + init_attr->cap.max_send_wr = target->queue_size; + init_attr->cap.max_recv_wr = target->queue_size; init_attr->cap.max_recv_sge = 1; init_attr->cap.max_send_sge = 1; init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; @@ -296,6 +365,10 @@ err: return ret; } +/* + * Note: this function may be called without srp_alloc_iu_bufs() having been + * invoked. Hence the target->[rt]x_ring checks. 
+ */ static void srp_free_target_ib(struct srp_target_port *target) { int i; @@ -307,10 +380,18 @@ static void srp_free_target_ib(struct srp_target_port *target) target->qp = NULL; target->send_cq = target->recv_cq = NULL; - for (i = 0; i < SRP_RQ_SIZE; ++i) - srp_free_iu(target->srp_host, target->rx_ring[i]); - for (i = 0; i < SRP_SQ_SIZE; ++i) - srp_free_iu(target->srp_host, target->tx_ring[i]); + if (target->rx_ring) { + for (i = 0; i < target->queue_size; ++i) + srp_free_iu(target->srp_host, target->rx_ring[i]); + kfree(target->rx_ring); + target->rx_ring = NULL; + } + if (target->tx_ring) { + for (i = 0; i < target->queue_size; ++i) + srp_free_iu(target->srp_host, target->tx_ring[i]); + kfree(target->tx_ring); + target->tx_ring = NULL; + } } static void srp_path_rec_completion(int status, @@ -390,7 +471,7 @@ static int srp_send_req(struct srp_target_port *target) req->param.responder_resources = 4; req->param.remote_cm_response_timeout = 20; req->param.local_cm_response_timeout = 20; - req->param.retry_count = 7; + req->param.retry_count = target->tl_retry_count; req->param.rnr_retry_count = 7; req->param.max_cm_retries = 15; @@ -496,7 +577,11 @@ static void srp_free_req_data(struct srp_target_port *target) struct srp_request *req; int i; - for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) { + if (!target->req_ring) + return; + + for (i = 0; i < target->req_ring_size; ++i) { + req = &target->req_ring[i]; kfree(req->fmr_list); kfree(req->map_page); if (req->indirect_dma_addr) { @@ -506,6 +591,50 @@ static void srp_free_req_data(struct srp_target_port *target) } kfree(req->indirect_desc); } + + kfree(target->req_ring); + target->req_ring = NULL; +} + +static int srp_alloc_req_data(struct srp_target_port *target) +{ + struct srp_device *srp_dev = target->srp_host->srp_dev; + struct ib_device *ibdev = srp_dev->dev; + struct srp_request *req; + dma_addr_t dma_addr; + int i, ret = -ENOMEM; + + INIT_LIST_HEAD(&target->free_reqs); + + target->req_ring = kzalloc(target->req_ring_size * + sizeof(*target->req_ring), GFP_KERNEL); + if (!target->req_ring) + goto out; + + for (i = 0; i < target->req_ring_size; ++i) { + req = &target->req_ring[i]; + req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), + GFP_KERNEL); + req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *), + GFP_KERNEL); + req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); + if (!req->fmr_list || !req->map_page || !req->indirect_desc) + goto out; + + dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, + target->indirect_size, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(ibdev, dma_addr)) + goto out; + + req->indirect_dma_addr = dma_addr; + req->index = i; + list_add_tail(&req->list, &target->free_reqs); + } + ret = 0; + +out: + return ret; } /** @@ -528,12 +657,20 @@ static void srp_remove_target(struct srp_target_port *target) WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); srp_del_scsi_host_attr(target->scsi_host); + srp_rport_get(target->rport); srp_remove_host(target->scsi_host); scsi_remove_host(target->scsi_host); srp_disconnect_target(target); ib_destroy_cm_id(target->cm_id); srp_free_target_ib(target); + cancel_work_sync(&target->tl_err_work); + srp_rport_put(target->rport); srp_free_req_data(target); + + spin_lock(&target->srp_host->target_lock); + list_del(&target->list); + spin_unlock(&target->srp_host->target_lock); + scsi_host_put(target->scsi_host); } @@ -545,10 +682,6 @@ static void srp_remove_work(struct work_struct *work) WARN_ON_ONCE(target->state != 
SRP_TARGET_REMOVED); srp_remove_target(target); - - spin_lock(&target->srp_host->target_lock); - list_del(&target->list); - spin_unlock(&target->srp_host->target_lock); } static void srp_rport_delete(struct srp_rport *rport) @@ -686,23 +819,42 @@ static void srp_free_req(struct srp_target_port *target, spin_unlock_irqrestore(&target->lock, flags); } -static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) +static void srp_finish_req(struct srp_target_port *target, + struct srp_request *req, int result) { struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); if (scmnd) { srp_free_req(target, req, scmnd, 0); - scmnd->result = DID_RESET << 16; + scmnd->result = result; scmnd->scsi_done(scmnd); } } -static int srp_reconnect_target(struct srp_target_port *target) +static void srp_terminate_io(struct srp_rport *rport) { - struct Scsi_Host *shost = target->scsi_host; - int i, ret; + struct srp_target_port *target = rport->lld_data; + int i; - scsi_target_block(&shost->shost_gendev); + for (i = 0; i < target->req_ring_size; ++i) { + struct srp_request *req = &target->req_ring[i]; + srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16); + } +} + +/* + * It is up to the caller to ensure that srp_rport_reconnect() calls are + * serialized and that no concurrent srp_queuecommand(), srp_abort(), + * srp_reset_device() or srp_reset_host() calls will occur while this function + * is in progress. One way to realize that is not to call this function + * directly but to call srp_reconnect_rport() instead since that last function + * serializes calls of this function via rport->mutex and also blocks + * srp_queuecommand() calls before invoking this function. + */ +static int srp_rport_reconnect(struct srp_rport *rport) +{ + struct srp_target_port *target = rport->lld_data; + int i, ret; srp_disconnect_target(target); /* @@ -721,41 +873,21 @@ static int srp_reconnect_target(struct srp_target_port *target) else srp_create_target_ib(target); - for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { + for (i = 0; i < target->req_ring_size; ++i) { struct srp_request *req = &target->req_ring[i]; - if (req->scmnd) - srp_reset_req(target, req); + srp_finish_req(target, req, DID_RESET << 16); } INIT_LIST_HEAD(&target->free_tx); - for (i = 0; i < SRP_SQ_SIZE; ++i) + for (i = 0; i < target->queue_size; ++i) list_add(&target->tx_ring[i]->list, &target->free_tx); if (ret == 0) ret = srp_connect_target(target); - scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING : - SDEV_TRANSPORT_OFFLINE); - target->transport_offline = !!ret; - - if (ret) - goto err; - - shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n"); - - return ret; - -err: - shost_printk(KERN_ERR, target->scsi_host, - PFX "reconnect failed (%d), removing target port.\n", ret); - - /* - * We couldn't reconnect, so kill our target port off. - * However, we have to defer the real removal because we - * are in the context of the SCSI error handler now, which - * will deadlock if we call scsi_remove_host(). 
- */ - srp_queue_remove_work(target); + if (ret == 0) + shost_printk(KERN_INFO, target->scsi_host, + PFX "reconnect succeeded\n"); return ret; } @@ -1302,15 +1434,30 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) PFX "Recv failed with error code %d\n", res); } -static void srp_handle_qp_err(enum ib_wc_status wc_status, - enum ib_wc_opcode wc_opcode, +/** + * srp_tl_err_work() - handle a transport layer error + * + * Note: This function may get invoked before the rport has been created, + * hence the target->rport test. + */ +static void srp_tl_err_work(struct work_struct *work) +{ + struct srp_target_port *target; + + target = container_of(work, struct srp_target_port, tl_err_work); + if (target->rport) + srp_start_tl_fail_timers(target->rport); +} + +static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err, struct srp_target_port *target) { if (target->connected && !target->qp_in_error) { shost_printk(KERN_ERR, target->scsi_host, PFX "failed %s status %d\n", - wc_opcode & IB_WC_RECV ? "receive" : "send", + send_err ? "send" : "receive", wc_status); + queue_work(system_long_wq, &target->tl_err_work); } target->qp_in_error = true; } @@ -1325,7 +1472,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) if (likely(wc.status == IB_WC_SUCCESS)) { srp_handle_recv(target, &wc); } else { - srp_handle_qp_err(wc.status, wc.opcode, target); + srp_handle_qp_err(wc.status, false, target); } } } @@ -1341,7 +1488,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) iu = (struct srp_iu *) (uintptr_t) wc.wr_id; list_add(&iu->list, &target->free_tx); } else { - srp_handle_qp_err(wc.status, wc.opcode, target); + srp_handle_qp_err(wc.status, true, target); } } } @@ -1349,17 +1496,29 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(shost); + struct srp_rport *rport = target->rport; struct srp_request *req; struct srp_iu *iu; struct srp_cmd *cmd; struct ib_device *dev; unsigned long flags; - int len; + int len, result; + const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; + + /* + * The SCSI EH thread is the only context from which srp_queuecommand() + * can get invoked for blocked devices (SDEV_BLOCK / + * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by + * locking the rport mutex if invoked from inside the SCSI EH. + */ + if (in_scsi_eh) + mutex_lock(&rport->mutex); - if (unlikely(target->transport_offline)) { - scmnd->result = DID_NO_CONNECT << 16; + result = srp_chkready(target->rport); + if (unlikely(result)) { + scmnd->result = result; scmnd->scsi_done(scmnd); - return 0; + goto unlock_rport; } spin_lock_irqsave(&target->lock, flags); @@ -1404,6 +1563,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) goto err_unmap; } +unlock_rport: + if (in_scsi_eh) + mutex_unlock(&rport->mutex); + return 0; err_unmap: @@ -1418,14 +1581,30 @@ err_iu: err_unlock: spin_unlock_irqrestore(&target->lock, flags); + if (in_scsi_eh) + mutex_unlock(&rport->mutex); + return SCSI_MLQUEUE_HOST_BUSY; } +/* + * Note: the resources allocated in this function are freed in + * srp_free_target_ib(). 
+ */ static int srp_alloc_iu_bufs(struct srp_target_port *target) { int i; - for (i = 0; i < SRP_RQ_SIZE; ++i) { + target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring), + GFP_KERNEL); + if (!target->rx_ring) + goto err_no_ring; + target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring), + GFP_KERNEL); + if (!target->tx_ring) + goto err_no_ring; + + for (i = 0; i < target->queue_size; ++i) { target->rx_ring[i] = srp_alloc_iu(target->srp_host, target->max_ti_iu_len, GFP_KERNEL, DMA_FROM_DEVICE); @@ -1433,7 +1612,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) goto err; } - for (i = 0; i < SRP_SQ_SIZE; ++i) { + for (i = 0; i < target->queue_size; ++i) { target->tx_ring[i] = srp_alloc_iu(target->srp_host, target->max_iu_len, GFP_KERNEL, DMA_TO_DEVICE); @@ -1446,16 +1625,18 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) return 0; err: - for (i = 0; i < SRP_RQ_SIZE; ++i) { + for (i = 0; i < target->queue_size; ++i) { srp_free_iu(target->srp_host, target->rx_ring[i]); - target->rx_ring[i] = NULL; - } - - for (i = 0; i < SRP_SQ_SIZE; ++i) { srp_free_iu(target->srp_host, target->tx_ring[i]); - target->tx_ring[i] = NULL; } + +err_no_ring: + kfree(target->tx_ring); + target->tx_ring = NULL; + kfree(target->rx_ring); + target->rx_ring = NULL; + return -ENOMEM; } @@ -1506,6 +1687,9 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, target->scsi_host->can_queue = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, target->scsi_host->can_queue); + target->scsi_host->cmd_per_lun + = min_t(int, target->scsi_host->can_queue, + target->scsi_host->cmd_per_lun); } else { shost_printk(KERN_WARNING, target->scsi_host, PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); @@ -1513,7 +1697,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, goto error; } - if (!target->rx_ring[0]) { + if (!target->rx_ring) { ret = srp_alloc_iu_bufs(target); if (ret) goto error; @@ -1533,7 +1717,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, if (ret) goto error_free; - for (i = 0; i < SRP_RQ_SIZE; i++) { + for (i = 0; i < target->queue_size; i++) { struct srp_iu *iu = target->rx_ring[i]; ret = srp_post_recv(target, iu); if (ret) @@ -1672,6 +1856,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) if (ib_send_cm_drep(cm_id, NULL, 0)) shost_printk(KERN_ERR, target->scsi_host, PFX "Sending CM DREP failed\n"); + queue_work(system_long_wq, &target->tl_err_work); break; case IB_CM_TIMEWAIT_EXIT: @@ -1698,9 +1883,61 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) return 0; } +/** + * srp_change_queue_type - changing device queue tag type + * @sdev: scsi device struct + * @tag_type: requested tag type + * + * Returns queue tag type. + */ +static int +srp_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag_type); + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag_type = 0; + + return tag_type; +} + +/** + * srp_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP + * (see include/scsi/scsi_host.h for definition) + * + * Returns queue depth. 
+ */ +static int +srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth; + if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { + max_depth = shost->can_queue; + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + } else if (reason == SCSI_QDEPTH_QFULL) + scsi_track_queue_full(sdev, qdepth); + else + return -EOPNOTSUPP; + + return sdev->queue_depth; +} + static int srp_send_tsk_mgmt(struct srp_target_port *target, u64 req_tag, unsigned int lun, u8 func) { + struct srp_rport *rport = target->rport; struct ib_device *dev = target->srp_host->srp_dev->dev; struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; @@ -1710,12 +1947,20 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, init_completion(&target->tsk_mgmt_done); + /* + * Lock the rport mutex to avoid that srp_create_target_ib() is + * invoked while a task management function is being sent. + */ + mutex_lock(&rport->mutex); spin_lock_irq(&target->lock); iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); spin_unlock_irq(&target->lock); - if (!iu) + if (!iu) { + mutex_unlock(&rport->mutex); + return -1; + } ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, DMA_TO_DEVICE); @@ -1732,8 +1977,11 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, DMA_TO_DEVICE); if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); + mutex_unlock(&rport->mutex); + return -1; } + mutex_unlock(&rport->mutex); if (!wait_for_completion_timeout(&target->tsk_mgmt_done, msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) @@ -1751,11 +1999,11 @@ static int srp_abort(struct scsi_cmnd *scmnd) shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); if (!req || !srp_claim_req(target, req, scmnd)) - return FAILED; + return SUCCESS; if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, SRP_TSK_ABORT_TASK) == 0) ret = SUCCESS; - else if (target->transport_offline) + else if (target->rport->state == SRP_RPORT_LOST) ret = FAST_IO_FAIL; else ret = FAILED; @@ -1779,10 +2027,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) if (target->tsk_mgmt_status) return FAILED; - for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { + for (i = 0; i < target->req_ring_size; ++i) { struct srp_request *req = &target->req_ring[i]; if (req->scmnd && req->scmnd->device == scmnd->device) - srp_reset_req(target, req); + srp_finish_req(target, req, DID_RESET << 16); } return SUCCESS; @@ -1791,14 +2039,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) static int srp_reset_host(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); - int ret = FAILED; shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); - if (!srp_reconnect_target(target)) - ret = SUCCESS; - - return ret; + return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; } static int srp_slave_configure(struct scsi_device *sdev) @@ -1851,6 +2095,14 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); } +static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%pI6\n", target->path.sgid.raw); +} + static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1907,6 +2159,14 @@ static ssize_t show_comp_vector(struct device *dev, return sprintf(buf, "%d\n", target->comp_vector); } +static ssize_t show_tl_retry_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->tl_retry_count); +} + static ssize_t show_cmd_sg_entries(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1927,6 +2187,7 @@ static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); +static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL); static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); @@ -1934,6 +2195,7 @@ static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); +static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); @@ -1942,6 +2204,7 @@ static struct device_attribute *srp_host_attrs[] = { &dev_attr_ioc_guid, &dev_attr_service_id, &dev_attr_pkey, + &dev_attr_sgid, &dev_attr_dgid, &dev_attr_orig_dgid, &dev_attr_req_lim, @@ -1949,6 +2212,7 @@ static struct device_attribute *srp_host_attrs[] = { &dev_attr_local_ib_port, &dev_attr_local_ib_device, &dev_attr_comp_vector, + &dev_attr_tl_retry_count, &dev_attr_cmd_sg_entries, &dev_attr_allow_ext_sg, NULL @@ -1961,14 +2225,16 @@ static struct scsi_host_template srp_template = { .slave_configure = srp_slave_configure, .info = srp_target_info, .queuecommand = srp_queuecommand, + .change_queue_depth = srp_change_queue_depth, + .change_queue_type = srp_change_queue_type, .eh_abort_handler = srp_abort, .eh_device_reset_handler = srp_reset_device, .eh_host_reset_handler = srp_reset_host, .skip_settle_delay = true, .sg_tablesize = SRP_DEF_SG_TABLESIZE, - .can_queue = SRP_CMD_SQ_SIZE, + .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, .this_id = -1, - .cmd_per_lun = SRP_CMD_SQ_SIZE, + .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = srp_host_attrs }; @@ -1994,6 +2260,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) } rport->lld_data = target; + target->rport = rport; spin_lock(&host->target_lock); list_add_tail(&target->list, &host->target_list); @@ -2073,6 +2340,8 @@ enum { SRP_OPT_ALLOW_EXT_SG = 1 << 10, SRP_OPT_SG_TABLESIZE = 1 << 11, SRP_OPT_COMP_VECTOR = 1 << 12, + SRP_OPT_TL_RETRY_COUNT = 1 << 13, + SRP_OPT_QUEUE_SIZE 
= 1 << 14, SRP_OPT_ALL = (SRP_OPT_ID_EXT | SRP_OPT_IOC_GUID | SRP_OPT_DGID | @@ -2094,6 +2363,8 @@ static const match_table_t srp_opt_tokens = { { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, + { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, + { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, { SRP_OPT_ERR, NULL } }; @@ -2188,13 +2459,25 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->scsi_host->max_sectors = token; break; + case SRP_OPT_QUEUE_SIZE: + if (match_int(args, &token) || token < 1) { + pr_warn("bad queue_size parameter '%s'\n", p); + goto out; + } + target->scsi_host->can_queue = token; + target->queue_size = token + SRP_RSP_SQ_SIZE + + SRP_TSK_MGMT_SQ_SIZE; + if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) + target->scsi_host->cmd_per_lun = token; + break; + case SRP_OPT_MAX_CMD_PER_LUN: - if (match_int(args, &token)) { + if (match_int(args, &token) || token < 1) { pr_warn("bad max cmd_per_lun parameter '%s'\n", p); goto out; } - target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE); + target->scsi_host->cmd_per_lun = token; break; case SRP_OPT_IO_CLASS: @@ -2257,6 +2540,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->comp_vector = token; break; + case SRP_OPT_TL_RETRY_COUNT: + if (match_int(args, &token) || token < 2 || token > 7) { + pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", + p); + goto out; + } + target->tl_retry_count = token; + break; + default: pr_warn("unknown parameter or missing value '%s' in target creation request\n", p); @@ -2273,6 +2565,12 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) pr_warn("target creation request is missing parameter '%s'\n", srp_opt_tokens[i].pattern); + if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue + && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) + pr_warn("cmd_per_lun = %d > queue_size = %d\n", + target->scsi_host->cmd_per_lun, + target->scsi_host->can_queue); + out: kfree(options); return ret; @@ -2287,8 +2585,7 @@ static ssize_t srp_create_target(struct device *dev, struct Scsi_Host *target_host; struct srp_target_port *target; struct ib_device *ibdev = host->srp_dev->dev; - dma_addr_t dma_addr; - int i, ret; + int ret; target_host = scsi_host_alloc(&srp_template, sizeof (struct srp_target_port)); @@ -2311,11 +2608,15 @@ static ssize_t srp_create_target(struct device *dev, target->cmd_sg_cnt = cmd_sg_entries; target->sg_tablesize = indirect_sg_entries ? 
: cmd_sg_entries; target->allow_ext_sg = allow_ext_sg; + target->tl_retry_count = 7; + target->queue_size = SRP_DEFAULT_QUEUE_SIZE; ret = srp_parse_options(buf, target); if (ret) goto err; + target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; + if (!srp_conn_unique(target->srp_host, target)) { shost_printk(KERN_INFO, target->scsi_host, PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", @@ -2339,31 +2640,13 @@ static ssize_t srp_create_target(struct device *dev, sizeof (struct srp_indirect_buf) + target->cmd_sg_cnt * sizeof (struct srp_direct_buf); + INIT_WORK(&target->tl_err_work, srp_tl_err_work); INIT_WORK(&target->remove_work, srp_remove_work); spin_lock_init(&target->lock); INIT_LIST_HEAD(&target->free_tx); - INIT_LIST_HEAD(&target->free_reqs); - for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { - struct srp_request *req = &target->req_ring[i]; - - req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *), - GFP_KERNEL); - req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *), - GFP_KERNEL); - req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); - if (!req->fmr_list || !req->map_page || !req->indirect_desc) - goto err_free_mem; - - dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, - target->indirect_size, - DMA_TO_DEVICE); - if (ib_dma_mapping_error(ibdev, dma_addr)) - goto err_free_mem; - - req->indirect_dma_addr = dma_addr; - req->index = i; - list_add_tail(&req->list, &target->free_reqs); - } + ret = srp_alloc_req_data(target); + if (ret) + goto err_free_mem; ib_query_gid(ibdev, host->port, 0, &target->path.sgid); @@ -2612,7 +2895,14 @@ static void srp_remove_one(struct ib_device *device) } static struct srp_function_template ib_srp_transport_functions = { + .has_rport_state = true, + .reset_timer_if_blocked = true, + .reconnect_delay = &srp_reconnect_delay, + .fast_io_fail_tmo = &srp_fast_io_fail_tmo, + .dev_loss_tmo = &srp_dev_loss_tmo, + .reconnect = srp_rport_reconnect, .rport_delete = srp_rport_delete, + .terminate_rport_io = srp_terminate_io, }; static int __init srp_init_module(void) diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index e641088c14d..575681063f3 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -57,14 +57,11 @@ enum { SRP_MAX_LUN = 512, SRP_DEF_SG_TABLESIZE = 12, - SRP_RQ_SHIFT = 6, - SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, - - SRP_SQ_SIZE = SRP_RQ_SIZE, + SRP_DEFAULT_QUEUE_SIZE = 1 << 6, SRP_RSP_SQ_SIZE = 1, - SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, SRP_TSK_MGMT_SQ_SIZE = 1, - SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, + SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE - + SRP_TSK_MGMT_SQ_SIZE, SRP_TAG_NO_REQ = ~0U, SRP_TAG_TSK_MGMT = 1U << 31, @@ -140,7 +137,6 @@ struct srp_target_port { unsigned int cmd_sg_cnt; unsigned int indirect_size; bool allow_ext_sg; - bool transport_offline; /* Everything above this point is used in the hot path of * command processing. Try to keep them packed into cachelines. 
@@ -153,10 +149,14 @@ struct srp_target_port { u16 io_class; struct srp_host *srp_host; struct Scsi_Host *scsi_host; + struct srp_rport *rport; char target_name[32]; unsigned int scsi_id; unsigned int sg_tablesize; + int queue_size; + int req_ring_size; int comp_vector; + int tl_retry_count; struct ib_sa_path_rec path; __be16 orig_dgid[8]; @@ -172,10 +172,11 @@ struct srp_target_port { int zero_req_lim; - struct srp_iu *tx_ring[SRP_SQ_SIZE]; - struct srp_iu *rx_ring[SRP_RQ_SIZE]; - struct srp_request req_ring[SRP_CMD_SQ_SIZE]; + struct srp_iu **tx_ring; + struct srp_iu **rx_ring; + struct srp_request *req_ring; + struct work_struct tl_err_work; struct work_struct remove_work; struct list_head list; diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index baf2686aa8e..02125e6a910 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) spin_unlock_irqrestore(&card->isdnloop_lock, flags); return -ENOMEM; } - for (i = 0; i < 3; i++) - strcpy(card->s0num[i], sdef.num[i]); + for (i = 0; i < 3; i++) { + strlcpy(card->s0num[i], sdef.num[i], + sizeof(card->s0num[0])); + } break; case ISDN_PTYPE_1TR6: if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95", @@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) spin_unlock_irqrestore(&card->isdnloop_lock, flags); return -ENOMEM; } - strcpy(card->s0num[0], sdef.num[0]); + strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0])); card->s0num[1][0] = '\0'; card->s0num[2][0] = '\0'; break; diff --git a/drivers/md/md.c b/drivers/md/md.c index 8766eabb001..b6b7a2866c9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev) static struct ctl_table_header *raid_table_header; -static ctl_table raid_table[] = { +static struct ctl_table raid_table[] = { { .procname = "speed_limit_min", .data = &sysctl_speed_limit_min, @@ -130,7 +130,7 @@ static ctl_table raid_table[] = { { } }; -static ctl_table raid_dir_table[] = { +static struct ctl_table raid_dir_table[] = { { .procname = "raid", .maxlen = 0, @@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = { { } }; -static ctl_table raid_root_table[] = { +static struct ctl_table raid_root_table[] = { { .procname = "dev", .maxlen = 0, @@ -562,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit) goto retry; } -static inline int mddev_lock(struct mddev * mddev) +static inline int __must_check mddev_lock(struct mddev * mddev) { return mutex_lock_interruptible(&mddev->reconfig_mutex); } +/* Sometimes we need to take the lock in a situation where + * failure due to interrupts is not acceptable. + */ +static inline void mddev_lock_nointr(struct mddev * mddev) +{ + mutex_lock(&mddev->reconfig_mutex); +} + static inline int mddev_is_locked(struct mddev *mddev) { return mutex_is_locked(&mddev->reconfig_mutex); @@ -2978,7 +2986,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) for_each_mddev(mddev, tmp) { struct md_rdev *rdev2; - mddev_lock(mddev); + mddev_lock_nointr(mddev); rdev_for_each(rdev2, mddev) if (rdev->bdev == rdev2->bdev && rdev != rdev2 && @@ -2994,7 +3002,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) break; } } - mddev_lock(my_mddev); + mddev_lock_nointr(my_mddev); if (overlap) { /* Someone else could have slipped in a size * change here, but doing so is just silly. 
@@ -3580,6 +3588,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; del_timer_sync(&mddev->safemode_timer); } + blk_set_stacking_limits(&mddev->queue->limits); pers->run(mddev); set_bit(MD_CHANGE_DEVS, &mddev->flags); mddev_resume(mddev); @@ -5258,7 +5267,7 @@ static void __md_stop_writes(struct mddev *mddev) void md_stop_writes(struct mddev *mddev) { - mddev_lock(mddev); + mddev_lock_nointr(mddev); __md_stop_writes(mddev); mddev_unlock(mddev); } @@ -5291,20 +5300,35 @@ EXPORT_SYMBOL_GPL(md_stop); static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) { int err = 0; + int did_freeze = 0; + + if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { + did_freeze = 1; + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + /* Thread might be blocked waiting for metadata update + * which will now never happen */ + wake_up_process(mddev->sync_thread->tsk); + } + mddev_unlock(mddev); + wait_event(resync_wait, mddev->sync_thread == NULL); + mddev_lock_nointr(mddev); + mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > !!bdev) { + if (atomic_read(&mddev->openers) > !!bdev || + mddev->sync_thread || + (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { printk("md: %s still in use.\n",mdname(mddev)); + if (did_freeze) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } err = -EBUSY; goto out; } - if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { - /* Someone opened the device since we flushed it - * so page cache could be dirty and it is too late - * to flush. So abort - */ - mutex_unlock(&mddev->open_mutex); - return -EBUSY; - } if (mddev->pers) { __md_stop_writes(mddev); @@ -5315,7 +5339,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) set_disk_ro(mddev->gendisk, 1); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); sysfs_notify_dirent_safe(mddev->sysfs_state); - err = 0; + err = 0; } out: mutex_unlock(&mddev->open_mutex); @@ -5331,20 +5355,34 @@ static int do_md_stop(struct mddev * mddev, int mode, { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; + int did_freeze = 0; + + if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { + did_freeze = 1; + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + /* Thread might be blocked waiting for metadata update + * which will now never happen */ + wake_up_process(mddev->sync_thread->tsk); + } + mddev_unlock(mddev); + wait_event(resync_wait, mddev->sync_thread == NULL); + mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); if (atomic_read(&mddev->openers) > !!bdev || - mddev->sysfs_active) { + mddev->sysfs_active || + mddev->sync_thread || + (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { printk("md: %s still in use.\n",mdname(mddev)); mutex_unlock(&mddev->open_mutex); - return -EBUSY; - } - if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { - /* Someone opened the device since we flushed it - * so page cache could be dirty and it is too late - * to flush. 
So abort - */ - mutex_unlock(&mddev->open_mutex); + if (did_freeze) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } return -EBUSY; } if (mddev->pers) { @@ -6551,7 +6589,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_DEVS, &mddev->flags) && !test_bit(MD_CHANGE_PENDING, &mddev->flags)); - mddev_lock(mddev); + mddev_lock_nointr(mddev); } } else { err = -EROFS; @@ -7361,9 +7399,6 @@ void md_do_sync(struct md_thread *thread) mddev->curr_resync = 2; try_again: - if (kthread_should_stop()) - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) goto skip; for_each_mddev(mddev2, tmp) { @@ -7388,7 +7423,7 @@ void md_do_sync(struct md_thread *thread) * be caught by 'softlockup' */ prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); - if (!kthread_should_stop() && + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && mddev2->curr_resync >= mddev->curr_resync) { printk(KERN_INFO "md: delaying %s of %s" " until %s has finished (they" @@ -7464,7 +7499,7 @@ void md_do_sync(struct md_thread *thread) last_check = 0; if (j>2) { - printk(KERN_INFO + printk(KERN_INFO "md: resuming %s of %s from checkpoint.\n", desc, mdname(mddev)); mddev->curr_resync = j; @@ -7501,7 +7536,8 @@ void md_do_sync(struct md_thread *thread) sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } - while (j >= mddev->resync_max && !kthread_should_stop()) { + while (j >= mddev->resync_max && + !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { /* As this condition is controlled by user-space, * we can block indefinitely, so use '_interruptible' * to avoid triggering warnings. @@ -7509,17 +7545,18 @@ void md_do_sync(struct md_thread *thread) flush_signals(current); /* just in case */ wait_event_interruptible(mddev->recovery_wait, mddev->resync_max > j - || kthread_should_stop()); + || test_bit(MD_RECOVERY_INTR, + &mddev->recovery)); } - if (kthread_should_stop()) - goto interrupted; + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; sectors = mddev->pers->sync_request(mddev, j, &skipped, currspeed < speed_min(mddev)); if (sectors == 0) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - goto out; + break; } if (!skipped) { /* actual IO requested */ @@ -7556,10 +7593,8 @@ void md_do_sync(struct md_thread *thread) last_mark = next; } - - if (kthread_should_stop()) - goto interrupted; - + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; /* * this loop exits only if either when we are slower than @@ -7582,11 +7617,12 @@ void md_do_sync(struct md_thread *thread) } } } - printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); + printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, + test_bit(MD_RECOVERY_INTR, &mddev->recovery) + ? "interrupted" : "done"); /* * this also signals 'finished resyncing' to md_stop */ - out: blk_finish_plug(&plug); wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); @@ -7640,16 +7676,6 @@ void md_do_sync(struct md_thread *thread) set_bit(MD_RECOVERY_DONE, &mddev->recovery); md_wakeup_thread(mddev->thread); return; - - interrupted: - /* - * got a signal, exit. - */ - printk(KERN_INFO - "md: md_do_sync() got signal ... 
exiting\n"); - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - goto out; - } EXPORT_SYMBOL_GPL(md_do_sync); @@ -7894,6 +7920,7 @@ void md_reap_sync_thread(struct mddev *mddev) /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); + wake_up(&resync_wait); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index af6681b1977..1e5a540995e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -66,7 +66,8 @@ */ static int max_queued_requests = 1024; -static void allow_barrier(struct r1conf *conf); +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, + sector_t bi_sector); static void lower_barrier(struct r1conf *conf); static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) @@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data) } #define RESYNC_BLOCK_SIZE (64*1024) -//#define RESYNC_BLOCK_SIZE PAGE_SIZE +#define RESYNC_DEPTH 32 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) -#define RESYNC_WINDOW (2048*1024) +#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) +#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) +#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS) static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) { @@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio) struct bio *bio = r1_bio->master_bio; int done; struct r1conf *conf = r1_bio->mddev->private; + sector_t start_next_window = r1_bio->start_next_window; + sector_t bi_sector = bio->bi_sector; if (bio->bi_phys_segments) { unsigned long flags; @@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio) bio->bi_phys_segments--; done = (bio->bi_phys_segments == 0); spin_unlock_irqrestore(&conf->device_lock, flags); + /* + * make_request() might be waiting for + * bi_phys_segments to decrease + */ + wake_up(&conf->wait_barrier); } else done = 1; @@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio) * Wake up any possible resync thread that waits for the device * to go idle. */ - allow_barrier(conf); + allow_barrier(conf, start_next_window, bi_sector); } } @@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf) * there is no normal IO happeing. It must arrange to call * lower_barrier when the particular background IO completes. */ -#define RESYNC_DEPTH 32 - static void raise_barrier(struct r1conf *conf) { spin_lock_irq(&conf->resync_lock); @@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf) /* block any new IO from starting */ conf->barrier++; - /* Now wait for all pending IO to complete */ + /* For these conditions we must wait: + * A: while the array is in frozen state + * B: while barrier >= RESYNC_DEPTH, meaning resync reach + * the max count which allowed. + * C: next_resync + RESYNC_SECTORS > start_next_window, meaning + * next resync will reach to the window which normal bios are + * handling. 
+ */ wait_event_lock_irq(conf->wait_barrier, - !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + !conf->array_frozen && + conf->barrier < RESYNC_DEPTH && + (conf->start_next_window >= + conf->next_resync + RESYNC_SECTORS), conf->resync_lock); spin_unlock_irq(&conf->resync_lock); @@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf) wake_up(&conf->wait_barrier); } -static void wait_barrier(struct r1conf *conf) +static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) { + bool wait = false; + + if (conf->array_frozen || !bio) + wait = true; + else if (conf->barrier && bio_data_dir(bio) == WRITE) { + if (conf->next_resync < RESYNC_WINDOW_SECTORS) + wait = true; + else if ((conf->next_resync - RESYNC_WINDOW_SECTORS + >= bio_end_sector(bio)) || + (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_sector)) + wait = false; + else + wait = true; + } + + return wait; +} + +static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) +{ + sector_t sector = 0; + spin_lock_irq(&conf->resync_lock); - if (conf->barrier) { + if (need_to_wait_for_sync(conf, bio)) { conf->nr_waiting++; /* Wait for the barrier to drop. * However if there are already pending @@ -860,22 +901,67 @@ static void wait_barrier(struct r1conf *conf) * count down. */ wait_event_lock_irq(conf->wait_barrier, - !conf->barrier || - (conf->nr_pending && + !conf->array_frozen && + (!conf->barrier || + ((conf->start_next_window < + conf->next_resync + RESYNC_SECTORS) && current->bio_list && - !bio_list_empty(current->bio_list)), + !bio_list_empty(current->bio_list))), conf->resync_lock); conf->nr_waiting--; } + + if (bio && bio_data_dir(bio) == WRITE) { + if (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_sector) { + if (conf->start_next_window == MaxSector) + conf->start_next_window = + conf->next_resync + + NEXT_NORMALIO_DISTANCE; + + if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) + <= bio->bi_sector) + conf->next_window_requests++; + else + conf->current_window_requests++; + } + if (bio->bi_sector >= conf->start_next_window) + sector = conf->start_next_window; + } + conf->nr_pending++; spin_unlock_irq(&conf->resync_lock); + return sector; } -static void allow_barrier(struct r1conf *conf) +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, + sector_t bi_sector) { unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); conf->nr_pending--; + if (start_next_window) { + if (start_next_window == conf->start_next_window) { + if (conf->start_next_window + NEXT_NORMALIO_DISTANCE + <= bi_sector) + conf->next_window_requests--; + else + conf->current_window_requests--; + } else + conf->current_window_requests--; + + if (!conf->current_window_requests) { + if (conf->next_window_requests) { + conf->current_window_requests = + conf->next_window_requests; + conf->next_window_requests = 0; + conf->start_next_window += + NEXT_NORMALIO_DISTANCE; + } else + conf->start_next_window = MaxSector; + } + } spin_unlock_irqrestore(&conf->resync_lock, flags); wake_up(&conf->wait_barrier); } @@ -884,8 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra) { /* stop syncio and normal IO and wait for everything to * go quite. - * We increment barrier and nr_waiting, and then - * wait until nr_pending match nr_queued+extra + * We wait until nr_pending match nr_queued+extra * This is called in the context of one normal IO request * that has failed. 
Thus any sync request that might be pending * will be blocked by nr_pending, and we need to wait for @@ -895,8 +980,7 @@ static void freeze_array(struct r1conf *conf, int extra) * we continue. */ spin_lock_irq(&conf->resync_lock); - conf->barrier++; - conf->nr_waiting++; + conf->array_frozen = 1; wait_event_lock_irq_cmd(conf->wait_barrier, conf->nr_pending == conf->nr_queued+extra, conf->resync_lock, @@ -907,8 +991,7 @@ static void unfreeze_array(struct r1conf *conf) { /* reverse the effect of the freeze */ spin_lock_irq(&conf->resync_lock); - conf->barrier--; - conf->nr_waiting--; + conf->array_frozen = 0; wake_up(&conf->wait_barrier); spin_unlock_irq(&conf->resync_lock); } @@ -1013,6 +1096,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) int first_clone; int sectors_handled; int max_sectors; + sector_t start_next_window; /* * Register the new request and wait if the reconstruction @@ -1042,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) finish_wait(&conf->wait_barrier, &w); } - wait_barrier(conf); + start_next_window = wait_barrier(conf, bio); bitmap = mddev->bitmap; @@ -1163,6 +1247,7 @@ read_again: disks = conf->raid_disks * 2; retry_write: + r1_bio->start_next_window = start_next_window; blocked_rdev = NULL; rcu_read_lock(); max_sectors = r1_bio->sectors; @@ -1231,14 +1316,24 @@ read_again: if (unlikely(blocked_rdev)) { /* Wait for this device to become unblocked */ int j; + sector_t old = start_next_window; for (j = 0; j < i; j++) if (r1_bio->bios[j]) rdev_dec_pending(conf->mirrors[j].rdev, mddev); r1_bio->state = 0; - allow_barrier(conf); + allow_barrier(conf, start_next_window, bio->bi_sector); md_wait_for_blocked_rdev(blocked_rdev, mddev); - wait_barrier(conf); + start_next_window = wait_barrier(conf, bio); + /* + * We must make sure the multi r1bios of bio have + * the same value of bi_phys_segments + */ + if (bio->bi_phys_segments && old && + old != start_next_window) + /* Wait for the former r1bio(s) to complete */ + wait_event(conf->wait_barrier, + bio->bi_phys_segments == 1); goto retry_write; } @@ -1438,11 +1533,14 @@ static void print_conf(struct r1conf *conf) static void close_sync(struct r1conf *conf) { - wait_barrier(conf); - allow_barrier(conf); + wait_barrier(conf, NULL); + allow_barrier(conf, 0, 0); mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; + + conf->next_resync = 0; + conf->start_next_window = MaxSector; } static int raid1_spare_active(struct mddev *mddev) @@ -2714,6 +2812,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) conf->pending_count = 0; conf->recovery_disabled = mddev->recovery_disabled - 1; + conf->start_next_window = MaxSector; + conf->current_window_requests = conf->next_window_requests = 0; + err = -EIO; for (i = 0; i < conf->raid_disks * 2; i++) { @@ -2871,8 +2972,8 @@ static int stop(struct mddev *mddev) atomic_read(&bitmap->behind_writes) == 0); } - raise_barrier(conf); - lower_barrier(conf); + freeze_array(conf, 0); + unfreeze_array(conf); md_unregister_thread(&mddev->thread); if (conf->r1bio_pool) @@ -3031,10 +3132,10 @@ static void raid1_quiesce(struct mddev *mddev, int state) wake_up(&conf->wait_barrier); break; case 1: - raise_barrier(conf); + freeze_array(conf, 0); break; case 0: - lower_barrier(conf); + unfreeze_array(conf); break; } } @@ -3051,7 +3152,8 @@ static void *raid1_takeover(struct mddev *mddev) mddev->new_chunk_sectors = 0; conf = setup_conf(mddev); if (!IS_ERR(conf)) - conf->barrier = 1; + /* Array must appear to be quiesced */ + conf->array_frozen = 1; 
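Editor's aside: the write-gating rule that need_to_wait_for_sync() introduces above can be read in isolation. The stand-alone sketch below uses plain integers in place of sector_t and bio fields and a hypothetical must_wait_for_resync() helper; it illustrates the rule under those assumptions and is not the kernel function itself. A write may proceed while a resync barrier is raised only if it ends at or before the region the resync window has already passed, or starts at least NEXT_NORMALIO_DISTANCE beyond the current resync point.

    #include <stdbool.h>
    #include <stdint.h>

    /* Constants mirror the raid1.c definitions above (64 KiB blocks, depth 32). */
    #define RESYNC_BLOCK_SIZE      (64 * 1024)
    #define RESYNC_DEPTH           32
    #define RESYNC_WINDOW          (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
    #define RESYNC_WINDOW_SECTORS  (RESYNC_WINDOW >> 9)
    #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)

    /* Hypothetical helper: the same decision need_to_wait_for_sync() makes for
     * a WRITE while a resync barrier is raised, with no locking or bio types. */
    static bool must_wait_for_resync(uint64_t next_resync,
                                     uint64_t write_start, uint64_t write_end)
    {
            if (next_resync < RESYNC_WINDOW_SECTORS)
                    return true;    /* resync is still in its first window */
            if (next_resync - RESYNC_WINDOW_SECTORS >= write_end)
                    return false;   /* write lies entirely behind the resync */
            if (next_resync + NEXT_NORMALIO_DISTANCE <= write_start)
                    return false;   /* write lies far ahead of the resync */
            return true;            /* write overlaps the protected window */
    }

Writes that fall far enough ahead are the ones counted in current_window_requests / next_window_requests, which is what lets allow_barrier() advance start_next_window once they drain.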
return conf; } return ERR_PTR(-EINVAL); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 0ff3715fb7e..9bebca7bff2 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -41,6 +41,19 @@ struct r1conf { */ sector_t next_resync; + /* When raid1 starts resync, we divide array into four partitions + * |---------|--------------|---------------------|-------------| + * next_resync start_next_window end_window + * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE + * end_window = start_next_window + NEXT_NORMALIO_DISTANCE + * current_window_requests means the count of normalIO between + * start_next_window and end_window. + * next_window_requests means the count of normalIO after end_window. + * */ + sector_t start_next_window; + int current_window_requests; + int next_window_requests; + spinlock_t device_lock; /* list of 'struct r1bio' that need to be processed by raid1d, @@ -65,6 +78,7 @@ struct r1conf { int nr_waiting; int nr_queued; int barrier; + int array_frozen; /* Set to 1 if a full sync is needed, (fresh device added). * Cleared when a sync completes. @@ -111,6 +125,7 @@ struct r1bio { * in this BehindIO request */ sector_t sector; + sector_t start_next_window; int sectors; unsigned long state; struct mddev *mddev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7c3508abb5e..c504e8389e6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4384,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, mddev->flags == 0 || - kthread_should_stop()); + test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { + allow_barrier(conf); + return sectors_done; + } conf->reshape_safe = mddev->reshape_position; allow_barrier(conf); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7f0e17a27ae..47da0af6322 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) return &conf->stripe_hashtbl[hash]; } +static inline int stripe_hash_locks_hash(sector_t sect) +{ + return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; +} + +static inline void lock_device_hash_lock(struct r5conf *conf, int hash) +{ + spin_lock_irq(conf->hash_locks + hash); + spin_lock(&conf->device_lock); +} + +static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) +{ + spin_unlock(&conf->device_lock); + spin_unlock_irq(conf->hash_locks + hash); +} + +static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) +{ + int i; + local_irq_disable(); + spin_lock(conf->hash_locks); + for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) + spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); + spin_lock(&conf->device_lock); +} + +static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) +{ + int i; + spin_unlock(&conf->device_lock); + for (i = NR_STRIPE_HASH_LOCKS; i; i--) + spin_unlock(conf->hash_locks + i - 1); + local_irq_enable(); +} + /* bio's attached to a stripe+device for I/O are linked together in bi_sector * order without overlap. There may be several bio's per stripe+device, and * a bio could span several devices. 
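A quick illustration of how the new per-bucket locking in raid5 spreads work: the hash used to pick one of the NR_STRIPE_HASH_LOCKS spinlocks (and the matching inactive_list) comes straight from the stripe's starting sector, as in stripe_hash_locks_hash() above. The sketch below is stand-alone and assumes the conventional STRIPE_SHIFT of 3 (4 KiB stripes, i.e. 8 sectors); that value is not shown in this diff and is an assumption.

    #include <stdio.h>
    #include <stdint.h>

    #define NR_STRIPE_HASH_LOCKS   8
    #define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
    #define STRIPE_SHIFT           3    /* assumed: 4 KiB stripe = 8 sectors */

    /* Same arithmetic as stripe_hash_locks_hash() in the patch. */
    static int stripe_hash_locks_hash(uint64_t sect)
    {
            return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
    }

    int main(void)
    {
            /* Eight consecutive stripes map to eight different buckets, so
             * get/release of unrelated stripes no longer serialize on one lock. */
            for (uint64_t sect = 0; sect < 8 * 8; sect += 8)
                    printf("sector %2llu -> hash lock %d\n",
                           (unsigned long long)sect, stripe_hash_locks_hash(sect));
            return 0;
    }

When every bucket must be held at once (quiesce, generation changes), the patch takes the locks in index order with hash_locks[0] as lockdep's reference lock.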
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh) } } -static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, + struct list_head *temp_inactive_list) { BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); @@ -278,23 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) { - list_add_tail(&sh->lru, &conf->inactive_list); - wake_up(&conf->wait_for_stripe); - if (conf->retry_read_aligned) - md_wakeup_thread(conf->mddev->thread); - } + if (!test_bit(STRIPE_EXPANDING, &sh->state)) + list_add_tail(&sh->lru, temp_inactive_list); } } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, + struct list_head *temp_inactive_list) { if (atomic_dec_and_test(&sh->count)) - do_release_stripe(conf, sh); + do_release_stripe(conf, sh, temp_inactive_list); +} + +/* + * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list + * + * Be careful: Only one task can add/delete stripes from temp_inactive_list at + * given time. Adding stripes only takes device lock, while deleting stripes + * only takes hash lock. + */ +static void release_inactive_stripe_list(struct r5conf *conf, + struct list_head *temp_inactive_list, + int hash) +{ + int size; + bool do_wakeup = false; + unsigned long flags; + + if (hash == NR_STRIPE_HASH_LOCKS) { + size = NR_STRIPE_HASH_LOCKS; + hash = NR_STRIPE_HASH_LOCKS - 1; + } else + size = 1; + while (size) { + struct list_head *list = &temp_inactive_list[size - 1]; + + /* + * We don't hold any lock here yet, get_active_stripe() might + * remove stripes from the list + */ + if (!list_empty_careful(list)) { + spin_lock_irqsave(conf->hash_locks + hash, flags); + if (list_empty(conf->inactive_list + hash) && + !list_empty(list)) + atomic_dec(&conf->empty_inactive_list_nr); + list_splice_tail_init(list, conf->inactive_list + hash); + do_wakeup = true; + spin_unlock_irqrestore(conf->hash_locks + hash, flags); + } + size--; + hash--; + } + + if (do_wakeup) { + wake_up(&conf->wait_for_stripe); + if (conf->retry_read_aligned) + md_wakeup_thread(conf->mddev->thread); + } } /* should hold conf->device_lock already */ -static int release_stripe_list(struct r5conf *conf) +static int release_stripe_list(struct r5conf *conf, + struct list_head *temp_inactive_list) { struct stripe_head *sh; int count = 0; @@ -303,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf) head = llist_del_all(&conf->released_stripes); head = llist_reverse_order(head); while (head) { + int hash; + sh = llist_entry(head, struct stripe_head, release_list); head = llist_next(head); /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ @@ -313,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf) * again, the count is always > 1. This is true for * STRIPE_ON_UNPLUG_LIST bit too. 
*/ - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &temp_inactive_list[hash]); count++; } @@ -324,9 +409,12 @@ static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; + struct list_head list; + int hash; bool wakeup; - if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) + if (unlikely(!conf->mddev->thread) || + test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) goto slow_path; wakeup = llist_add(&sh->release_list, &conf->released_stripes); if (wakeup) @@ -336,8 +424,11 @@ slow_path: local_irq_save(flags); /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { - do_release_stripe(conf, sh); + INIT_LIST_HEAD(&list); + hash = sh->hash_lock_index; + do_release_stripe(conf, sh, &list); spin_unlock(&conf->device_lock); + release_inactive_stripe_list(conf, &list, hash); } local_irq_restore(flags); } @@ -362,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) /* find an idle stripe, make sure it is unhashed, and return it. */ -static struct stripe_head *get_free_stripe(struct r5conf *conf) +static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh = NULL; struct list_head *first; - if (list_empty(&conf->inactive_list)) + if (list_empty(conf->inactive_list + hash)) goto out; - first = conf->inactive_list.next; + first = (conf->inactive_list + hash)->next; sh = list_entry(first, struct stripe_head, lru); list_del_init(first); remove_hash(sh); atomic_inc(&conf->active_stripes); + BUG_ON(hash != sh->hash_lock_index); + if (list_empty(conf->inactive_list + hash)) + atomic_inc(&conf->empty_inactive_list_nr); out: return sh; } @@ -416,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) { struct r5conf *conf = sh->raid_conf; - int i; + int i, seq; BUG_ON(atomic_read(&sh->count) != 0); BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); @@ -426,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) (unsigned long long)sh->sector); remove_hash(sh); - +retry: + seq = read_seqcount_begin(&conf->gen_lock); sh->generation = conf->generation - previous; sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; sh->sector = sector; @@ -448,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) dev->flags = 0; raid5_build_block(sh, i, previous); } + if (read_seqcount_retry(&conf->gen_lock, seq)) + goto retry; insert_hash(conf, sh); sh->cpu = smp_processor_id(); } @@ -552,29 +649,31 @@ get_active_stripe(struct r5conf *conf, sector_t sector, int previous, int noblock, int noquiesce) { struct stripe_head *sh; + int hash = stripe_hash_locks_hash(sector); pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); - spin_lock_irq(&conf->device_lock); + spin_lock_irq(conf->hash_locks + hash); do { wait_event_lock_irq(conf->wait_for_stripe, conf->quiesce == 0 || noquiesce, - conf->device_lock); + *(conf->hash_locks + hash)); sh = __find_stripe(conf, sector, conf->generation - previous); if (!sh) { if (!conf->inactive_blocked) - sh = get_free_stripe(conf); + sh = get_free_stripe(conf, hash); if (noblock && sh == NULL) break; if (!sh) { conf->inactive_blocked = 1; - wait_event_lock_irq(conf->wait_for_stripe, - !list_empty(&conf->inactive_list) && - (atomic_read(&conf->active_stripes) - < (conf->max_nr_stripes *3/4) - || !conf->inactive_blocked), - conf->device_lock); + wait_event_lock_irq( + conf->wait_for_stripe, + !list_empty(conf->inactive_list + hash) && + (atomic_read(&conf->active_stripes) + < (conf->max_nr_stripes * 3 / 4) + || !conf->inactive_blocked), + *(conf->hash_locks + hash)); conf->inactive_blocked = 0; } else init_stripe(sh, sector, previous); @@ -585,9 +684,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector, && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); } else { + spin_lock(&conf->device_lock); if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); if (list_empty(&sh->lru) && + !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state) && !test_bit(STRIPE_EXPANDING, &sh->state)) BUG(); list_del_init(&sh->lru); @@ -595,6 +696,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector, sh->group->stripes_cnt--; sh->group = NULL; } + spin_unlock(&conf->device_lock); } } } while (sh == NULL); @@ -602,7 +704,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector, if (sh) atomic_inc(&sh->count); - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(conf->hash_locks + hash); return sh; } @@ -758,7 +860,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_sector = (sh->sector + rdev->data_offset); if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) - bi->bi_rw |= REQ_FLUSH; + bi->bi_rw |= REQ_NOMERGE; bi->bi_vcnt = 1; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -1582,7 +1684,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) put_cpu(); } -static int grow_one_stripe(struct r5conf *conf) +static int grow_one_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh; sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); @@ -1598,6 +1700,7 @@ static int grow_one_stripe(struct r5conf *conf) kmem_cache_free(conf->slab_cache, sh); return 0; } + sh->hash_lock_index = hash; /* we just created an active stripe so... 
*/ atomic_set(&sh->count, 1); atomic_inc(&conf->active_stripes); @@ -1610,6 +1713,7 @@ static int grow_stripes(struct r5conf *conf, int num) { struct kmem_cache *sc; int devs = max(conf->raid_disks, conf->previous_raid_disks); + int hash; if (conf->mddev->gendisk) sprintf(conf->cache_name[0], @@ -1627,9 +1731,13 @@ static int grow_stripes(struct r5conf *conf, int num) return 1; conf->slab_cache = sc; conf->pool_size = devs; - while (num--) - if (!grow_one_stripe(conf)) + hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; + while (num--) { + if (!grow_one_stripe(conf, hash)) return 1; + conf->max_nr_stripes++; + hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; + } return 0; } @@ -1687,6 +1795,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) int err; struct kmem_cache *sc; int i; + int hash, cnt; if (newsize <= conf->pool_size) return 0; /* never bother to shrink */ @@ -1726,19 +1835,29 @@ static int resize_stripes(struct r5conf *conf, int newsize) * OK, we have enough stripes, start collecting inactive * stripes and copying them over */ + hash = 0; + cnt = 0; list_for_each_entry(nsh, &newstripes, lru) { - spin_lock_irq(&conf->device_lock); - wait_event_lock_irq(conf->wait_for_stripe, - !list_empty(&conf->inactive_list), - conf->device_lock); - osh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); + lock_device_hash_lock(conf, hash); + wait_event_cmd(conf->wait_for_stripe, + !list_empty(conf->inactive_list + hash), + unlock_device_hash_lock(conf, hash), + lock_device_hash_lock(conf, hash)); + osh = get_free_stripe(conf, hash); + unlock_device_hash_lock(conf, hash); atomic_set(&nsh->count, 1); for(i=0; i<conf->pool_size; i++) nsh->dev[i].page = osh->dev[i].page; for( ; i<newsize; i++) nsh->dev[i].page = NULL; + nsh->hash_lock_index = hash; kmem_cache_free(conf->slab_cache, osh); + cnt++; + if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + + !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { + hash++; + cnt = 0; + } } kmem_cache_destroy(conf->slab_cache); @@ -1797,13 +1916,13 @@ static int resize_stripes(struct r5conf *conf, int newsize) return err; } -static int drop_one_stripe(struct r5conf *conf) +static int drop_one_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh; - spin_lock_irq(&conf->device_lock); - sh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); + spin_lock_irq(conf->hash_locks + hash); + sh = get_free_stripe(conf, hash); + spin_unlock_irq(conf->hash_locks + hash); if (!sh) return 0; BUG_ON(atomic_read(&sh->count)); @@ -1815,8 +1934,10 @@ static int drop_one_stripe(struct r5conf *conf) static void shrink_stripes(struct r5conf *conf) { - while (drop_one_stripe(conf)) - ; + int hash; + for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++) + while (drop_one_stripe(conf, hash)) + ; if (conf->slab_cache) kmem_cache_destroy(conf->slab_cache); @@ -1921,6 +2042,9 @@ static void raid5_end_read_request(struct bio * bi, int error) mdname(conf->mddev), bdn); else retry = 1; + if (set_bad && test_bit(In_sync, &rdev->flags) + && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + retry = 1; if (retry) if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { set_bit(R5_ReadError, &sh->dev[i].flags); @@ -3900,7 +4024,8 @@ static void raid5_activate_delayed(struct r5conf *conf) } } -static void activate_bit_delay(struct r5conf *conf) +static void activate_bit_delay(struct r5conf *conf, + struct list_head *temp_inactive_list) { /* device_lock is held */ struct list_head head; @@ -3908,9 +4033,11 @@ static void activate_bit_delay(struct r5conf 
*conf) list_del_init(&conf->bitmap_list); while (!list_empty(&head)) { struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); + int hash; list_del_init(&sh->lru); atomic_inc(&sh->count); - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &temp_inactive_list[hash]); } } @@ -3926,7 +4053,7 @@ int md_raid5_congested(struct mddev *mddev, int bits) return 1; if (conf->quiesce) return 1; - if (list_empty_careful(&conf->inactive_list)) + if (atomic_read(&conf->empty_inactive_list_nr)) return 1; return 0; @@ -4256,6 +4383,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) struct raid5_plug_cb { struct blk_plug_cb cb; struct list_head list; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; }; static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) @@ -4266,6 +4394,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) struct mddev *mddev = cb->cb.data; struct r5conf *conf = mddev->private; int cnt = 0; + int hash; if (cb->list.next && !list_empty(&cb->list)) { spin_lock_irq(&conf->device_lock); @@ -4283,11 +4412,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) * STRIPE_ON_RELEASE_LIST could be set here. In that * case, the count is always > 1 here */ - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); cnt++; } spin_unlock_irq(&conf->device_lock); } + release_inactive_stripe_list(conf, cb->temp_inactive_list, + NR_STRIPE_HASH_LOCKS); if (mddev->queue) trace_block_unplug(mddev->queue, cnt, !from_schedule); kfree(cb); @@ -4308,8 +4440,12 @@ static void release_stripe_plug(struct mddev *mddev, cb = container_of(blk_cb, struct raid5_plug_cb, cb); - if (cb->list.next == NULL) + if (cb->list.next == NULL) { + int i; INIT_LIST_HEAD(&cb->list); + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(cb->temp_inactive_list + i); + } if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) list_add_tail(&sh->lru, &cb->list); @@ -4692,14 +4828,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { /* Cannot proceed until we've updated the superblock... */ wait_event(conf->wait_for_overlap, - atomic_read(&conf->reshape_stripes)==0); + atomic_read(&conf->reshape_stripes)==0 + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (atomic_read(&conf->reshape_stripes) != 0) + return 0; mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, mddev->flags == 0 || - kthread_should_stop()); + test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + return 0; spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); @@ -4782,7 +4923,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk >= mddev->resync_max - mddev->curr_resync_completed) { /* Cannot proceed until we've updated the superblock... 
*/ wait_event(conf->wait_for_overlap, - atomic_read(&conf->reshape_stripes) == 0); + atomic_read(&conf->reshape_stripes) == 0 + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (atomic_read(&conf->reshape_stripes) != 0) + goto ret; mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; @@ -4790,13 +4934,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_DEVS, &mddev->flags) - || kthread_should_stop()); + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + goto ret; spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); wake_up(&conf->wait_for_overlap); sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } +ret: return reshape_sectors; } @@ -4954,27 +5101,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) } static int handle_active_stripes(struct r5conf *conf, int group, - struct r5worker *worker) + struct r5worker *worker, + struct list_head *temp_inactive_list) { struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; - int i, batch_size = 0; + int i, batch_size = 0, hash; + bool release_inactive = false; while (batch_size < MAX_STRIPE_BATCH && (sh = __get_priority_stripe(conf, group)) != NULL) batch[batch_size++] = sh; - if (batch_size == 0) - return batch_size; + if (batch_size == 0) { + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + if (!list_empty(temp_inactive_list + i)) + break; + if (i == NR_STRIPE_HASH_LOCKS) + return batch_size; + release_inactive = true; + } spin_unlock_irq(&conf->device_lock); + release_inactive_stripe_list(conf, temp_inactive_list, + NR_STRIPE_HASH_LOCKS); + + if (release_inactive) { + spin_lock_irq(&conf->device_lock); + return 0; + } + for (i = 0; i < batch_size; i++) handle_stripe(batch[i]); cond_resched(); spin_lock_irq(&conf->device_lock); - for (i = 0; i < batch_size; i++) - __release_stripe(conf, batch[i]); + for (i = 0; i < batch_size; i++) { + hash = batch[i]->hash_lock_index; + __release_stripe(conf, batch[i], &temp_inactive_list[hash]); + } return batch_size; } @@ -4995,9 +5160,10 @@ static void raid5_do_work(struct work_struct *work) while (1) { int batch_size, released; - released = release_stripe_list(conf); + released = release_stripe_list(conf, worker->temp_inactive_list); - batch_size = handle_active_stripes(conf, group_id, worker); + batch_size = handle_active_stripes(conf, group_id, worker, + worker->temp_inactive_list); worker->working = false; if (!batch_size && !released) break; @@ -5036,7 +5202,7 @@ static void raid5d(struct md_thread *thread) struct bio *bio; int batch_size, released; - released = release_stripe_list(conf); + released = release_stripe_list(conf, conf->temp_inactive_list); if ( !list_empty(&conf->bitmap_list)) { @@ -5046,7 +5212,7 @@ static void raid5d(struct md_thread *thread) bitmap_unplug(mddev->bitmap); spin_lock_irq(&conf->device_lock); conf->seq_write = conf->seq_flush; - activate_bit_delay(conf); + activate_bit_delay(conf, conf->temp_inactive_list); } raid5_activate_delayed(conf); @@ -5060,7 +5226,8 @@ static void raid5d(struct md_thread *thread) handled++; } - batch_size = handle_active_stripes(conf, ANY_GROUP, NULL); + batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, + conf->temp_inactive_list); if (!batch_size && !released) break; handled += batch_size; @@ -5096,22 +5263,29 @@ 
raid5_set_cache_size(struct mddev *mddev, int size) { struct r5conf *conf = mddev->private; int err; + int hash; if (size <= 16 || size > 32768) return -EINVAL; + hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; while (size < conf->max_nr_stripes) { - if (drop_one_stripe(conf)) + if (drop_one_stripe(conf, hash)) conf->max_nr_stripes--; else break; + hash--; + if (hash < 0) + hash = NR_STRIPE_HASH_LOCKS - 1; } err = md_allow_write(mddev); if (err) return err; + hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; while (size > conf->max_nr_stripes) { - if (grow_one_stripe(conf)) + if (grow_one_stripe(conf, hash)) conf->max_nr_stripes++; else break; + hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; } return 0; } @@ -5199,15 +5373,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page) return 0; } -static int alloc_thread_groups(struct r5conf *conf, int cnt); +static int alloc_thread_groups(struct r5conf *conf, int cnt, + int *group_cnt, + int *worker_cnt_per_group, + struct r5worker_group **worker_groups); static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) { struct r5conf *conf = mddev->private; unsigned long new; int err; - struct r5worker_group *old_groups; - int old_group_cnt; + struct r5worker_group *new_groups, *old_groups; + int group_cnt, worker_cnt_per_group; if (len >= PAGE_SIZE) return -EINVAL; @@ -5223,14 +5400,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) mddev_suspend(mddev); old_groups = conf->worker_groups; - old_group_cnt = conf->worker_cnt_per_group; + if (old_groups) + flush_workqueue(raid5_wq); + + err = alloc_thread_groups(conf, new, + &group_cnt, &worker_cnt_per_group, + &new_groups); + if (!err) { + spin_lock_irq(&conf->device_lock); + conf->group_cnt = group_cnt; + conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_groups = new_groups; + spin_unlock_irq(&conf->device_lock); - conf->worker_groups = NULL; - err = alloc_thread_groups(conf, new); - if (err) { - conf->worker_groups = old_groups; - conf->worker_cnt_per_group = old_group_cnt; - } else { if (old_groups) kfree(old_groups[0].workers); kfree(old_groups); @@ -5260,40 +5442,47 @@ static struct attribute_group raid5_attrs_group = { .attrs = raid5_attrs, }; -static int alloc_thread_groups(struct r5conf *conf, int cnt) +static int alloc_thread_groups(struct r5conf *conf, int cnt, + int *group_cnt, + int *worker_cnt_per_group, + struct r5worker_group **worker_groups) { - int i, j; + int i, j, k; ssize_t size; struct r5worker *workers; - conf->worker_cnt_per_group = cnt; + *worker_cnt_per_group = cnt; if (cnt == 0) { - conf->worker_groups = NULL; + *group_cnt = 0; + *worker_groups = NULL; return 0; } - conf->group_cnt = num_possible_nodes(); + *group_cnt = num_possible_nodes(); size = sizeof(struct r5worker) * cnt; - workers = kzalloc(size * conf->group_cnt, GFP_NOIO); - conf->worker_groups = kzalloc(sizeof(struct r5worker_group) * - conf->group_cnt, GFP_NOIO); - if (!conf->worker_groups || !workers) { + workers = kzalloc(size * *group_cnt, GFP_NOIO); + *worker_groups = kzalloc(sizeof(struct r5worker_group) * + *group_cnt, GFP_NOIO); + if (!*worker_groups || !workers) { kfree(workers); - kfree(conf->worker_groups); - conf->worker_groups = NULL; + kfree(*worker_groups); return -ENOMEM; } - for (i = 0; i < conf->group_cnt; i++) { + for (i = 0; i < *group_cnt; i++) { struct r5worker_group *group; - group = &conf->worker_groups[i]; + group = worker_groups[i]; INIT_LIST_HEAD(&group->handle_list); group->conf = 
conf; group->workers = workers + i * cnt; for (j = 0; j < cnt; j++) { - group->workers[j].group = group; - INIT_WORK(&group->workers[j].work, raid5_do_work); + struct r5worker *worker = group->workers + j; + worker->group = group; + INIT_WORK(&worker->work, raid5_do_work); + + for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) + INIT_LIST_HEAD(worker->temp_inactive_list + k); } } @@ -5444,6 +5633,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) struct md_rdev *rdev; struct disk_info *disk; char pers_name[6]; + int i; + int group_cnt, worker_cnt_per_group; + struct r5worker_group *new_group; if (mddev->new_level != 5 && mddev->new_level != 4 @@ -5478,7 +5670,12 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (conf == NULL) goto abort; /* Don't enable multi-threading by default */ - if (alloc_thread_groups(conf, 0)) + if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, + &new_group)) { + conf->group_cnt = group_cnt; + conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_groups = new_group; + } else goto abort; spin_lock_init(&conf->device_lock); seqcount_init(&conf->gen_lock); @@ -5488,7 +5685,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) INIT_LIST_HEAD(&conf->hold_list); INIT_LIST_HEAD(&conf->delayed_list); INIT_LIST_HEAD(&conf->bitmap_list); - INIT_LIST_HEAD(&conf->inactive_list); init_llist_head(&conf->released_stripes); atomic_set(&conf->active_stripes, 0); atomic_set(&conf->preread_active_stripes, 0); @@ -5514,6 +5710,21 @@ static struct r5conf *setup_conf(struct mddev *mddev) if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) goto abort; + /* We init hash_locks[0] separately so that it can be used + * as the reference lock in the spin_lock_nest_lock() call + * in lock_all_device_hash_locks_irq in order to convince + * lockdep that we know what we are doing.
+ */ + spin_lock_init(conf->hash_locks); + for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) + spin_lock_init(conf->hash_locks + i); + + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(conf->inactive_list + i); + + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(conf->temp_inactive_list + i); + conf->level = mddev->new_level; if (raid5_alloc_percpu(conf) != 0) goto abort; @@ -5554,7 +5765,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) else conf->max_degraded = 1; conf->algorithm = mddev->new_layout; - conf->max_nr_stripes = NR_STRIPES; conf->reshape_progress = mddev->reshape_position; if (conf->reshape_progress != MaxSector) { conf->prev_chunk_sectors = mddev->chunk_sectors; @@ -5563,7 +5773,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; - if (grow_stripes(conf, conf->max_nr_stripes)) { + atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); + if (grow_stripes(conf, NR_STRIPES)) { printk(KERN_ERR "md/raid:%s: couldn't allocate %dkB for buffers\n", mdname(mddev), memory); @@ -6369,12 +6580,18 @@ static int raid5_start_reshape(struct mddev *mddev) if (!mddev->sync_thread) { mddev->recovery = 0; spin_lock_irq(&conf->device_lock); + write_seqcount_begin(&conf->gen_lock); mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; + mddev->new_chunk_sectors = + conf->chunk_sectors = conf->prev_chunk_sectors; + mddev->new_layout = conf->algorithm = conf->prev_algo; rdev_for_each(rdev, mddev) rdev->new_data_offset = rdev->data_offset; smp_wmb(); + conf->generation --; conf->reshape_progress = MaxSector; mddev->reshape_position = MaxSector; + write_seqcount_end(&conf->gen_lock); spin_unlock_irq(&conf->device_lock); return -EAGAIN; } @@ -6462,27 +6679,28 @@ static void raid5_quiesce(struct mddev *mddev, int state) break; case 1: /* stop all writes */ - spin_lock_irq(&conf->device_lock); + lock_all_device_hash_locks_irq(conf); /* '2' tells resync/reshape to pause so that all * active stripes can drain */ conf->quiesce = 2; - wait_event_lock_irq(conf->wait_for_stripe, + wait_event_cmd(conf->wait_for_stripe, atomic_read(&conf->active_stripes) == 0 && atomic_read(&conf->active_aligned_reads) == 0, - conf->device_lock); + unlock_all_device_hash_locks_irq(conf), + lock_all_device_hash_locks_irq(conf)); conf->quiesce = 1; - spin_unlock_irq(&conf->device_lock); + unlock_all_device_hash_locks_irq(conf); /* allow reshape to continue */ wake_up(&conf->wait_for_overlap); break; case 0: /* re-enable writes */ - spin_lock_irq(&conf->device_lock); + lock_all_device_hash_locks_irq(conf); conf->quiesce = 0; wake_up(&conf->wait_for_stripe); wake_up(&conf->wait_for_overlap); - spin_unlock_irq(&conf->device_lock); + unlock_all_device_hash_locks_irq(conf); break; } } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index b42e6b462ed..01ad8ae8f57 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -205,6 +205,7 @@ struct stripe_head { short pd_idx; /* parity disk index */ short qd_idx; /* 'Q' disk index for raid6 */ short ddf_layout;/* use DDF ordering to calculate Q */ + short hash_lock_index; unsigned long state; /* state flags */ atomic_t count; /* nr of active thread/requests */ int bm_seq; /* sequence number for bitmap flushes */ @@ -367,9 +368,18 @@ struct disk_info { struct md_rdev *rdev, *replacement; }; +/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64. 
+ * This is because we sometimes take all the spinlocks + * and creating that much locking depth can cause + * problems. + */ +#define NR_STRIPE_HASH_LOCKS 8 +#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1) + struct r5worker { struct work_struct work; struct r5worker_group *group; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; bool working; }; @@ -382,6 +392,8 @@ struct r5worker_group { struct r5conf { struct hlist_head *stripe_hashtbl; + /* only protect corresponding hash list and inactive_list */ + spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS]; struct mddev *mddev; int chunk_sectors; int level, algorithm; @@ -462,7 +474,8 @@ struct r5conf { * Free stripes pool */ atomic_t active_stripes; - struct list_head inactive_list; + struct list_head inactive_list[NR_STRIPE_HASH_LOCKS]; + atomic_t empty_inactive_list_nr; struct llist_head released_stripes; wait_queue_head_t wait_for_stripe; wait_queue_head_t wait_for_overlap; @@ -477,6 +490,7 @@ struct r5conf { * the new thread here until we fully activate the array. */ struct md_thread *thread; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; struct r5worker_group *worker_groups; int group_cnt; int worker_cnt_per_group; diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c index f2199e43e80..185c285f70f 100644 --- a/drivers/media/common/b2c2/flexcop-sram.c +++ b/drivers/media/common/b2c2/flexcop-sram.c @@ -85,7 +85,7 @@ static void flexcop_sram_write(struct adapter *adapter, u32 bank, u32 addr, u8 * while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { mdelay(1); retries--; - }; + } if (retries == 0) printk("%s: SRAM timeout\n", __func__); @@ -110,7 +110,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { mdelay(1); retries--; - }; + } if (retries == 0) printk("%s: SRAM timeout\n", __func__); @@ -122,7 +122,7 @@ static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { mdelay(1); retries--; - }; + } if (retries == 0) printk("%s: SRAM timeout\n", __func__); diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c index bb6ee5191eb..34b0d0ddeef 100644 --- a/drivers/media/common/saa7146/saa7146_core.c +++ b/drivers/media/common/saa7146/saa7146_core.c @@ -411,7 +411,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent saa7146_write(dev, MC2, 0xf8000000); /* request an interrupt for the saa7146 */ - err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED | IRQF_DISABLED, + err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED, dev->name, dev); if (err < 0) { ERR("request_irq() failed\n"); @@ -524,8 +524,6 @@ static void saa7146_remove_one(struct pci_dev *pdev) DEB_EE("dev:%p\n", dev); dev->ext->detach(dev); - /* Zero the PCI drvdata after use. 
*/ - pci_set_drvdata(pdev, NULL); /* shut down all video dma transfers */ saa7146_write(dev, MC1, 0x00ff0000); diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c index a142f7942a0..050984c5b1e 100644 --- a/drivers/media/common/siano/smscoreapi.c +++ b/drivers/media/common/siano/smscoreapi.c @@ -922,8 +922,8 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, u32 i, *ptr; u8 *payload = firmware->payload; int rc = 0; - firmware->start_address = le32_to_cpu(firmware->start_address); - firmware->length = le32_to_cpu(firmware->length); + firmware->start_address = le32_to_cpup((__le32 *)&firmware->start_address); + firmware->length = le32_to_cpup((__le32 *)&firmware->length); mem_address = firmware->start_address; @@ -982,7 +982,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, if (rc < 0) goto exit_fw_download; - sms_err("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x", + sms_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x", calc_checksum); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_VALIDITY_REQ, sizeof(msg->x_msg_header) + @@ -1562,7 +1562,7 @@ void smscore_onresponse(struct smscore_device_t *coredev, { struct sms_msg_data *validity = (struct sms_msg_data *) phdr; - sms_err("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x", + sms_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x", validity->msg_data[0]); complete(&coredev->data_validity_done); break; diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c index 63676a8b024..85151efdd94 100644 --- a/drivers/media/common/siano/smsdvb-main.c +++ b/drivers/media/common/siano/smsdvb-main.c @@ -44,14 +44,14 @@ module_param_named(debug, sms_dbg, int, 0644); MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))"); -u32 sms_to_guard_interval_table[] = { +static u32 sms_to_guard_interval_table[] = { [0] = GUARD_INTERVAL_1_32, [1] = GUARD_INTERVAL_1_16, [2] = GUARD_INTERVAL_1_8, [3] = GUARD_INTERVAL_1_4, }; -u32 sms_to_code_rate_table[] = { +static u32 sms_to_code_rate_table[] = { [0] = FEC_1_2, [1] = FEC_2_3, [2] = FEC_3_4, @@ -60,14 +60,14 @@ u32 sms_to_code_rate_table[] = { }; -u32 sms_to_hierarchy_table[] = { +static u32 sms_to_hierarchy_table[] = { [0] = HIERARCHY_NONE, [1] = HIERARCHY_1, [2] = HIERARCHY_2, [3] = HIERARCHY_4, }; -u32 sms_to_modulation_table[] = { +static u32 sms_to_modulation_table[] = { [0] = QPSK, [1] = QAM_16, [2] = QAM_64, diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 3485655fa08..58de4410c52 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -476,7 +476,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, size_t count) { - spin_lock(&demux->lock); + unsigned long flags; + + spin_lock_irqsave(&demux->lock, flags); while (count--) { if (buf[0] == 0x47) @@ -484,7 +486,7 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, buf += 188; } - spin_unlock(&demux->lock); + spin_unlock_irqrestore(&demux->lock, flags); } EXPORT_SYMBOL(dvb_dmx_swfilter_packets); @@ -519,8 +521,9 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, { int p = 0, i, j; const u8 *q; + unsigned long flags; - spin_lock(&demux->lock); + spin_lock_irqsave(&demux->lock, flags); if (demux->tsbufp) { /* tsbuf[0] is now 0x47. 
*/ i = demux->tsbufp; @@ -564,7 +567,7 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, } bailout: - spin_unlock(&demux->lock); + spin_unlock_irqrestore(&demux->lock, flags); } void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) @@ -581,11 +584,13 @@ EXPORT_SYMBOL(dvb_dmx_swfilter_204); void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count) { - spin_lock(&demux->lock); + unsigned long flags; + + spin_lock_irqsave(&demux->lock, flags); demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK); - spin_unlock(&demux->lock); + spin_unlock_irqrestore(&demux->lock, flags); } EXPORT_SYMBOL(dvb_dmx_swfilter_raw); diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig index 0e2ec6f73b0..bddbab43a2d 100644 --- a/drivers/media/dvb-frontends/Kconfig +++ b/drivers/media/dvb-frontends/Kconfig @@ -200,6 +200,13 @@ config DVB_CX24116 help A DVB-S/S2 tuner module. Say Y when you want to support this frontend. +config DVB_CX24117 + tristate "Conexant CX24117 based" + depends on DVB_CORE && I2C + default m if !MEDIA_SUBDRV_AUTOSELECT + help + A Dual DVB-S/S2 tuner module. Say Y when you want to support this frontend. + config DVB_SI21XX tristate "Silicon Labs SI21XX based" depends on DVB_CORE && I2C diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile index cebc0faffab..f9cb43d9aed 100644 --- a/drivers/media/dvb-frontends/Makefile +++ b/drivers/media/dvb-frontends/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o obj-$(CONFIG_DVB_AF9013) += af9013.o obj-$(CONFIG_DVB_CX24116) += cx24116.o +obj-$(CONFIG_DVB_CX24117) += cx24117.o obj-$(CONFIG_DVB_SI21XX) += si21xx.o obj-$(CONFIG_DVB_STV0288) += stv0288.o obj-$(CONFIG_DVB_STB6000) += stb6000.o diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c index a204f282882..fb504f1e912 100644 --- a/drivers/media/dvb-frontends/af9013.c +++ b/drivers/media/dvb-frontends/af9013.c @@ -24,6 +24,9 @@ #include "af9013_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct af9013_state { struct i2c_adapter *i2c; struct dvb_frontend fe; @@ -50,16 +53,23 @@ static int af9013_wr_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg, const u8 *val, int len) { int ret; - u8 buf[3+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->config.i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 3 + len, .buf = buf, } }; + if (3 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = (reg >> 8) & 0xff; buf[1] = (reg >> 0) & 0xff; buf[2] = mbox; diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c index a777b4b944e..30ee5905215 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c @@ -21,6 +21,9 @@ #include "af9033_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct af9033_state { struct i2c_adapter *i2c; struct dvb_frontend fe; @@ -40,16 +43,23 @@ static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val, int len) { int ret; - u8 buf[3 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = state->cfg.i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 3 + len, .buf = buf, } }; + if (3 + len > sizeof(buf)) { + 
dev_warn(&state->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = (reg >> 16) & 0xff; buf[1] = (reg >> 8) & 0xff; buf[2] = (reg >> 0) & 0xff; @@ -161,7 +171,14 @@ static int af9033_wr_reg_val_tab(struct af9033_state *state, const struct reg_val *tab, int tab_len) { int ret, i, j; - u8 buf[tab_len]; + u8 buf[MAX_XFER_SIZE]; + + if (tab_len > sizeof(buf)) { + dev_warn(&state->i2c->dev, + "%s: i2c wr len=%d is too big!\n", + KBUILD_MODNAME, tab_len); + return -EINVAL; + } dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index 1b77909c0c7..39a29dd2951 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -44,6 +44,9 @@ #include "bcm3510.h" #include "bcm3510_priv.h" +/* Max transfer size done by bcm3510_do_hab_cmd() function */ +#define MAX_XFER_SIZE 128 + struct bcm3510_state { struct i2c_adapter* i2c; @@ -201,9 +204,19 @@ static int bcm3510_hab_send_request(struct bcm3510_state *st, u8 *buf, int len) static int bcm3510_do_hab_cmd(struct bcm3510_state *st, u8 cmd, u8 msgid, u8 *obuf, u8 olen, u8 *ibuf, u8 ilen) { - u8 ob[olen+2],ib[ilen+2]; + u8 ob[MAX_XFER_SIZE], ib[MAX_XFER_SIZE]; int ret = 0; + if (ilen + 2 > sizeof(ib)) { + deb_hab("do_hab_cmd: ilen=%d is too big!\n", ilen); + return -EINVAL; + } + + if (olen + 2 > sizeof(ob)) { + deb_hab("do_hab_cmd: olen=%d is too big!\n", olen); + return -EINVAL; + } + ob[0] = cmd; ob[1] = msgid; memcpy(&ob[2],obuf,olen); diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c index 0cd6927e654..95b981cd711 100644 --- a/drivers/media/dvb-frontends/cx24110.c +++ b/drivers/media/dvb-frontends/cx24110.c @@ -378,7 +378,7 @@ static int cx24110_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltag return cx24110_writereg(state,0x76,(cx24110_readreg(state,0x76)&0x3b)|0x40); default: return -EINVAL; - }; + } } static int cx24110_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t burst) diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c new file mode 100644 index 00000000000..476b422ccf1 --- /dev/null +++ b/drivers/media/dvb-frontends/cx24117.c @@ -0,0 +1,1650 @@ +/* + Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver + + Copyright (C) 2013 Luis Alves <ljalvs@gmail.com> + July, 6th 2013 + First release based on cx24116 driver by: + Steven Toth and Georg Acher, Darron Broad, Igor Liplianin + Cards currently supported: + TBS6980 - Dual DVBS/S2 PCIe card + TBS6981 - Dual DVBS/S2 PCIe card + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/firmware.h> + +#include "tuner-i2c.h" +#include "dvb_frontend.h" +#include "cx24117.h" + + +#define CX24117_DEFAULT_FIRMWARE "dvb-fe-cx24117.fw" +#define CX24117_SEARCH_RANGE_KHZ 5000 + +/* known registers */ +#define CX24117_REG_COMMAND (0x00) /* command buffer */ +#define CX24117_REG_EXECUTE (0x1f) /* execute command */ + +#define CX24117_REG_FREQ3_0 (0x34) /* frequency */ +#define CX24117_REG_FREQ2_0 (0x35) +#define CX24117_REG_FREQ1_0 (0x36) +#define CX24117_REG_STATE0 (0x39) +#define CX24117_REG_SSTATUS0 (0x3a) /* demod0 signal high / status */ +#define CX24117_REG_SIGNAL0 (0x3b) +#define CX24117_REG_FREQ5_0 (0x3c) /* +-freq */ +#define CX24117_REG_FREQ6_0 (0x3d) +#define CX24117_REG_SRATE2_0 (0x3e) /* +- 1000 * srate */ +#define CX24117_REG_SRATE1_0 (0x3f) +#define CX24117_REG_QUALITY2_0 (0x40) +#define CX24117_REG_QUALITY1_0 (0x41) + +#define CX24117_REG_BER4_0 (0x47) +#define CX24117_REG_BER3_0 (0x48) +#define CX24117_REG_BER2_0 (0x49) +#define CX24117_REG_BER1_0 (0x4a) +#define CX24117_REG_DVBS_UCB2_0 (0x4b) +#define CX24117_REG_DVBS_UCB1_0 (0x4c) +#define CX24117_REG_DVBS2_UCB2_0 (0x50) +#define CX24117_REG_DVBS2_UCB1_0 (0x51) +#define CX24117_REG_QSTATUS0 (0x93) +#define CX24117_REG_CLKDIV0 (0xe6) +#define CX24117_REG_RATEDIV0 (0xf0) + + +#define CX24117_REG_FREQ3_1 (0x55) /* frequency */ +#define CX24117_REG_FREQ2_1 (0x56) +#define CX24117_REG_FREQ1_1 (0x57) +#define CX24117_REG_STATE1 (0x5a) +#define CX24117_REG_SSTATUS1 (0x5b) /* demod1 signal high / status */ +#define CX24117_REG_SIGNAL1 (0x5c) +#define CX24117_REG_FREQ5_1 (0x5d) /* +- freq */ +#define CX24117_REG_FREQ4_1 (0x5e) +#define CX24117_REG_SRATE2_1 (0x5f) +#define CX24117_REG_SRATE1_1 (0x60) +#define CX24117_REG_QUALITY2_1 (0x61) +#define CX24117_REG_QUALITY1_1 (0x62) +#define CX24117_REG_BER4_1 (0x68) +#define CX24117_REG_BER3_1 (0x69) +#define CX24117_REG_BER2_1 (0x6a) +#define CX24117_REG_BER1_1 (0x6b) +#define CX24117_REG_DVBS_UCB2_1 (0x6c) +#define CX24117_REG_DVBS_UCB1_1 (0x6d) +#define CX24117_REG_DVBS2_UCB2_1 (0x71) +#define CX24117_REG_DVBS2_UCB1_1 (0x72) +#define CX24117_REG_QSTATUS1 (0x9f) +#define CX24117_REG_CLKDIV1 (0xe7) +#define CX24117_REG_RATEDIV1 (0xf1) + + +/* arg buffer size */ +#define CX24117_ARGLEN (0x1e) + +/* rolloff */ +#define CX24117_ROLLOFF_020 (0x00) +#define CX24117_ROLLOFF_025 (0x01) +#define CX24117_ROLLOFF_035 (0x02) + +/* pilot bit */ +#define CX24117_PILOT_OFF (0x00) +#define CX24117_PILOT_ON (0x40) +#define CX24117_PILOT_AUTO (0x80) + +/* signal status */ +#define CX24117_HAS_SIGNAL (0x01) +#define CX24117_HAS_CARRIER (0x02) +#define CX24117_HAS_VITERBI (0x04) +#define CX24117_HAS_SYNCLOCK (0x08) +#define CX24117_STATUS_MASK (0x0f) +#define CX24117_SIGNAL_MASK (0xc0) + + +/* arg offset for DiSEqC */ +#define CX24117_DISEQC_DEMOD (1) +#define CX24117_DISEQC_BURST (2) +#define CX24117_DISEQC_ARG3_2 (3) /* unknown value=2 */ +#define CX24117_DISEQC_ARG4_0 (4) /* unknown value=0 */ +#define CX24117_DISEQC_ARG5_0 (5) /* unknown value=0 */ +#define CX24117_DISEQC_MSGLEN (6) +#define CX24117_DISEQC_MSGOFS (7) + +/* DiSEqC burst */ +#define CX24117_DISEQC_MINI_A (0) +#define CX24117_DISEQC_MINI_B (1) + + +#define CX24117_PNE (0) /* 0 disabled / 2 enabled */ +#define CX24117_OCC (1) /* 0 disabled / 1 enabled */ + + +enum cmds { + CMD_SET_VCO = 0x10, + CMD_TUNEREQUEST = 0x11, + CMD_MPEGCONFIG = 0x13, + CMD_TUNERINIT = 0x14, + 
CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */ + CMD_LNBDCLEVEL = 0x22, + CMD_SET_TONE = 0x23, + CMD_UPDFWVERS = 0x35, + CMD_TUNERSLEEP = 0x36, +}; + +static LIST_HEAD(hybrid_tuner_instance_list); +static DEFINE_MUTEX(cx24117_list_mutex); + +/* The Demod/Tuner can't easily provide these, we cache them */ +struct cx24117_tuning { + u32 frequency; + u32 symbol_rate; + fe_spectral_inversion_t inversion; + fe_code_rate_t fec; + + fe_delivery_system_t delsys; + fe_modulation_t modulation; + fe_pilot_t pilot; + fe_rolloff_t rolloff; + + /* Demod values */ + u8 fec_val; + u8 fec_mask; + u8 inversion_val; + u8 pilot_val; + u8 rolloff_val; +}; + +/* Basic commands that are sent to the firmware */ +struct cx24117_cmd { + u8 len; + u8 args[CX24117_ARGLEN]; +}; + +/* common to both fe's */ +struct cx24117_priv { + u8 demod_address; + struct i2c_adapter *i2c; + u8 skip_fw_load; + struct mutex fe_lock; + + /* Used for sharing this struct between demods */ + struct tuner_i2c_props i2c_props; + struct list_head hybrid_tuner_instance_list; +}; + +/* one per each fe */ +struct cx24117_state { + struct cx24117_priv *priv; + struct dvb_frontend frontend; + + struct cx24117_tuning dcur; + struct cx24117_tuning dnxt; + struct cx24117_cmd dsec_cmd; + + int demod; +}; + +/* modfec (modulation and FEC) lookup table */ +/* Check cx24116.c for a detailed description of each field */ +static struct cx24117_modfec { + fe_delivery_system_t delivery_system; + fe_modulation_t modulation; + fe_code_rate_t fec; + u8 mask; /* In DVBS mode this is used to autodetect */ + u8 val; /* Passed to the firmware to indicate mode selection */ +} cx24117_modfec_modes[] = { + /* QPSK. For unknown rates we set hardware to auto detect 0xfe 0x30 */ + + /*mod fec mask val */ + { SYS_DVBS, QPSK, FEC_NONE, 0xfe, 0x30 }, + { SYS_DVBS, QPSK, FEC_1_2, 0x02, 0x2e }, /* 00000010 00101110 */ + { SYS_DVBS, QPSK, FEC_2_3, 0x04, 0x2f }, /* 00000100 00101111 */ + { SYS_DVBS, QPSK, FEC_3_4, 0x08, 0x30 }, /* 00001000 00110000 */ + { SYS_DVBS, QPSK, FEC_4_5, 0xfe, 0x30 }, /* 000?0000 ? */ + { SYS_DVBS, QPSK, FEC_5_6, 0x20, 0x31 }, /* 00100000 00110001 */ + { SYS_DVBS, QPSK, FEC_6_7, 0xfe, 0x30 }, /* 0?000000 ? */ + { SYS_DVBS, QPSK, FEC_7_8, 0x80, 0x32 }, /* 10000000 00110010 */ + { SYS_DVBS, QPSK, FEC_8_9, 0xfe, 0x30 }, /* 0000000? ? */ + { SYS_DVBS, QPSK, FEC_AUTO, 0xfe, 0x30 }, + /* NBC-QPSK */ + { SYS_DVBS2, QPSK, FEC_NONE, 0x00, 0x00 }, + { SYS_DVBS2, QPSK, FEC_1_2, 0x00, 0x04 }, + { SYS_DVBS2, QPSK, FEC_3_5, 0x00, 0x05 }, + { SYS_DVBS2, QPSK, FEC_2_3, 0x00, 0x06 }, + { SYS_DVBS2, QPSK, FEC_3_4, 0x00, 0x07 }, + { SYS_DVBS2, QPSK, FEC_4_5, 0x00, 0x08 }, + { SYS_DVBS2, QPSK, FEC_5_6, 0x00, 0x09 }, + { SYS_DVBS2, QPSK, FEC_8_9, 0x00, 0x0a }, + { SYS_DVBS2, QPSK, FEC_9_10, 0x00, 0x0b }, + { SYS_DVBS2, QPSK, FEC_AUTO, 0x00, 0x00 }, + /* 8PSK */ + { SYS_DVBS2, PSK_8, FEC_NONE, 0x00, 0x00 }, + { SYS_DVBS2, PSK_8, FEC_3_5, 0x00, 0x0c }, + { SYS_DVBS2, PSK_8, FEC_2_3, 0x00, 0x0d }, + { SYS_DVBS2, PSK_8, FEC_3_4, 0x00, 0x0e }, + { SYS_DVBS2, PSK_8, FEC_5_6, 0x00, 0x0f }, + { SYS_DVBS2, PSK_8, FEC_8_9, 0x00, 0x10 }, + { SYS_DVBS2, PSK_8, FEC_9_10, 0x00, 0x11 }, + { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00, 0x00 }, + /* + * 'val' can be found in the FECSTATUS register when tuning. + * FECSTATUS will give the actual FEC in use if tuning was successful. 
+ */ +}; + + +static int cx24117_writereg(struct cx24117_state *state, u8 reg, u8 data) +{ + u8 buf[] = { reg, data }; + struct i2c_msg msg = { .addr = state->priv->demod_address, + .flags = 0, .buf = buf, .len = 2 }; + int ret; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d i2c wr @0x%02x=0x%02x\n", + __func__, state->demod, reg, data); + + ret = i2c_transfer(state->priv->i2c, &msg, 1); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c wr err(%i) @0x%02x=0x%02x\n", + KBUILD_MODNAME, state->demod, ret, reg, data); + return ret; + } + return 0; +} + +static int cx24117_writecmd(struct cx24117_state *state, + struct cx24117_cmd *cmd) +{ + struct i2c_msg msg; + u8 buf[CX24117_ARGLEN+1]; + int ret; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d i2c wr cmd len=%d\n", + __func__, state->demod, cmd->len); + + buf[0] = CX24117_REG_COMMAND; + memcpy(&buf[1], cmd->args, cmd->len); + + msg.addr = state->priv->demod_address; + msg.flags = 0; + msg.len = cmd->len+1; + msg.buf = buf; + ret = i2c_transfer(state->priv->i2c, &msg, 1); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c wr cmd err(%i) len=%d\n", + KBUILD_MODNAME, state->demod, ret, cmd->len); + return ret; + } + return 0; +} + +static int cx24117_readreg(struct cx24117_state *state, u8 reg) +{ + int ret; + u8 recv = 0; + struct i2c_msg msg[] = { + { .addr = state->priv->demod_address, .flags = 0, + .buf = ®, .len = 1 }, + { .addr = state->priv->demod_address, .flags = I2C_M_RD, + .buf = &recv, .len = 1 } + }; + + ret = i2c_transfer(state->priv->i2c, msg, 2); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c rd err(%d) @0x%x\n", + KBUILD_MODNAME, state->demod, ret, reg); + return ret; + } + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d i2c rd @0x%02x=0x%02x\n", + __func__, state->demod, reg, recv); + + return recv; +} + +static int cx24117_readregN(struct cx24117_state *state, + u8 reg, u8 *buf, int len) +{ + int ret; + struct i2c_msg msg[] = { + { .addr = state->priv->demod_address, .flags = 0, + .buf = ®, .len = 1 }, + { .addr = state->priv->demod_address, .flags = I2C_M_RD, + .buf = buf, .len = len } + }; + + ret = i2c_transfer(state->priv->i2c, msg, 2); + if (ret < 0) { + dev_warn(&state->priv->i2c->dev, + "%s: demod%d i2c rd err(%d) @0x%x\n", + KBUILD_MODNAME, state->demod, ret, reg); + return ret; + } + return 0; +} + +static int cx24117_set_inversion(struct cx24117_state *state, + fe_spectral_inversion_t inversion) +{ + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n", + __func__, inversion, state->demod); + + switch (inversion) { + case INVERSION_OFF: + state->dnxt.inversion_val = 0x00; + break; + case INVERSION_ON: + state->dnxt.inversion_val = 0x04; + break; + case INVERSION_AUTO: + state->dnxt.inversion_val = 0x0C; + break; + default: + return -EINVAL; + } + + state->dnxt.inversion = inversion; + + return 0; +} + +static int cx24117_lookup_fecmod(struct cx24117_state *state, + fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f) +{ + int i, ret = -EINVAL; + + dev_dbg(&state->priv->i2c->dev, + "%s(demod(0x%02x,0x%02x) demod%d\n", + __func__, m, f, state->demod); + + for (i = 0; i < ARRAY_SIZE(cx24117_modfec_modes); i++) { + if ((d == cx24117_modfec_modes[i].delivery_system) && + (m == cx24117_modfec_modes[i].modulation) && + (f == cx24117_modfec_modes[i].fec)) { + ret = i; + break; + } + } + + return ret; +} + +static int cx24117_set_fec(struct cx24117_state *state, + fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec) +{ + int ret; 
+ + dev_dbg(&state->priv->i2c->dev, + "%s(0x%02x,0x%02x) demod%d\n", + __func__, mod, fec, state->demod); + + ret = cx24117_lookup_fecmod(state, delsys, mod, fec); + if (ret < 0) + return ret; + + state->dnxt.fec = fec; + state->dnxt.fec_val = cx24117_modfec_modes[ret].val; + state->dnxt.fec_mask = cx24117_modfec_modes[ret].mask; + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d mask/val = 0x%02x/0x%02x\n", __func__, + state->demod, state->dnxt.fec_mask, state->dnxt.fec_val); + + return 0; +} + +static int cx24117_set_symbolrate(struct cx24117_state *state, u32 rate) +{ + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n", + __func__, rate, state->demod); + + state->dnxt.symbol_rate = rate; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d symbol_rate = %d\n", + __func__, state->demod, rate); + + return 0; +} + +static int cx24117_load_firmware(struct dvb_frontend *fe, + const struct firmware *fw); + +static int cx24117_firmware_ondemand(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + const struct firmware *fw; + int ret = 0; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d skip_fw_load=%d\n", + __func__, state->demod, state->priv->skip_fw_load); + + if (state->priv->skip_fw_load) + return 0; + + /* check if firmware if already running */ + if (cx24117_readreg(state, 0xeb) != 0xa) { + /* Load firmware */ + /* request the firmware, this will block until loaded */ + dev_dbg(&state->priv->i2c->dev, + "%s: Waiting for firmware upload (%s)...\n", + __func__, CX24117_DEFAULT_FIRMWARE); + ret = request_firmware(&fw, CX24117_DEFAULT_FIRMWARE, + state->priv->i2c->dev.parent); + dev_dbg(&state->priv->i2c->dev, + "%s: Waiting for firmware upload(2)...\n", __func__); + if (ret) { + dev_err(&state->priv->i2c->dev, + "%s: No firmware uploaded " + "(timeout or file not found?)\n", __func__); + return ret; + } + + /* Make sure we don't recurse back through here + * during loading */ + state->priv->skip_fw_load = 1; + + ret = cx24117_load_firmware(fe, fw); + if (ret) + dev_err(&state->priv->i2c->dev, + "%s: Writing firmware failed\n", __func__); + release_firmware(fw); + + dev_info(&state->priv->i2c->dev, + "%s: Firmware upload %s\n", __func__, + ret == 0 ? 
"complete" : "failed"); + + /* Ensure firmware is always loaded if required */ + state->priv->skip_fw_load = 0; + } + + return ret; +} + +/* Take a basic firmware command structure, format it + * and forward it for processing + */ +static int cx24117_cmd_execute_nolock(struct dvb_frontend *fe, + struct cx24117_cmd *cmd) +{ + struct cx24117_state *state = fe->demodulator_priv; + int i, ret; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + /* Load the firmware if required */ + ret = cx24117_firmware_ondemand(fe); + if (ret != 0) + return ret; + + /* Write the command */ + cx24117_writecmd(state, cmd); + + /* Start execution and wait for cmd to terminate */ + cx24117_writereg(state, CX24117_REG_EXECUTE, 0x01); + i = 0; + while (cx24117_readreg(state, CX24117_REG_EXECUTE)) { + msleep(20); + if (i++ > 40) { + /* Avoid looping forever if the firmware does + not respond */ + dev_warn(&state->priv->i2c->dev, + "%s() Firmware not responding\n", __func__); + return -EIO; + } + } + return 0; +} + +static int cx24117_cmd_execute(struct dvb_frontend *fe, struct cx24117_cmd *cmd) +{ + struct cx24117_state *state = fe->demodulator_priv; + int ret; + + mutex_lock(&state->priv->fe_lock); + ret = cx24117_cmd_execute_nolock(fe, cmd); + mutex_unlock(&state->priv->fe_lock); + + return ret; +} + +static int cx24117_load_firmware(struct dvb_frontend *fe, + const struct firmware *fw) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int i, ret; + unsigned char vers[4]; + + struct i2c_msg msg; + u8 *buf; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d FW is %zu bytes (%02x %02x .. %02x %02x)\n", + __func__, state->demod, fw->size, fw->data[0], fw->data[1], + fw->data[fw->size - 2], fw->data[fw->size - 1]); + + cx24117_writereg(state, 0xea, 0x00); + cx24117_writereg(state, 0xea, 0x01); + cx24117_writereg(state, 0xea, 0x00); + + cx24117_writereg(state, 0xce, 0x92); + + cx24117_writereg(state, 0xfb, 0x00); + cx24117_writereg(state, 0xfc, 0x00); + + cx24117_writereg(state, 0xc3, 0x04); + cx24117_writereg(state, 0xc4, 0x04); + + cx24117_writereg(state, 0xce, 0x00); + cx24117_writereg(state, 0xcf, 0x00); + + cx24117_writereg(state, 0xea, 0x00); + cx24117_writereg(state, 0xeb, 0x0c); + cx24117_writereg(state, 0xec, 0x06); + cx24117_writereg(state, 0xed, 0x05); + cx24117_writereg(state, 0xee, 0x03); + cx24117_writereg(state, 0xef, 0x05); + + cx24117_writereg(state, 0xf3, 0x03); + cx24117_writereg(state, 0xf4, 0x44); + + cx24117_writereg(state, CX24117_REG_RATEDIV0, 0x04); + cx24117_writereg(state, CX24117_REG_CLKDIV0, 0x02); + + cx24117_writereg(state, CX24117_REG_RATEDIV1, 0x04); + cx24117_writereg(state, CX24117_REG_CLKDIV1, 0x02); + + cx24117_writereg(state, 0xf2, 0x04); + cx24117_writereg(state, 0xe8, 0x02); + cx24117_writereg(state, 0xea, 0x01); + cx24117_writereg(state, 0xc8, 0x00); + cx24117_writereg(state, 0xc9, 0x00); + cx24117_writereg(state, 0xca, 0x00); + cx24117_writereg(state, 0xcb, 0x00); + cx24117_writereg(state, 0xcc, 0x00); + cx24117_writereg(state, 0xcd, 0x00); + cx24117_writereg(state, 0xe4, 0x03); + cx24117_writereg(state, 0xeb, 0x0a); + + cx24117_writereg(state, 0xfb, 0x00); + cx24117_writereg(state, 0xe0, 0x76); + cx24117_writereg(state, 0xf7, 0x81); + cx24117_writereg(state, 0xf8, 0x00); + cx24117_writereg(state, 0xf9, 0x00); + + buf = kmalloc(fw->size + 1, GFP_KERNEL); + if (buf == NULL) { + state->priv->skip_fw_load = 0; + return -ENOMEM; + } + + /* fw upload reg */ + buf[0] = 0xfa; + memcpy(&buf[1], fw->data, fw->size); + 
+ /* prepare i2c message to send */ + msg.addr = state->priv->demod_address; + msg.flags = 0; + msg.len = fw->size + 1; + msg.buf = buf; + + /* send fw */ + ret = i2c_transfer(state->priv->i2c, &msg, 1); + if (ret < 0) + return ret; + + kfree(buf); + + cx24117_writereg(state, 0xf7, 0x0c); + cx24117_writereg(state, 0xe0, 0x00); + + /* CMD 1B */ + cmd.args[0] = 0x1b; + cmd.args[1] = 0x00; + cmd.args[2] = 0x01; + cmd.args[3] = 0x00; + cmd.len = 4; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 10 */ + cmd.args[0] = CMD_SET_VCO; + cmd.args[1] = 0x06; + cmd.args[2] = 0x2b; + cmd.args[3] = 0xd8; + cmd.args[4] = 0xa5; + cmd.args[5] = 0xee; + cmd.args[6] = 0x03; + cmd.args[7] = 0x9d; + cmd.args[8] = 0xfc; + cmd.args[9] = 0x06; + cmd.args[10] = 0x02; + cmd.args[11] = 0x9d; + cmd.args[12] = 0xfc; + cmd.len = 13; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 15 */ + cmd.args[0] = 0x15; + cmd.args[1] = 0x00; + cmd.args[2] = 0x01; + cmd.args[3] = 0x00; + cmd.args[4] = 0x00; + cmd.args[5] = 0x01; + cmd.args[6] = 0x01; + cmd.args[7] = 0x01; + cmd.args[8] = 0x00; + cmd.args[9] = 0x05; + cmd.args[10] = 0x02; + cmd.args[11] = 0x02; + cmd.args[12] = 0x00; + cmd.len = 13; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 13 */ + cmd.args[0] = CMD_MPEGCONFIG; + cmd.args[1] = 0x00; + cmd.args[2] = 0x00; + cmd.args[3] = 0x00; + cmd.args[4] = 0x01; + cmd.args[5] = 0x00; + cmd.len = 6; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + + /* CMD 14 */ + for (i = 0; i < 2; i++) { + cmd.args[0] = CMD_TUNERINIT; + cmd.args[1] = (u8) i; + cmd.args[2] = 0x00; + cmd.args[3] = 0x05; + cmd.args[4] = 0x00; + cmd.args[5] = 0x00; + cmd.args[6] = 0x55; + cmd.args[7] = 0x00; + cmd.len = 8; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + } + + cx24117_writereg(state, 0xce, 0xc0); + cx24117_writereg(state, 0xcf, 0x00); + cx24117_writereg(state, 0xe5, 0x04); + + /* Firmware CMD 35: Get firmware version */ + cmd.args[0] = CMD_UPDFWVERS; + cmd.len = 2; + for (i = 0; i < 4; i++) { + cmd.args[1] = i; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto error; + vers[i] = cx24117_readreg(state, 0x33); + } + dev_info(&state->priv->i2c->dev, + "%s: FW version %i.%i.%i.%i\n", __func__, + vers[0], vers[1], vers[2], vers[3]); + return 0; +error: + state->priv->skip_fw_load = 0; + dev_err(&state->priv->i2c->dev, "%s() Error running FW.\n", __func__); + return ret; +} + +static int cx24117_read_status(struct dvb_frontend *fe, fe_status_t *status) +{ + struct cx24117_state *state = fe->demodulator_priv; + int lock; + + lock = cx24117_readreg(state, + (state->demod == 0) ? CX24117_REG_SSTATUS0 : + CX24117_REG_SSTATUS1) & + CX24117_STATUS_MASK; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d status = 0x%02x\n", + __func__, state->demod, lock); + + *status = 0; + + if (lock & CX24117_HAS_SIGNAL) + *status |= FE_HAS_SIGNAL; + if (lock & CX24117_HAS_CARRIER) + *status |= FE_HAS_CARRIER; + if (lock & CX24117_HAS_VITERBI) + *status |= FE_HAS_VITERBI; + if (lock & CX24117_HAS_SYNCLOCK) + *status |= FE_HAS_SYNC | FE_HAS_LOCK; + + return 0; +} + +static int cx24117_read_ber(struct dvb_frontend *fe, u32 *ber) +{ + struct cx24117_state *state = fe->demodulator_priv; + int ret; + u8 buf[4]; + u8 base_reg = (state->demod == 0) ? 
+ CX24117_REG_BER4_0 : + CX24117_REG_BER4_1; + + ret = cx24117_readregN(state, base_reg, buf, 4); + if (ret != 0) + return ret; + + *ber = (buf[0] << 24) | (buf[1] << 16) | + (buf[1] << 8) | buf[0]; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d ber=0x%04x\n", + __func__, state->demod, *ber); + + return 0; +} + +static int cx24117_read_signal_strength(struct dvb_frontend *fe, + u16 *signal_strength) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + u16 sig_reading; + u8 buf[2]; + u8 reg = (state->demod == 0) ? + CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1; + + /* Firmware CMD 1A */ + cmd.args[0] = 0x1a; + cmd.args[1] = (u8) state->demod; + cmd.len = 2; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + return ret; + + ret = cx24117_readregN(state, reg, buf, 2); + if (ret != 0) + return ret; + sig_reading = ((buf[0] & CX24117_SIGNAL_MASK) << 2) | buf[1]; + + *signal_strength = -100 * sig_reading + 94324; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d raw / cooked = 0x%04x / 0x%04x\n", + __func__, state->demod, sig_reading, *signal_strength); + + return 0; +} + +static int cx24117_read_snr(struct dvb_frontend *fe, u16 *snr) +{ + struct cx24117_state *state = fe->demodulator_priv; + int ret; + u8 buf[2]; + u8 reg = (state->demod == 0) ? + CX24117_REG_QUALITY2_0 : CX24117_REG_QUALITY2_1; + + ret = cx24117_readregN(state, reg, buf, 2); + if (ret != 0) + return ret; + + *snr = (buf[0] << 8) | buf[1]; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d snr = 0x%04x\n", + __func__, state->demod, *snr); + + return ret; +} + +static int cx24117_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) +{ + struct cx24117_state *state = fe->demodulator_priv; + fe_delivery_system_t delsys = fe->dtv_property_cache.delivery_system; + int ret; + u8 buf[2]; + u8 reg = (state->demod == 0) ? + CX24117_REG_DVBS_UCB2_0 : + CX24117_REG_DVBS_UCB2_1; + + switch (delsys) { + case SYS_DVBS: + break; + case SYS_DVBS2: + reg += (CX24117_REG_DVBS2_UCB2_0 - CX24117_REG_DVBS_UCB2_0); + break; + default: + return -EINVAL; + } + + ret = cx24117_readregN(state, reg, buf, 2); + if (ret != 0) + return ret; + *ucblocks = (buf[0] << 8) | buf[1]; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d ucb=0x%04x\n", + __func__, state->demod, *ucblocks); + + return 0; +} + +/* Overwrite the current tuning params, we are about to tune */ +static void cx24117_clone_params(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + state->dcur = state->dnxt; +} + +/* Wait for LNB */ +static int cx24117_wait_for_lnb(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + int i; + u8 val, reg = (state->demod == 0) ? CX24117_REG_QSTATUS0 : + CX24117_REG_QSTATUS1; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d qstatus = 0x%02x\n", + __func__, state->demod, cx24117_readreg(state, reg)); + + /* Wait for up to 300 ms */ + for (i = 0; i < 10; i++) { + val = cx24117_readreg(state, reg) & 0x01; + if (val != 0) + return 0; + msleep(30); + } + + dev_warn(&state->priv->i2c->dev, "%s: demod%d LNB not ready\n", + KBUILD_MODNAME, state->demod); + + return -ETIMEDOUT; /* -EBUSY ? */ +} + +static int cx24117_set_voltage(struct dvb_frontend *fe, + fe_sec_voltage_t voltage) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + u8 reg = (state->demod == 0) ? 0x10 : 0x20; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d %s\n", + __func__, state->demod, + voltage == SEC_VOLTAGE_13 ? 
"SEC_VOLTAGE_13" : + voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : + "SEC_VOLTAGE_OFF"); + + /* CMD 32 */ + cmd.args[0] = 0x32; + cmd.args[1] = reg; + cmd.args[2] = reg; + cmd.len = 3; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret) + return ret; + + if ((voltage == SEC_VOLTAGE_13) || + (voltage == SEC_VOLTAGE_18)) { + /* CMD 33 */ + cmd.args[0] = 0x33; + cmd.args[1] = reg; + cmd.args[2] = reg; + cmd.len = 3; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + return ret; + + ret = cx24117_wait_for_lnb(fe); + if (ret != 0) + return ret; + + /* Wait for voltage/min repeat delay */ + msleep(100); + + /* CMD 22 - CMD_LNBDCLEVEL */ + cmd.args[0] = CMD_LNBDCLEVEL; + cmd.args[1] = state->demod ? 0 : 1; + cmd.args[2] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00); + cmd.len = 3; + + /* Min delay time before DiSEqC send */ + msleep(20); + } else { + cmd.args[0] = 0x33; + cmd.args[1] = 0x00; + cmd.args[2] = reg; + cmd.len = 3; + } + + return cx24117_cmd_execute(fe, &cmd); +} + +static int cx24117_set_tone(struct dvb_frontend *fe, + fe_sec_tone_mode_t tone) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod%d\n", + __func__, state->demod, tone); + if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) { + dev_warn(&state->priv->i2c->dev, "%s: demod%d invalid tone=%d\n", + KBUILD_MODNAME, state->demod, tone); + return -EINVAL; + } + + /* Wait for LNB ready */ + ret = cx24117_wait_for_lnb(fe); + if (ret != 0) + return ret; + + /* Min delay time after DiSEqC send */ + msleep(20); + + /* Set the tone */ + /* CMD 23 - CMD_SET_TONE */ + cmd.args[0] = CMD_SET_TONE; + cmd.args[1] = (state->demod ? 0 : 1); + cmd.args[2] = 0x00; + cmd.args[3] = 0x00; + cmd.len = 5; + switch (tone) { + case SEC_TONE_ON: + cmd.args[4] = 0x01; + break; + case SEC_TONE_OFF: + cmd.args[4] = 0x00; + break; + } + + msleep(20); + + return cx24117_cmd_execute(fe, &cmd); +} + +/* Initialise DiSEqC */ +static int cx24117_diseqc_init(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + + /* Prepare a DiSEqC command */ + state->dsec_cmd.args[0] = CMD_LNBSEND; + + /* demod */ + state->dsec_cmd.args[CX24117_DISEQC_DEMOD] = state->demod ? 0 : 1; + + /* DiSEqC burst */ + state->dsec_cmd.args[CX24117_DISEQC_BURST] = CX24117_DISEQC_MINI_A; + + /* Unknown */ + state->dsec_cmd.args[CX24117_DISEQC_ARG3_2] = 0x02; + state->dsec_cmd.args[CX24117_DISEQC_ARG4_0] = 0x00; + + /* Continuation flag? 
*/ + state->dsec_cmd.args[CX24117_DISEQC_ARG5_0] = 0x00; + + /* DiSEqC message length */ + state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = 0x00; + + /* Command length */ + state->dsec_cmd.len = 7; + + return 0; +} + +/* Send DiSEqC message */ +static int cx24117_send_diseqc_msg(struct dvb_frontend *fe, + struct dvb_diseqc_master_cmd *d) +{ + struct cx24117_state *state = fe->demodulator_priv; + int i, ret; + + /* Dump DiSEqC message */ + dev_dbg(&state->priv->i2c->dev, "%s: demod %d (", + __func__, state->demod); + for (i = 0; i < d->msg_len; i++) + dev_dbg(&state->priv->i2c->dev, "0x%02x ", d->msg[i]); + dev_dbg(&state->priv->i2c->dev, ")\n"); + + /* Validate length */ + if (d->msg_len > 15) + return -EINVAL; + + /* DiSEqC message */ + for (i = 0; i < d->msg_len; i++) + state->dsec_cmd.args[CX24117_DISEQC_MSGOFS + i] = d->msg[i]; + + /* DiSEqC message length */ + state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] = d->msg_len; + + /* Command length */ + state->dsec_cmd.len = CX24117_DISEQC_MSGOFS + + state->dsec_cmd.args[CX24117_DISEQC_MSGLEN]; + + /* + * Message is sent with derived else cached burst + * + * WRITE PORT GROUP COMMAND 38 + * + * 0/A/A: E0 10 38 F0..F3 + * 1/B/B: E0 10 38 F4..F7 + * 2/C/A: E0 10 38 F8..FB + * 3/D/B: E0 10 38 FC..FF + * + * databyte[3]= 8421:8421 + * ABCD:WXYZ + * CLR :SET + * + * WX= PORT SELECT 0..3 (X=TONEBURST) + * Y = VOLTAGE (0=13V, 1=18V) + * Z = BAND (0=LOW, 1=HIGH(22K)) + */ + if (d->msg_len >= 4 && d->msg[2] == 0x38) + state->dsec_cmd.args[CX24117_DISEQC_BURST] = + ((d->msg[3] & 4) >> 2); + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d burst=%d\n", + __func__, state->demod, + state->dsec_cmd.args[CX24117_DISEQC_BURST]); + + /* Wait for LNB ready */ + ret = cx24117_wait_for_lnb(fe); + if (ret != 0) + return ret; + + /* Wait for voltage/min repeat delay */ + msleep(100); + + /* Command */ + ret = cx24117_cmd_execute(fe, &state->dsec_cmd); + if (ret != 0) + return ret; + /* + * Wait for send + * + * Eutelsat spec: + * >15ms delay + (XXX determine if FW does this, see set_tone) + * 13.5ms per byte + + * >15ms delay + + * 12.5ms burst + + * >15ms delay (XXX determine if FW does this, see set_tone) + */ + msleep((state->dsec_cmd.args[CX24117_DISEQC_MSGLEN] << 4) + 60); + + return 0; +} + +/* Send DiSEqC burst */ +static int cx24117_diseqc_send_burst(struct dvb_frontend *fe, + fe_sec_mini_cmd_t burst) +{ + struct cx24117_state *state = fe->demodulator_priv; + + dev_dbg(&state->priv->i2c->dev, "%s(%d) demod=%d\n", + __func__, burst, state->demod); + + /* DiSEqC burst */ + if (burst == SEC_MINI_A) + state->dsec_cmd.args[CX24117_DISEQC_BURST] = + CX24117_DISEQC_MINI_A; + else if (burst == SEC_MINI_B) + state->dsec_cmd.args[CX24117_DISEQC_BURST] = + CX24117_DISEQC_MINI_B; + else + return -EINVAL; + + return 0; +} + +static int cx24117_get_priv(struct cx24117_priv **priv, + struct i2c_adapter *i2c, u8 client_address) +{ + int ret; + + mutex_lock(&cx24117_list_mutex); + ret = hybrid_tuner_request_state(struct cx24117_priv, (*priv), + hybrid_tuner_instance_list, i2c, client_address, "cx24117"); + mutex_unlock(&cx24117_list_mutex); + + return ret; +} + +static void cx24117_release_priv(struct cx24117_priv *priv) +{ + mutex_lock(&cx24117_list_mutex); + if (priv != NULL) + hybrid_tuner_release_state(priv); + mutex_unlock(&cx24117_list_mutex); +} + +static void cx24117_release(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + dev_dbg(&state->priv->i2c->dev, "%s demod%d\n", + __func__, state->demod); + cx24117_release_priv(state->priv); + 
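+	/*
+	 * Note: the call above only drops a reference on the shared
+	 * cx24117_priv.  Both demods of the chip attach against the same
+	 * priv via hybrid_tuner_request_state() (see cx24117_get_priv() and
+	 * the "new priv instance" / "existing priv instance" cases in
+	 * cx24117_attach() below), so the priv is expected to stay allocated
+	 * until the last frontend using it has been released; only the
+	 * per-demod state is freed here.
+	 */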
kfree(state); +} + +static struct dvb_frontend_ops cx24117_ops; + +struct dvb_frontend *cx24117_attach(const struct cx24117_config *config, + struct i2c_adapter *i2c) +{ + struct cx24117_state *state = NULL; + struct cx24117_priv *priv = NULL; + int demod = 0; + + /* get the common data struct for both demods */ + demod = cx24117_get_priv(&priv, i2c, config->demod_address); + + switch (demod) { + case 0: + dev_err(&state->priv->i2c->dev, + "%s: Error attaching frontend %d\n", + KBUILD_MODNAME, demod); + goto error1; + break; + case 1: + /* new priv instance */ + priv->i2c = i2c; + priv->demod_address = config->demod_address; + mutex_init(&priv->fe_lock); + break; + default: + /* existing priv instance */ + break; + } + + /* allocate memory for the internal state */ + state = kzalloc(sizeof(struct cx24117_state), GFP_KERNEL); + if (state == NULL) + goto error2; + + state->demod = demod - 1; + state->priv = priv; + + /* test i2c bus for ack */ + if (demod == 0) { + if (cx24117_readreg(state, 0x00) < 0) + goto error3; + } + + dev_info(&state->priv->i2c->dev, + "%s: Attaching frontend %d\n", + KBUILD_MODNAME, state->demod); + + /* create dvb_frontend */ + memcpy(&state->frontend.ops, &cx24117_ops, + sizeof(struct dvb_frontend_ops)); + state->frontend.demodulator_priv = state; + return &state->frontend; + +error3: + kfree(state); +error2: + cx24117_release_priv(priv); +error1: + return NULL; +} +EXPORT_SYMBOL_GPL(cx24117_attach); + +/* + * Initialise or wake up device + * + * Power config will reset and load initial firmware if required + */ +static int cx24117_initfe(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + int ret; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + mutex_lock(&state->priv->fe_lock); + + /* Firmware CMD 36: Power config */ + cmd.args[0] = CMD_TUNERSLEEP; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = 0; + cmd.len = 3; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto exit; + + ret = cx24117_diseqc_init(fe); + if (ret != 0) + goto exit; + + /* CMD 3C */ + cmd.args[0] = 0x3c; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = 0x10; + cmd.args[3] = 0x10; + cmd.len = 4; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + if (ret != 0) + goto exit; + + /* CMD 34 */ + cmd.args[0] = 0x34; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = CX24117_OCC; + cmd.len = 3; + ret = cx24117_cmd_execute_nolock(fe, &cmd); + +exit: + mutex_unlock(&state->priv->fe_lock); + + return ret; +} + +/* + * Put device to sleep + */ +static int cx24117_sleep(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct cx24117_cmd cmd; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + /* Firmware CMD 36: Power config */ + cmd.args[0] = CMD_TUNERSLEEP; + cmd.args[1] = (state->demod ? 1 : 0); + cmd.args[2] = 1; + cmd.len = 3; + return cx24117_cmd_execute(fe, &cmd); +} + +/* dvb-core told us to tune, the tv property cache will be complete, + * it's safe for is to pull values and use them for tuning purposes. 
+ */ +static int cx24117_set_frontend(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct cx24117_cmd cmd; + fe_status_t tunerstat; + int i, status, ret, retune = 1; + u8 reg_clkdiv, reg_ratediv; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + switch (c->delivery_system) { + case SYS_DVBS: + dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S\n", + __func__, state->demod); + + /* Only QPSK is supported for DVB-S */ + if (c->modulation != QPSK) { + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d unsupported modulation (%d)\n", + __func__, state->demod, c->modulation); + return -EINVAL; + } + + /* Pilot doesn't exist in DVB-S, turn bit off */ + state->dnxt.pilot_val = CX24117_PILOT_OFF; + + /* DVB-S only supports 0.35 */ + state->dnxt.rolloff_val = CX24117_ROLLOFF_035; + break; + + case SYS_DVBS2: + dev_dbg(&state->priv->i2c->dev, "%s() demod%d DVB-S2\n", + __func__, state->demod); + + /* + * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2, + * but not hardware auto detection + */ + if (c->modulation != PSK_8 && c->modulation != QPSK) { + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d unsupported modulation (%d)\n", + __func__, state->demod, c->modulation); + return -EOPNOTSUPP; + } + + switch (c->pilot) { + case PILOT_AUTO: + state->dnxt.pilot_val = CX24117_PILOT_AUTO; + break; + case PILOT_OFF: + state->dnxt.pilot_val = CX24117_PILOT_OFF; + break; + case PILOT_ON: + state->dnxt.pilot_val = CX24117_PILOT_ON; + break; + default: + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d unsupported pilot mode (%d)\n", + __func__, state->demod, c->pilot); + return -EOPNOTSUPP; + } + + switch (c->rolloff) { + case ROLLOFF_20: + state->dnxt.rolloff_val = CX24117_ROLLOFF_020; + break; + case ROLLOFF_25: + state->dnxt.rolloff_val = CX24117_ROLLOFF_025; + break; + case ROLLOFF_35: + state->dnxt.rolloff_val = CX24117_ROLLOFF_035; + break; + case ROLLOFF_AUTO: + state->dnxt.rolloff_val = CX24117_ROLLOFF_035; + /* soft-auto rolloff */ + retune = 3; + break; + default: + dev_warn(&state->priv->i2c->dev, + "%s: demod%d unsupported rolloff (%d)\n", + KBUILD_MODNAME, state->demod, c->rolloff); + return -EOPNOTSUPP; + } + break; + + default: + dev_warn(&state->priv->i2c->dev, + "%s: demod %d unsupported delivery system (%d)\n", + KBUILD_MODNAME, state->demod, c->delivery_system); + return -EINVAL; + } + + state->dnxt.delsys = c->delivery_system; + state->dnxt.modulation = c->modulation; + state->dnxt.frequency = c->frequency; + state->dnxt.pilot = c->pilot; + state->dnxt.rolloff = c->rolloff; + + ret = cx24117_set_inversion(state, c->inversion); + if (ret != 0) + return ret; + + ret = cx24117_set_fec(state, + c->delivery_system, c->modulation, c->fec_inner); + if (ret != 0) + return ret; + + ret = cx24117_set_symbolrate(state, c->symbol_rate); + if (ret != 0) + return ret; + + /* discard the 'current' tuning parameters and prepare to tune */ + cx24117_clone_params(fe); + + dev_dbg(&state->priv->i2c->dev, + "%s: delsys = %d\n", __func__, state->dcur.delsys); + dev_dbg(&state->priv->i2c->dev, + "%s: modulation = %d\n", __func__, state->dcur.modulation); + dev_dbg(&state->priv->i2c->dev, + "%s: frequency = %d\n", __func__, state->dcur.frequency); + dev_dbg(&state->priv->i2c->dev, + "%s: pilot = %d (val = 0x%02x)\n", __func__, + state->dcur.pilot, state->dcur.pilot_val); + dev_dbg(&state->priv->i2c->dev, + "%s: retune = %d\n", __func__, retune); + dev_dbg(&state->priv->i2c->dev, 
+ "%s: rolloff = %d (val = 0x%02x)\n", __func__, + state->dcur.rolloff, state->dcur.rolloff_val); + dev_dbg(&state->priv->i2c->dev, + "%s: symbol_rate = %d\n", __func__, state->dcur.symbol_rate); + dev_dbg(&state->priv->i2c->dev, + "%s: FEC = %d (mask/val = 0x%02x/0x%02x)\n", __func__, + state->dcur.fec, state->dcur.fec_mask, state->dcur.fec_val); + dev_dbg(&state->priv->i2c->dev, + "%s: Inversion = %d (val = 0x%02x)\n", __func__, + state->dcur.inversion, state->dcur.inversion_val); + + /* Prepare a tune request */ + cmd.args[0] = CMD_TUNEREQUEST; + + /* demod */ + cmd.args[1] = state->demod; + + /* Frequency */ + cmd.args[2] = (state->dcur.frequency & 0xff0000) >> 16; + cmd.args[3] = (state->dcur.frequency & 0x00ff00) >> 8; + cmd.args[4] = (state->dcur.frequency & 0x0000ff); + + /* Symbol Rate */ + cmd.args[5] = ((state->dcur.symbol_rate / 1000) & 0xff00) >> 8; + cmd.args[6] = ((state->dcur.symbol_rate / 1000) & 0x00ff); + + /* Automatic Inversion */ + cmd.args[7] = state->dcur.inversion_val; + + /* Modulation / FEC / Pilot */ + cmd.args[8] = state->dcur.fec_val | state->dcur.pilot_val; + + cmd.args[9] = CX24117_SEARCH_RANGE_KHZ >> 8; + cmd.args[10] = CX24117_SEARCH_RANGE_KHZ & 0xff; + + cmd.args[11] = state->dcur.rolloff_val; + cmd.args[12] = state->dcur.fec_mask; + + if (state->dcur.symbol_rate > 30000000) { + reg_ratediv = 0x04; + reg_clkdiv = 0x02; + } else if (state->dcur.symbol_rate > 10000000) { + reg_ratediv = 0x06; + reg_clkdiv = 0x03; + } else { + reg_ratediv = 0x0a; + reg_clkdiv = 0x05; + } + + cmd.args[13] = reg_ratediv; + cmd.args[14] = reg_clkdiv; + + cx24117_writereg(state, (state->demod == 0) ? + CX24117_REG_CLKDIV0 : CX24117_REG_CLKDIV1, reg_clkdiv); + cx24117_writereg(state, (state->demod == 0) ? + CX24117_REG_RATEDIV0 : CX24117_REG_RATEDIV1, reg_ratediv); + + cmd.args[15] = CX24117_PNE; + cmd.len = 16; + + do { + /* Reset status register */ + status = cx24117_readreg(state, (state->demod == 0) ? + CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1) & + CX24117_SIGNAL_MASK; + + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d status_setfe = %02x\n", + __func__, state->demod, status); + + cx24117_writereg(state, (state->demod == 0) ? + CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1, status); + + /* Tune */ + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + break; + + /* + * Wait for up to 500 ms before retrying + * + * If we are able to tune then generally it occurs within 100ms. + * If it takes longer, try a different rolloff setting. 
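+	 *
+	 * Note: the loop below polls cx24117_read_status() up to 50 times
+	 * with msleep(20) in between, so a single attempt may in fact take
+	 * on the order of a second.  With ROLLOFF_AUTO the attempt is
+	 * repeated up to three times (retune = 3) and cmd.args[11], the
+	 * rolloff byte of the tune request, is decremented between attempts
+	 * to try the next rolloff value ("soft-auto rolloff").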
+ */ + for (i = 0; i < 50; i++) { + cx24117_read_status(fe, &tunerstat); + status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC); + if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) { + dev_dbg(&state->priv->i2c->dev, + "%s() demod%d tuned\n", + __func__, state->demod); + return 0; + } + msleep(20); + } + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d not tuned\n", + __func__, state->demod); + + /* try next rolloff value */ + if (state->dcur.rolloff == 3) + cmd.args[11]--; + + } while (--retune); + return -EINVAL; +} + +static int cx24117_tune(struct dvb_frontend *fe, bool re_tune, + unsigned int mode_flags, unsigned int *delay, fe_status_t *status) +{ + struct cx24117_state *state = fe->demodulator_priv; + + dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n", + __func__, state->demod); + + *delay = HZ / 5; + if (re_tune) { + int ret = cx24117_set_frontend(fe); + if (ret) + return ret; + } + return cx24117_read_status(fe, status); +} + +static int cx24117_get_algo(struct dvb_frontend *fe) +{ + return DVBFE_ALGO_HW; +} + +static int cx24117_get_frontend(struct dvb_frontend *fe) +{ + struct cx24117_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct cx24117_cmd cmd; + u8 reg, st, inv; + int ret, idx; + unsigned int freq; + short srate_os, freq_os; + + u8 buf[0x1f-4]; + + cmd.args[0] = 0x1c; + cmd.args[1] = (u8) state->demod; + cmd.len = 2; + ret = cx24117_cmd_execute(fe, &cmd); + if (ret != 0) + return ret; + + /* read all required regs at once */ + reg = (state->demod == 0) ? CX24117_REG_FREQ3_0 : CX24117_REG_FREQ3_1; + ret = cx24117_readregN(state, reg, buf, 0x1f-4); + if (ret != 0) + return ret; + + st = buf[5]; + + /* get spectral inversion */ + inv = (((state->demod == 0) ? ~st : st) >> 6) & 1; + if (inv == 0) + c->inversion = INVERSION_OFF; + else + c->inversion = INVERSION_ON; + + /* modulation and fec */ + idx = st & 0x3f; + if (c->delivery_system == SYS_DVBS2) { + if (idx > 11) + idx += 9; + else + idx += 7; + } + + c->modulation = cx24117_modfec_modes[idx].modulation; + c->fec_inner = cx24117_modfec_modes[idx].fec; + + /* frequency */ + freq = (buf[0] << 16) | (buf[1] << 8) | buf[2]; + freq_os = (buf[8] << 8) | buf[9]; + c->frequency = freq + freq_os; + + /* symbol rate */ + srate_os = (buf[10] << 8) | buf[11]; + c->symbol_rate = -1000 * srate_os + state->dcur.symbol_rate; + return 0; +} + +static struct dvb_frontend_ops cx24117_ops = { + .delsys = { SYS_DVBS, SYS_DVBS2 }, + .info = { + .name = "Conexant CX24117/CX24132", + .frequency_min = 950000, + .frequency_max = 2150000, + .frequency_stepsize = 1011, /* kHz for QPSK frontends */ + .frequency_tolerance = 5000, + .symbol_rate_min = 1000000, + .symbol_rate_max = 45000000, + .caps = FE_CAN_INVERSION_AUTO | + FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | + FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | + FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | + FE_CAN_2G_MODULATION | + FE_CAN_QPSK | FE_CAN_RECOVER + }, + + .release = cx24117_release, + + .init = cx24117_initfe, + .sleep = cx24117_sleep, + .read_status = cx24117_read_status, + .read_ber = cx24117_read_ber, + .read_signal_strength = cx24117_read_signal_strength, + .read_snr = cx24117_read_snr, + .read_ucblocks = cx24117_read_ucblocks, + .set_tone = cx24117_set_tone, + .set_voltage = cx24117_set_voltage, + .diseqc_send_master_cmd = cx24117_send_diseqc_msg, + .diseqc_send_burst = cx24117_diseqc_send_burst, + .get_frontend_algo = cx24117_get_algo, + .tune = cx24117_tune, + + .set_frontend = cx24117_set_frontend, + .get_frontend = 
cx24117_get_frontend, +}; + + +MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24117/cx24132 hardware"); +MODULE_AUTHOR("Luis Alves (ljalvs@gmail.com)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.1"); +MODULE_FIRMWARE(CX24117_DEFAULT_FIRMWARE); + diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h new file mode 100644 index 00000000000..4e59e9574fa --- /dev/null +++ b/drivers/media/dvb-frontends/cx24117.h @@ -0,0 +1,47 @@ +/* + Conexant cx24117/cx24132 - Dual DVBS/S2 Satellite demod/tuner driver + + Copyright (C) 2013 Luis Alves <ljalvs@gmail.com> + (based on cx24116.h by Steven Toth) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef CX24117_H +#define CX24117_H + +#include <linux/kconfig.h> +#include <linux/dvb/frontend.h> + +struct cx24117_config { + /* the demodulator's i2c address */ + u8 demod_address; +}; + +#if IS_ENABLED(CONFIG_DVB_CX24117) +extern struct dvb_frontend *cx24117_attach( + const struct cx24117_config *config, + struct i2c_adapter *i2c); +#else +static inline struct dvb_frontend *cx24117_attach( + const struct cx24117_config *config, + struct i2c_adapter *i2c) +{ + dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__); + return NULL; +} +#endif + +#endif /* CX24117_H */ diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index a771da3e9f9..72fb5838cae 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c @@ -739,7 +739,7 @@ static int cx24123_set_voltage(struct dvb_frontend *fe, return 0; default: return -EINVAL; - }; + } return 0; } diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c index 7ca5c69dd20..03930d5e9fe 100644 --- a/drivers/media/dvb-frontends/cxd2820r_core.c +++ b/drivers/media/dvb-frontends/cxd2820r_core.c @@ -21,21 +21,31 @@ #include "cxd2820r_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple registers */ static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg, u8 *val, int len) { int ret; - u8 buf[len+1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = i2c, .flags = 0, - .len = sizeof(buf), + .len = len + 1, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -55,7 +65,7 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = i2c, @@ -65,11 +75,18 @@ static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg, }, { .addr = i2c, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + 
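+		/*
+		 * Note: this length check (and the matching one in the write
+		 * path above) exists because the on-stack variable-length
+		 * array previously used here, e.g.
+		 *
+		 *	u8 buf[len];
+		 *
+		 * has been replaced by a fixed u8 buf[MAX_XFER_SIZE] (64
+		 * bytes).  Oversized requests are therefore rejected with
+		 * -EINVAL instead of growing the stack frame at run time.
+		 * The same VLA-to-fixed-buffer conversion is applied to most
+		 * of the other demod/tuner drivers further down in this diff.
+		 */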
dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c index 6201c59a78d..e540cfb13ba 100644 --- a/drivers/media/dvb-frontends/dib9000.c +++ b/drivers/media/dvb-frontends/dib9000.c @@ -649,9 +649,9 @@ static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 si b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */ if (*b == '~') { b++; - dprintk(b); + dprintk("%s", b); } else - dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<emtpy>"); + dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>"); return 1; } diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c index 9a2134792cf..959ae36403b 100644 --- a/drivers/media/dvb-frontends/drxd_hard.c +++ b/drivers/media/dvb-frontends/drxd_hard.c @@ -46,10 +46,6 @@ #define DRX_I2C_MODEFLAGS 0xC0 #define DRX_I2C_FLAGS 0xF0 -#ifndef SIZEOF_ARRAY -#define SIZEOF_ARRAY(array) (sizeof((array))/sizeof((array)[0])) -#endif - #define DEFAULT_LOCK_TIMEOUT 1100 #define DRX_CHANNEL_AUTO 0 @@ -1018,7 +1014,7 @@ static int HI_CfgCommand(struct drxd_state *state) status = Write16(state, HI_RA_RAM_SRV_CMD__A, HI_RA_RAM_SRV_CMD_CONFIG, 0); else - status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, 0); + status = HI_Command(state, HI_RA_RAM_SRV_CMD_CONFIG, NULL); mutex_unlock(&state->mutex); return status; } @@ -1039,7 +1035,7 @@ static int HI_ResetCommand(struct drxd_state *state) status = Write16(state, HI_RA_RAM_SRV_RST_KEY__A, HI_RA_RAM_SRV_RST_KEY_ACT, 0); if (status == 0) - status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, 0); + status = HI_Command(state, HI_RA_RAM_SRV_CMD_RESET, NULL); mutex_unlock(&state->mutex); msleep(1); return status; @@ -2837,7 +2833,7 @@ static int drxd_init(struct dvb_frontend *fe) int err = 0; /* if (request_firmware(&state->fw, "drxd.fw", state->dev)<0) */ - return DRXD_init(state, 0, 0); + return DRXD_init(state, NULL, 0); err = DRXD_init(state, state->fw->data, state->fw->size); release_firmware(state->fw); @@ -2973,7 +2969,7 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config, mutex_init(&state->mutex); - if (Read16(state, 0, 0, 0) < 0) + if (Read16(state, 0, NULL, 0) < 0) goto error; state->frontend.ops = drxd_ops; diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 082014de687..d416c15691d 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -1083,7 +1083,7 @@ static int hi_cfg_command(struct drxk_state *state) SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY); if (status < 0) goto error; - status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0); + status = hi_command(state, SIO_HI_RA_RAM_CMD_CONFIG, NULL); if (status < 0) goto error; @@ -2781,7 +2781,7 @@ static int ConfigureI2CBridge(struct drxk_state *state, bool b_enable_bridge) goto error; } - status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0); + status = hi_command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, NULL); error: if (status < 0) diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c index c1c3400b217..cadcae4cff8 100644 --- a/drivers/media/dvb-frontends/itd1000.c +++ b/drivers/media/dvb-frontends/itd1000.c @@ -31,6 +31,9 @@ #include "itd1000.h" #include "itd1000_priv.h" 
+/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); @@ -52,10 +55,18 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); /* don't write more than one byte with flexcop behind */ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 len) { - u8 buf[1+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = buf, .len = len+1 }; + + if (1 + len > sizeof(buf)) { + printk(KERN_WARNING + "itd1000: i2c wr reg=%04x: len=%d is too big!\n", + reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], v, len); diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index ec388c1d691..a74ac0ddb83 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -36,6 +36,8 @@ #include "mt312_priv.h" #include "mt312.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 struct mt312_state { struct i2c_adapter *i2c; @@ -96,9 +98,15 @@ static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg, const u8 *src, const size_t count) { int ret; - u8 buf[count + 1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg; + if (1 + count > sizeof(buf)) { + printk(KERN_WARNING + "mt312: write: len=%zd is too big!\n", count); + return -EINVAL; + } + if (debug) { int i; dprintk("W(%d):", reg & 0x7f); diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c index 8e288940a61..fbca9856313 100644 --- a/drivers/media/dvb-frontends/nxt200x.c +++ b/drivers/media/dvb-frontends/nxt200x.c @@ -39,6 +39,9 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw" #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw" #define CRC_CCIT_MASK 0x1021 @@ -95,10 +98,16 @@ static int i2c_readbytes(struct nxt200x_state *state, u8 addr, u8 *buf, u8 len) static int nxt200x_writebytes (struct nxt200x_state* state, u8 reg, const u8 *buf, u8 len) { - u8 buf2 [len+1]; + u8 buf2[MAX_XFER_SIZE]; int err; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf2, .len = len + 1 }; + if (1 + len > sizeof(buf2)) { + pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n", + __func__, reg, len); + return -EINVAL; + } + buf2[0] = reg; memcpy(&buf2[1], buf, len); diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 362d26d11e8..7efb796c472 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -27,20 +27,30 @@ #include "rtl2830_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple hardware registers */ static int rtl2830_wr(struct rtl2830_priv *priv, u8 reg, const u8 *val, int len) { int ret; - u8 buf[1+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg.i2c_addr, .flags = 0, - .len = 1+len, + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index facb8484151..ff73da9365e 100644 --- 
a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -22,6 +22,9 @@ #include "dvb_math.h" #include <linux/bitops.h> +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + int rtl2832_debug; module_param_named(debug, rtl2832_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); @@ -162,16 +165,23 @@ static const struct rtl2832_reg_entry registers[] = { static int rtl2832_wr(struct rtl2832_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[1+len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg.i2c_addr, .flags = 0, - .len = 1+len, + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -489,6 +499,7 @@ static int rtl2832_init(struct dvb_frontend *fe) init = rtl2832_tuner_init_e4000; break; case RTL2832_TUNER_R820T: + case RTL2832_TUNER_R828D: len = ARRAY_SIZE(rtl2832_tuner_init_r820t); init = rtl2832_tuner_init_r820t; break; diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h index 91b2dcf5a6e..2cfbb6a9706 100644 --- a/drivers/media/dvb-frontends/rtl2832.h +++ b/drivers/media/dvb-frontends/rtl2832.h @@ -53,6 +53,7 @@ struct rtl2832_config { #define RTL2832_TUNER_E4000 0x27 #define RTL2832_TUNER_FC0013 0x29 #define RTL2832_TUNER_R820T 0x2a +#define RTL2832_TUNER_R828D 0x2b u8 tuner; }; diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c index e2fec9ebf94..93eeaf7118f 100644 --- a/drivers/media/dvb-frontends/s5h1420.c +++ b/drivers/media/dvb-frontends/s5h1420.c @@ -836,9 +836,16 @@ static u32 s5h1420_tuner_i2c_func(struct i2c_adapter *adapter) static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct s5h1420_state *state = i2c_get_adapdata(i2c_adap); - struct i2c_msg m[1 + num]; + struct i2c_msg m[3]; u8 tx_open[2] = { CON_1, state->CON_1_val | 1 }; /* repeater stops once there was a stop condition */ + if (1 + num > ARRAY_SIZE(m)) { + printk(KERN_WARNING + "%s: i2c xfer: num=%d is too big!\n", + KBUILD_MODNAME, num); + return -EOPNOTSUPP; + } + memset(m, 0, sizeof(struct i2c_msg) * (1 + num)); m[0].addr = state->config->demod_address; @@ -847,7 +854,7 @@ static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c memcpy(&m[1], msg, sizeof(struct i2c_msg) * num); - return i2c_transfer(state->i2c, m, 1+num) == 1 + num ? num : -EIO; + return i2c_transfer(state->i2c, m, 1 + num) == 1 + num ? 
num : -EIO; } static struct i2c_algorithm s5h1420_tuner_i2c_algo = { diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c index 3dd5714eadb..07cd5ea7a03 100644 --- a/drivers/media/dvb-frontends/stb0899_drv.c +++ b/drivers/media/dvb-frontends/stb0899_drv.c @@ -32,6 +32,9 @@ #include "stb0899_priv.h" #include "stb0899_reg.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static unsigned int verbose = 0;//1; module_param(verbose, int, 0644); @@ -499,7 +502,7 @@ err: int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, u32 count) { int ret; - u8 buf[2 + count]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg i2c_msg = { .addr = state->config->demod_address, .flags = 0, @@ -507,6 +510,13 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, .len = 2 + count }; + if (2 + count > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, count); + return -EINVAL; + } + buf[0] = reg >> 8; buf[1] = reg & 0xff; memcpy(&buf[2], data, count); diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index 45f9523f968..cea175d1989 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -31,6 +31,8 @@ static unsigned int verbose; module_param(verbose, int, 0644); +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 #define FE_ERROR 0 #define FE_NOTICE 1 @@ -183,7 +185,7 @@ static int stb6100_read_reg(struct stb6100_state *state, u8 reg) static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int start, int len) { int rc; - u8 cmdbuf[len + 1]; + u8 cmdbuf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->config->tuner_address, .flags = 0, @@ -191,6 +193,13 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st .len = len + 1 }; + if (1 + len > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EINVAL; + } + if (unlikely(start < 1 || start + len > STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register range %d:%d", start, len); diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 7b6dba3ce55..45877273942 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -33,6 +33,9 @@ #include "stv0367_regs.h" #include "stv0367_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int stvdebug; module_param_named(debug, stvdebug, int, 0644); @@ -767,7 +770,7 @@ static struct st_register def0367cab[STV0367CAB_NBREGS] = { static int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) { - u8 buf[len + 2]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, @@ -776,6 +779,14 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) }; int ret; + if (2 + len > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + + buf[0] = MSB(reg); buf[1] = LSB(reg); memcpy(buf + 2, data, len); diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c index 56d470ad5a8..23e872f8474 100644 --- a/drivers/media/dvb-frontends/stv090x.c +++ b/drivers/media/dvb-frontends/stv090x.c @@ -35,6 +35,9 @@ #include 
"stv090x.h" #include "stv090x_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static unsigned int verbose; module_param(verbose, int, 0644); @@ -722,9 +725,16 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 { const struct stv090x_config *config = state->config; int ret; - u8 buf[2 + count]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count }; + if (2 + count > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, count); + return -EINVAL; + } + buf[0] = reg >> 8; buf[1] = reg & 0xff; memcpy(&buf[2], data, count); diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c index 20b5fa92c53..b1425830a24 100644 --- a/drivers/media/dvb-frontends/stv6110.c +++ b/drivers/media/dvb-frontends/stv6110.c @@ -30,6 +30,9 @@ #include "stv6110.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int debug; struct stv6110_priv { @@ -68,7 +71,7 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[], { struct stv6110_priv *priv = fe->tuner_priv; int rc; - u8 cmdbuf[len + 1]; + u8 cmdbuf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = priv->i2c_address, .flags = 0, @@ -78,6 +81,13 @@ static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[], dprintk("%s\n", __func__); + if (1 + len > sizeof(cmdbuf)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EINVAL; + } + if (start + len > 8) return -EINVAL; diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index f36cab12bdc..e66154e5c1d 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -32,6 +32,9 @@ #include "stv6110x.h" #include "stv6110x_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static unsigned int verbose; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "Set Verbosity level"); @@ -61,7 +64,8 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da { int ret; const struct stv6110x_config *config = stv6110x->config; - u8 buf[len + 1]; + u8 buf[MAX_XFER_SIZE]; + struct i2c_msg msg = { .addr = config->addr, .flags = 0, @@ -69,6 +73,13 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da .len = len + 1 }; + if (1 + len > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EINVAL; + } + if (start + len > 8) return -EINVAL; diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index e79749cfec8..8ad3a57cf64 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -20,6 +20,9 @@ #include "tda10071_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static struct dvb_frontend_ops tda10071_ops; /* write multiple registers */ @@ -27,16 +30,23 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len+1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg.demod_i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + 
} + buf[0] = reg; memcpy(&buf[1], val, len); @@ -56,7 +66,7 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg.demod_i2c_addr, @@ -66,11 +76,18 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val, }, { .addr = priv->cfg.demod_i2c_addr, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c index d281f77d5c2..2c54586ac07 100644 --- a/drivers/media/dvb-frontends/tda18271c2dd.c +++ b/drivers/media/dvb-frontends/tda18271c2dd.c @@ -34,6 +34,9 @@ #include "dvb_frontend.h" #include "tda18271c2dd.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct SStandardParam { s32 m_IFFrequency; u32 m_BandWidth; @@ -139,11 +142,18 @@ static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len) static int WriteRegs(struct tda_state *state, u8 SubAddr, u8 *Regs, u16 nRegs) { - u8 data[nRegs+1]; + u8 data[MAX_XFER_SIZE]; + + if (1 + nRegs > sizeof(data)) { + printk(KERN_WARNING + "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, nRegs); + return -EINVAL; + } data[0] = SubAddr; memcpy(data + 1, Regs, nRegs); - return i2c_write(state->i2c, state->adr, data, nRegs+1); + return i2c_write(state->i2c, state->adr, data, nRegs + 1); } static int WriteReg(struct tda_state *state, u8 SubAddr, u8 Reg) diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c index 9d08350fe4b..69e62f42e2e 100644 --- a/drivers/media/dvb-frontends/tda8083.c +++ b/drivers/media/dvb-frontends/tda8083.c @@ -189,7 +189,7 @@ static int tda8083_set_tone (struct tda8083_state* state, fe_sec_tone_mode_t ton return tda8083_writereg (state, 0x29, 0x80); default: return -EINVAL; - }; + } } static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t voltage) @@ -201,7 +201,7 @@ static int tda8083_set_voltage (struct tda8083_state* state, fe_sec_voltage_t vo return tda8083_writereg (state, 0x20, 0x11); default: return -EINVAL; - }; + } } static int tda8083_send_diseqc_burst (struct tda8083_state* state, fe_sec_mini_cmd_t burst) diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index ad7ad857ab2..9aba044dabe 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -31,6 +31,7 @@ struct ts2020_priv { struct i2c_adapter *i2c; u8 clk_out_div; u32 frequency; + u32 frequency_div; }; static int ts2020_release(struct dvb_frontend *fe) @@ -193,7 +194,7 @@ static int ts2020_set_params(struct dvb_frontend *fe) u8 lo = 0x01, div4 = 0x0; /* Calculate frequency divider */ - if (frequency < 1060000) { + if (frequency < priv->frequency_div) { lo |= 0x10; div4 = 0x1; ndiv = (frequency * 14 * 4) / TS2020_XTAL_FREQ; @@ -340,8 +341,12 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe, priv->i2c_address = config->tuner_address; priv->i2c = i2c; priv->clk_out_div = config->clk_out_div; + priv->frequency_div = config->frequency_div; fe->tuner_priv = priv; + if (!priv->frequency_div) + priv->frequency_div = 1060000; + /* Wake Up the tuner */ if ((0x03 & ts2020_readreg(fe, 0x00)) == 0x00) 
{ ts2020_writereg(fe, 0x00, 0x01); diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h index 5bcb9a71ca8..b2fe6bb3a38 100644 --- a/drivers/media/dvb-frontends/ts2020.h +++ b/drivers/media/dvb-frontends/ts2020.h @@ -28,6 +28,7 @@ struct ts2020_config { u8 tuner_address; u8 clk_out_div; + u32 frequency_div; }; #if IS_ENABLED(CONFIG_DVB_TS2020) diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c index eff9c5fde50..91b6b2e9b79 100644 --- a/drivers/media/dvb-frontends/zl10039.c +++ b/drivers/media/dvb-frontends/zl10039.c @@ -30,6 +30,9 @@ static int debug; +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + #define dprintk(args...) \ do { \ if (debug) \ @@ -98,7 +101,7 @@ static int zl10039_write(struct zl10039_state *state, const enum zl10039_reg_addr reg, const u8 *src, const size_t count) { - u8 buf[count + 1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->i2c_addr, .flags = 0, @@ -106,6 +109,13 @@ static int zl10039_write(struct zl10039_state *state, .len = count + 1, }; + if (1 + count > sizeof(buf)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%zd is too big!\n", + KBUILD_MODNAME, reg, count); + return -EINVAL; + } + dprintk("%s\n", __func__); /* Write register address and data in one go */ buf[0] = reg; diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index cbc9ee9bec2..842654d3331 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -621,6 +621,15 @@ config VIDEO_AS3645A This is a driver for the AS3645A and LM3555 flash controllers. It has build in control for flash, torch and indicator LEDs. +config VIDEO_LM3560 + tristate "LM3560 dual flash driver support" + depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER + depends on MEDIA_CAMERA_SUPPORT + select REGMAP_I2C + ---help--- + This is a driver for the lm3560 dual flash controllers. It controls + flash, torch LEDs. + comment "Video improvement chips" config VIDEO_UPD64031A diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 9f462df77b4..e03f1776f4f 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/ obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o +obj-$(CONFIG_VIDEO_LM3560) += lm3560.o obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o obj-$(CONFIG_VIDEO_AK881X) += ak881x.o obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c index aeb56c53e39..d4e15a617c3 100644 --- a/drivers/media/i2c/adv7343.c +++ b/drivers/media/i2c/adv7343.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/videodev2.h> #include <linux/uaccess.h> +#include <linux/of.h> #include <media/adv7343.h> #include <media/v4l2-async.h> diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c new file mode 100644 index 00000000000..3317a9ae396 --- /dev/null +++ b/drivers/media/i2c/lm3560.c @@ -0,0 +1,488 @@ +/* + * drivers/media/i2c/lm3560.c + * General device driver for TI lm3560, FLASH LED Driver + * + * Copyright (C) 2013 Texas Instruments + * + * Contact: Daniel Jeong <gshark.jeong@gmail.com> + * Ldd-Mlp <ldd-mlp@list.ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/regmap.h> +#include <linux/videodev2.h> +#include <media/lm3560.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> + +/* registers definitions */ +#define REG_ENABLE 0x10 +#define REG_TORCH_BR 0xa0 +#define REG_FLASH_BR 0xb0 +#define REG_FLASH_TOUT 0xc0 +#define REG_FLAG 0xd0 +#define REG_CONFIG1 0xe0 + +/* Fault Mask */ +#define FAULT_TIMEOUT (1<<0) +#define FAULT_OVERTEMP (1<<1) +#define FAULT_SHORT_CIRCUIT (1<<2) + +enum led_enable { + MODE_SHDN = 0x0, + MODE_TORCH = 0x2, + MODE_FLASH = 0x3, +}; + +/* struct lm3560_flash + * + * @pdata: platform data + * @regmap: reg. map for i2c + * @lock: muxtex for serial access. + * @led_mode: V4L2 LED mode + * @ctrls_led: V4L2 contols + * @subdev_led: V4L2 subdev + */ +struct lm3560_flash { + struct device *dev; + struct lm3560_platform_data *pdata; + struct regmap *regmap; + struct mutex lock; + + enum v4l2_flash_led_mode led_mode; + struct v4l2_ctrl_handler ctrls_led[LM3560_LED_MAX]; + struct v4l2_subdev subdev_led[LM3560_LED_MAX]; +}; + +#define to_lm3560_flash(_ctrl, _no) \ + container_of(_ctrl->handler, struct lm3560_flash, ctrls_led[_no]) + +/* enable mode control */ +static int lm3560_mode_ctrl(struct lm3560_flash *flash) +{ + int rval = -EINVAL; + + switch (flash->led_mode) { + case V4L2_FLASH_LED_MODE_NONE: + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x03, MODE_SHDN); + break; + case V4L2_FLASH_LED_MODE_TORCH: + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x03, MODE_TORCH); + break; + case V4L2_FLASH_LED_MODE_FLASH: + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x03, MODE_FLASH); + break; + } + return rval; +} + +/* led1/2 enable/disable */ +static int lm3560_enable_ctrl(struct lm3560_flash *flash, + enum lm3560_led_id led_no, bool on) +{ + int rval; + + if (led_no == LM3560_LED0) { + if (on == true) + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x08, 0x08); + else + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x08, 0x00); + } else { + if (on == true) + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x10, 0x10); + else + rval = regmap_update_bits(flash->regmap, + REG_ENABLE, 0x10, 0x00); + } + return rval; +} + +/* torch1/2 brightness control */ +static int lm3560_torch_brt_ctrl(struct lm3560_flash *flash, + enum lm3560_led_id led_no, unsigned int brt) +{ + int rval; + u8 br_bits; + + if (brt < LM3560_TORCH_BRT_MIN) + return lm3560_enable_ctrl(flash, led_no, false); + else + rval = lm3560_enable_ctrl(flash, led_no, true); + + br_bits = LM3560_TORCH_BRT_uA_TO_REG(brt); + if (led_no == LM3560_LED0) + rval = regmap_update_bits(flash->regmap, + REG_TORCH_BR, 0x07, br_bits); + else + rval = regmap_update_bits(flash->regmap, + REG_TORCH_BR, 0x38, br_bits << 3); + + return rval; +} + +/* flash1/2 brightness control */ +static int lm3560_flash_brt_ctrl(struct lm3560_flash *flash, + enum lm3560_led_id led_no, unsigned int brt) +{ + int rval; + u8 
br_bits; + + if (brt < LM3560_FLASH_BRT_MIN) + return lm3560_enable_ctrl(flash, led_no, false); + else + rval = lm3560_enable_ctrl(flash, led_no, true); + + br_bits = LM3560_FLASH_BRT_uA_TO_REG(brt); + if (led_no == LM3560_LED0) + rval = regmap_update_bits(flash->regmap, + REG_FLASH_BR, 0x0f, br_bits); + else + rval = regmap_update_bits(flash->regmap, + REG_FLASH_BR, 0xf0, br_bits << 4); + + return rval; +} + +/* V4L2 controls */ +static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no) +{ + struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no); + + mutex_lock(&flash->lock); + + if (ctrl->id == V4L2_CID_FLASH_FAULT) { + int rval; + s32 fault = 0; + unsigned int reg_val; + rval = regmap_read(flash->regmap, REG_FLAG, ®_val); + if (rval < 0) + return rval; + if (rval & FAULT_SHORT_CIRCUIT) + fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT; + if (rval & FAULT_OVERTEMP) + fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE; + if (rval & FAULT_TIMEOUT) + fault |= V4L2_FLASH_FAULT_TIMEOUT; + ctrl->cur.val = fault; + return 0; + } + + mutex_unlock(&flash->lock); + return -EINVAL; +} + +static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no) +{ + struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no); + u8 tout_bits; + int rval = -EINVAL; + + mutex_lock(&flash->lock); + + switch (ctrl->id) { + case V4L2_CID_FLASH_LED_MODE: + flash->led_mode = ctrl->val; + if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) + rval = lm3560_mode_ctrl(flash); + break; + + case V4L2_CID_FLASH_STROBE_SOURCE: + rval = regmap_update_bits(flash->regmap, + REG_CONFIG1, 0x04, (ctrl->val) << 2); + if (rval < 0) + goto err_out; + break; + + case V4L2_CID_FLASH_STROBE: + if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) + return -EBUSY; + flash->led_mode = V4L2_FLASH_LED_MODE_FLASH; + rval = lm3560_mode_ctrl(flash); + break; + + case V4L2_CID_FLASH_STROBE_STOP: + if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) + return -EBUSY; + flash->led_mode = V4L2_FLASH_LED_MODE_NONE; + rval = lm3560_mode_ctrl(flash); + break; + + case V4L2_CID_FLASH_TIMEOUT: + tout_bits = LM3560_FLASH_TOUT_ms_TO_REG(ctrl->val); + rval = regmap_update_bits(flash->regmap, + REG_FLASH_TOUT, 0x1f, tout_bits); + break; + + case V4L2_CID_FLASH_INTENSITY: + rval = lm3560_flash_brt_ctrl(flash, led_no, ctrl->val); + break; + + case V4L2_CID_FLASH_TORCH_INTENSITY: + rval = lm3560_torch_brt_ctrl(flash, led_no, ctrl->val); + break; + } + + mutex_unlock(&flash->lock); +err_out: + return rval; +} + +static int lm3560_led1_get_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_get_ctrl(ctrl, LM3560_LED1); +} + +static int lm3560_led1_set_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_set_ctrl(ctrl, LM3560_LED1); +} + +static int lm3560_led0_get_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_get_ctrl(ctrl, LM3560_LED0); +} + +static int lm3560_led0_set_ctrl(struct v4l2_ctrl *ctrl) +{ + return lm3560_set_ctrl(ctrl, LM3560_LED0); +} + +static const struct v4l2_ctrl_ops lm3560_led_ctrl_ops[LM3560_LED_MAX] = { + [LM3560_LED0] = { + .g_volatile_ctrl = lm3560_led0_get_ctrl, + .s_ctrl = lm3560_led0_set_ctrl, + }, + [LM3560_LED1] = { + .g_volatile_ctrl = lm3560_led1_get_ctrl, + .s_ctrl = lm3560_led1_set_ctrl, + } +}; + +static int lm3560_init_controls(struct lm3560_flash *flash, + enum lm3560_led_id led_no) +{ + struct v4l2_ctrl *fault; + u32 max_flash_brt = flash->pdata->max_flash_brt[led_no]; + u32 max_torch_brt = flash->pdata->max_torch_brt[led_no]; + struct v4l2_ctrl_handler *hdl = &flash->ctrls_led[led_no]; + const struct v4l2_ctrl_ops *ops 
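Editor's aside (not part of the patch series): in lm3560_get_ctrl() above, the V4L2_CID_FLASH_FAULT branch returns while flash->lock is still held, and it tests rval (the regmap_read() return value, 0 on success) instead of reg_val, so the latched fault bits can never be reported. A corrected sketch of the function under those two observations:

static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
{
	struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
	int rval = -EINVAL;

	mutex_lock(&flash->lock);

	if (ctrl->id == V4L2_CID_FLASH_FAULT) {
		s32 fault = 0;
		unsigned int reg_val;

		rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
		if (rval == 0) {
			/* report the fault bits read back from REG_FLAG */
			if (reg_val & FAULT_SHORT_CIRCUIT)
				fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
			if (reg_val & FAULT_OVERTEMP)
				fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
			if (reg_val & FAULT_TIMEOUT)
				fault |= V4L2_FLASH_FAULT_TIMEOUT;
			ctrl->cur.val = fault;
		}
	}

	mutex_unlock(&flash->lock);	/* the lock is dropped on every path */
	return rval;
}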
= &lm3560_led_ctrl_ops[led_no]; + + v4l2_ctrl_handler_init(hdl, 8); + /* flash mode */ + v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_LED_MODE, + V4L2_FLASH_LED_MODE_TORCH, ~0x7, + V4L2_FLASH_LED_MODE_NONE); + flash->led_mode = V4L2_FLASH_LED_MODE_NONE; + + /* flash source */ + v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_FLASH_STROBE_SOURCE, + 0x1, ~0x3, V4L2_FLASH_STROBE_SOURCE_SOFTWARE); + + /* flash strobe */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0); + /* flash strobe stop */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0); + + /* flash strobe timeout */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TIMEOUT, + LM3560_FLASH_TOUT_MIN, + flash->pdata->max_flash_timeout, + LM3560_FLASH_TOUT_STEP, + flash->pdata->max_flash_timeout); + + /* flash brt */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_INTENSITY, + LM3560_FLASH_BRT_MIN, max_flash_brt, + LM3560_FLASH_BRT_STEP, max_flash_brt); + + /* torch brt */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TORCH_INTENSITY, + LM3560_TORCH_BRT_MIN, max_torch_brt, + LM3560_TORCH_BRT_STEP, max_torch_brt); + + /* fault */ + fault = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_FAULT, 0, + V4L2_FLASH_FAULT_OVER_VOLTAGE + | V4L2_FLASH_FAULT_OVER_TEMPERATURE + | V4L2_FLASH_FAULT_SHORT_CIRCUIT + | V4L2_FLASH_FAULT_TIMEOUT, 0, 0); + if (fault != NULL) + fault->flags |= V4L2_CTRL_FLAG_VOLATILE; + + if (hdl->error) + return hdl->error; + + flash->subdev_led[led_no].ctrl_handler = hdl; + return 0; +} + +/* initialize device */ +static const struct v4l2_subdev_ops lm3560_ops = { + .core = NULL, +}; + +static const struct regmap_config lm3560_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xFF, +}; + +static int lm3560_subdev_init(struct lm3560_flash *flash, + enum lm3560_led_id led_no, char *led_name) +{ + struct i2c_client *client = to_i2c_client(flash->dev); + int rval; + + v4l2_i2c_subdev_init(&flash->subdev_led[led_no], client, &lm3560_ops); + flash->subdev_led[led_no].flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + strcpy(flash->subdev_led[led_no].name, led_name); + rval = lm3560_init_controls(flash, led_no); + if (rval) + goto err_out; + rval = media_entity_init(&flash->subdev_led[led_no].entity, 0, NULL, 0); + if (rval < 0) + goto err_out; + flash->subdev_led[led_no].entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH; + + return rval; + +err_out: + v4l2_ctrl_handler_free(&flash->ctrls_led[led_no]); + return rval; +} + +static int lm3560_init_device(struct lm3560_flash *flash) +{ + int rval; + unsigned int reg_val; + + /* set peak current */ + rval = regmap_update_bits(flash->regmap, + REG_FLASH_TOUT, 0x60, flash->pdata->peak); + if (rval < 0) + return rval; + /* output disable */ + flash->led_mode = V4L2_FLASH_LED_MODE_NONE; + rval = lm3560_mode_ctrl(flash); + if (rval < 0) + return rval; + /* Reset faults */ + rval = regmap_read(flash->regmap, REG_FLAG, ®_val); + return rval; +} + +static int lm3560_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct lm3560_flash *flash; + struct lm3560_platform_data *pdata = dev_get_platdata(&client->dev); + int rval; + + flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL); + if (flash == NULL) + return -ENOMEM; + + flash->regmap = devm_regmap_init_i2c(client, &lm3560_regmap); + if (IS_ERR(flash->regmap)) { + rval = PTR_ERR(flash->regmap); + return rval; + } + + /* if there is no platform data, use chip default value */ + if (pdata == NULL) { + pdata = + kzalloc(sizeof(struct lm3560_platform_data), GFP_KERNEL); + if (pdata == NULL) + 
return -ENODEV; + pdata->peak = LM3560_PEAK_3600mA; + pdata->max_flash_timeout = LM3560_FLASH_TOUT_MAX; + /* led 1 */ + pdata->max_flash_brt[LM3560_LED0] = LM3560_FLASH_BRT_MAX; + pdata->max_torch_brt[LM3560_LED0] = LM3560_TORCH_BRT_MAX; + /* led 2 */ + pdata->max_flash_brt[LM3560_LED1] = LM3560_FLASH_BRT_MAX; + pdata->max_torch_brt[LM3560_LED1] = LM3560_TORCH_BRT_MAX; + } + flash->pdata = pdata; + flash->dev = &client->dev; + mutex_init(&flash->lock); + + rval = lm3560_subdev_init(flash, LM3560_LED0, "lm3560-led0"); + if (rval < 0) + return rval; + + rval = lm3560_subdev_init(flash, LM3560_LED1, "lm3560-led1"); + if (rval < 0) + return rval; + + rval = lm3560_init_device(flash); + if (rval < 0) + return rval; + + return 0; +} + +static int lm3560_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct lm3560_flash *flash = container_of(subdev, struct lm3560_flash, + subdev_led[LM3560_LED_MAX]); + unsigned int i; + + for (i = LM3560_LED0; i < LM3560_LED_MAX; i++) { + v4l2_device_unregister_subdev(&flash->subdev_led[i]); + v4l2_ctrl_handler_free(&flash->ctrls_led[i]); + media_entity_cleanup(&flash->subdev_led[i].entity); + } + + return 0; +} + +static const struct i2c_device_id lm3560_id_table[] = { + {LM3560_NAME, 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, lm3560_id_table); + +static struct i2c_driver lm3560_i2c_driver = { + .driver = { + .name = LM3560_NAME, + .pm = NULL, + }, + .probe = lm3560_probe, + .remove = lm3560_remove, + .id_table = lm3560_id_table, +}; + +module_i2c_driver(lm3560_i2c_driver); + +MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>"); +MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>"); +MODULE_DESCRIPTION("Texas Instruments LM3560 LED flash driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 31f40b34204..6fec9384d86 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1581,7 +1581,7 @@ static int s5c73m3_probe(struct i2c_client *client, oif_sd = &state->oif_sd; v4l2_subdev_init(sd, &s5c73m3_subdev_ops); - sd->owner = client->driver->driver.owner; + sd->owner = client->dev.driver->owner; v4l2_set_subdevdata(sd, state); strlcpy(sd->name, "S5C73M3", sizeof(sd->name)); diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c index 1d384a371b4..5b915936c3f 100644 --- a/drivers/media/i2c/soc_camera/imx074.c +++ b/drivers/media/i2c/soc_camera/imx074.c @@ -451,7 +451,9 @@ static int imx074_probe(struct i2c_client *client, if (ret < 0) goto eprobe; - return v4l2_async_register_subdev(&priv->subdev); + ret = v4l2_async_register_subdev(&priv->subdev); + if (!ret) + return 0; epwrinit: eprobe: diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c index e968c3fdbd9..bc74224503e 100644 --- a/drivers/media/i2c/soc_camera/ov9640.c +++ b/drivers/media/i2c/soc_camera/ov9640.c @@ -371,7 +371,7 @@ static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code, alt->com13 = OV9640_COM13_RGB_AVG; alt->com15 = OV9640_COM15_RGB_565; break; - }; + } } /* Setup registers according to resolution and color encoding */ diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c index d9f65d7e3e5..04139eec8c4 100644 --- a/drivers/media/i2c/ths8200.c +++ b/drivers/media/i2c/ths8200.c @@ -19,6 +19,7 @@ #include <linux/i2c.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/v4l2-dv-timings.h> #include 
<media/v4l2-dv-timings.h> diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c index 91f3dd4cda1..83d85df4853 100644 --- a/drivers/media/i2c/tvp514x.c +++ b/drivers/media/i2c/tvp514x.c @@ -35,6 +35,7 @@ #include <linux/videodev2.h> #include <linux/module.h> #include <linux/v4l2-mediabus.h> +#include <linux/of.h> #include <media/v4l2-async.h> #include <media/v4l2-device.h> diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c index 24a08fa7e32..912e1cccdd1 100644 --- a/drivers/media/i2c/tvp7002.c +++ b/drivers/media/i2c/tvp7002.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/v4l2-dv-timings.h> #include <media/tvp7002.h> #include <media/v4l2-async.h> diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index 447afbd904a..8b5e0b3a92a 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ b/drivers/media/pci/b2c2/flexcop-pci.c @@ -319,7 +319,6 @@ static int flexcop_pci_init(struct flexcop_pci *fc_pci) err_pci_iounmap: pci_iounmap(fc_pci->pdev, fc_pci->io_mem); - pci_set_drvdata(fc_pci->pdev, NULL); err_pci_release_regions: pci_release_regions(fc_pci->pdev); err_pci_disable_device: @@ -332,7 +331,6 @@ static void flexcop_pci_exit(struct flexcop_pci *fc_pci) if (fc_pci->init_state & FC_PCI_INIT) { free_irq(fc_pci->pdev->irq, fc_pci); pci_iounmap(fc_pci->pdev, fc_pci->io_mem); - pci_set_drvdata(fc_pci->pdev, NULL); pci_release_regions(fc_pci->pdev); pci_disable_device(fc_pci->pdev); } diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index 66eb0baab0e..d0c281f41a0 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -488,8 +488,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) btwrite(0, BT848_INT_MASK); result = request_irq(bt->irq, bt878_irq, - IRQF_SHARED | IRQF_DISABLED, "bt878", - (void *) bt); + IRQF_SHARED, "bt878", (void *) bt); if (result == -EINVAL) { printk(KERN_ERR "bt878(%d): Bad irq number or handler\n", bt878_num); @@ -563,7 +562,6 @@ static void bt878_remove(struct pci_dev *pci_dev) bt->shutdown = 1; bt878_mem_free(bt); - pci_set_drvdata(pci_dev, NULL); pci_disable_device(pci_dev); return; } diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index c6532de0eac..a3b1ee9c00d 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4086,7 +4086,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) /* disable irqs, register irq handler */ btwrite(0, BT848_INT_MASK); result = request_irq(btv->c.pci->irq, bttv_irq, - IRQF_SHARED | IRQF_DISABLED, btv->c.v4l2_dev.name, (void *)btv); + IRQF_SHARED, btv->c.v4l2_dev.name, (void *)btv); if (result < 0) { pr_err("%d: can't get IRQ %d\n", bttv_num, btv->c.pci->irq); diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index 004d8ace501..c1f8cc6f14b 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -324,23 +324,24 @@ static void cx18_eeprom_dump(struct cx18 *cx, unsigned char *eedata, int len) /* Hauppauge card? 
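Editor's aside (not part of the patch series): the cx18_read_eeprom() conversion that follows moves the temporary struct i2c_client off the stack with kzalloc(), but the returned pointer is dereferenced without a NULL check. A defensive variant of the allocation, keeping the rest of the hunk unchanged, might read:

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (c == NULL)		/* kzalloc() can fail; nothing to clean up yet */
		return;

	strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));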
get values from tveeprom */ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) { - struct i2c_client c; + struct i2c_client *c; u8 eedata[256]; - memset(&c, 0, sizeof(c)); - strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name)); - c.adapter = &cx->i2c_adap[0]; - c.addr = 0xA0 >> 1; + c = kzalloc(sizeof(*c), GFP_KERNEL); + + strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name)); + c->adapter = &cx->i2c_adap[0]; + c->addr = 0xa0 >> 1; memset(tv, 0, sizeof(*tv)); - if (tveeprom_read(&c, eedata, sizeof(eedata))) - return; + if (tveeprom_read(c, eedata, sizeof(eedata))) + goto ret; switch (cx->card->type) { case CX18_CARD_HVR_1600_ESMT: case CX18_CARD_HVR_1600_SAMSUNG: case CX18_CARD_HVR_1600_S5H1411: - tveeprom_hauppauge_analog(&c, tv, eedata); + tveeprom_hauppauge_analog(c, tv, eedata); break; case CX18_CARD_YUAN_MPC718: case CX18_CARD_GOTVIEW_PCI_DVD3: @@ -354,6 +355,9 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) cx18_eeprom_dump(cx, eedata, sizeof(eedata)); break; } + +ret: + kfree(c); } static void cx18_process_eeprom(struct cx18 *cx) @@ -1031,8 +1035,7 @@ static int cx18_probe(struct pci_dev *pci_dev, /* Register IRQ */ retval = request_irq(cx->pci_dev->irq, cx18_irq_handler, - IRQF_SHARED | IRQF_DISABLED, - cx->v4l2_dev.name, (void *)cx); + IRQF_SHARED, cx->v4l2_dev.name, (void *)cx); if (retval) { CX18_ERR("Failed to register irq %d\n", retval); goto free_i2c; diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig index 5104c802f72..d1dcb1d2e08 100644 --- a/drivers/media/pci/cx23885/Kconfig +++ b/drivers/media/pci/cx23885/Kconfig @@ -23,6 +23,7 @@ config VIDEO_CX23885 select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT + select DVB_CX24117 if MEDIA_SUBDRV_AUTOSELECT select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c index 7344849183a..16fa7ea4d4a 100644 --- a/drivers/media/pci/cx23885/cimax2.c +++ b/drivers/media/pci/cx23885/cimax2.c @@ -26,6 +26,10 @@ #include "cx23885.h" #include "cimax2.h" #include "dvb_ca_en50221.h" + +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /**** Bit definitions for MC417_RWD and MC417_OEN registers *** bits 31-16 +-----------+ @@ -125,7 +129,7 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, u8 *buf, int len) { int ret; - u8 buffer[len + 1]; + u8 buffer[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = addr, @@ -134,6 +138,13 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, .len = len + 1 }; + if (1 + len > sizeof(buffer)) { + printk(KERN_WARNING + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buffer[0] = reg; memcpy(&buffer[1], buf, len); diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index 6a71a965e75..79f20c8c842 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -223,6 +223,39 @@ struct cx23885_board cx23885_boards[] = { .name = "Leadtek Winfast PxDVR3200 H", .portc = CX23885_MPEG_DVB, }, + [CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200] = { + .name = "Leadtek Winfast PxPVR2200", + .porta = CX23885_ANALOG_VIDEO, + .tuner_type = TUNER_XC2028, + .tuner_addr = 0x61, + .tuner_bus = 1, + .input = {{ + .type = 
CX23885_VMUX_TELEVISION, + .vmux = CX25840_VIN2_CH1 | + CX25840_VIN5_CH2, + .amux = CX25840_AUDIO8, + .gpio0 = 0x704040, + }, { + .type = CX23885_VMUX_COMPOSITE1, + .vmux = CX25840_COMPOSITE1, + .amux = CX25840_AUDIO7, + .gpio0 = 0x704040, + }, { + .type = CX23885_VMUX_SVIDEO, + .vmux = CX25840_SVIDEO_LUMA3 | + CX25840_SVIDEO_CHROMA4, + .amux = CX25840_AUDIO7, + .gpio0 = 0x704040, + }, { + .type = CX23885_VMUX_COMPONENT, + .vmux = CX25840_VIN7_CH1 | + CX25840_VIN6_CH2 | + CX25840_VIN8_CH3 | + CX25840_COMPONENT_ON, + .amux = CX25840_AUDIO7, + .gpio0 = 0x704040, + } }, + }, [CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = { .name = "Leadtek Winfast PxDVR3200 H XC4000", .porta = CX23885_ANALOG_VIDEO, @@ -259,6 +292,16 @@ struct cx23885_board cx23885_boards[] = { .name = "TurboSight TBS 6920", .portb = CX23885_MPEG_DVB, }, + [CX23885_BOARD_TBS_6980] = { + .name = "TurboSight TBS 6980", + .portb = CX23885_MPEG_DVB, + .portc = CX23885_MPEG_DVB, + }, + [CX23885_BOARD_TBS_6981] = { + .name = "TurboSight TBS 6981", + .portb = CX23885_MPEG_DVB, + .portc = CX23885_MPEG_DVB, + }, [CX23885_BOARD_TEVII_S470] = { .name = "TeVii S470", .portb = CX23885_MPEG_DVB, @@ -688,6 +731,10 @@ struct cx23885_subid cx23885_subids[] = { .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H, }, { .subvendor = 0x107d, + .subdevice = 0x6f21, + .card = CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200, + }, { + .subvendor = 0x107d, .subdevice = 0x6f39, .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000, }, { @@ -699,6 +746,14 @@ struct cx23885_subid cx23885_subids[] = { .subdevice = 0x8888, .card = CX23885_BOARD_TBS_6920, }, { + .subvendor = 0x6980, + .subdevice = 0x8888, + .card = CX23885_BOARD_TBS_6980, + }, { + .subvendor = 0x6981, + .subdevice = 0x8888, + .card = CX23885_BOARD_TBS_6981, + }, { .subvendor = 0xd470, .subdevice = 0x9022, .card = CX23885_BOARD_TEVII_S470, @@ -1023,6 +1078,35 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data) dev->name, tv.model); } +/* Some TBS cards require initing a chip using a bitbanged SPI attached + to the cx23885 gpio's. If this chip doesn't get init'ed the demod + doesn't respond to any command. 
*/ +static void tbs_card_init(struct cx23885_dev *dev) +{ + int i; + const u8 buf[] = { + 0xe0, 0x06, 0x66, 0x33, 0x65, + 0x01, 0x17, 0x06, 0xde}; + + switch (dev->board) { + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + cx_set(GP0_IO, 0x00070007); + usleep_range(1000, 10000); + cx_clear(GP0_IO, 2); + usleep_range(1000, 10000); + for (i = 0; i < 9 * 8; i++) { + cx_clear(GP0_IO, 7); + usleep_range(1000, 10000); + cx_set(GP0_IO, + ((buf[i >> 3] >> (7 - (i & 7))) & 1) | 4); + usleep_range(1000, 10000); + } + cx_set(GP0_IO, 7); + break; + } +} + int cx23885_tuner_callback(void *priv, int component, int command, int arg) { struct cx23885_tsport *port = priv; @@ -1043,6 +1127,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg) case CX23885_BOARD_HAUPPAUGE_HVR1500: case CX23885_BOARD_HAUPPAUGE_HVR1500Q: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_COMPRO_VIDEOMATE_E800: @@ -1208,6 +1293,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) cx_set(GP0_IO, 0x000f000f); break; case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_COMPRO_VIDEOMATE_E800: @@ -1225,6 +1311,8 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) cx_set(GP0_IO, 0x00040004); break; case CX23885_BOARD_TBS_6920: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: case CX23885_BOARD_PROF_8000: cx_write(MC417_CTL, 0x00000036); cx_write(MC417_OEN, 0x00001000); @@ -1473,6 +1561,8 @@ int cx23885_ir_init(struct cx23885_dev *dev) case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: if (!enable_885_ir) break; dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE); @@ -1516,6 +1606,8 @@ void cx23885_ir_fini(struct cx23885_dev *dev) case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: cx23885_irq_remove(dev, PCI_MSK_AV_CORE); /* sd_ir is a duplicate pointer to the AV Core, just clear it */ dev->sd_ir = NULL; @@ -1561,6 +1653,8 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev) case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: if (dev->sd_ir) cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE); break; @@ -1676,6 +1770,16 @@ void cx23885_card_setup(struct cx23885_dev *dev) ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; break; + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ + ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ + ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; + ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ + ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ + ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; + tbs_card_init(dev); + break; case CX23885_BOARD_MYGICA_X8506: case CX23885_BOARD_MAGICPRO_PROHDTVE2: case CX23885_BOARD_MYGICA_X8507: @@ -1704,6 +1808,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_HAUPPAUGE_HVR1700: case 
CX23885_BOARD_HAUPPAUGE_HVR1400: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_HAUPPAUGE_HVR1270: @@ -1733,6 +1838,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_HAUPPAUGE_HVR1800lp: case CX23885_BOARD_HAUPPAUGE_HVR1700: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: + case CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200: case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000: case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: @@ -1752,6 +1858,8 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_MYGICA_X8507: case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_AVERMEDIA_HC81R: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[2].i2c_adap, "cx25840", 0x88 >> 1, NULL); diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 9f63d93239e..edcd79db1e4 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -2129,7 +2129,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev, } err = request_irq(pci_dev->irq, cx23885_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, dev); + IRQF_SHARED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name, pci_dev->irq); diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index 971e4ff1b87..05492053b47 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -51,6 +51,7 @@ #include "stv6110.h" #include "lnbh24.h" #include "cx24116.h" +#include "cx24117.h" #include "cimax2.h" #include "lgs8gxx.h" #include "netup-eeprom.h" @@ -461,6 +462,10 @@ static struct cx24116_config tbs_cx24116_config = { .demod_address = 0x55, }; +static struct cx24117_config tbs_cx24117_config = { + .demod_address = 0x55, +}; + static struct ds3000_config tevii_ds3000_config = { .demod_address = 0x68, }; @@ -1044,6 +1049,25 @@ static int dvb_register(struct cx23885_tsport *port) fe0->dvb.frontend->ops.set_voltage = f300_set_voltage; break; + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + i2c_bus = &dev->i2c_bus[1]; + + switch (port->nr) { + /* PORT B */ + case 1: + fe0->dvb.frontend = dvb_attach(cx24117_attach, + &tbs_cx24117_config, + &i2c_bus->i2c_adap); + break; + /* PORT C */ + case 2: + fe0->dvb.frontend = dvb_attach(cx24117_attach, + &tbs_cx24117_config, + &i2c_bus->i2c_adap); + break; + } + break; case CX23885_BOARD_TEVII_S470: i2c_bus = &dev->i2c_bus[1]; diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c index 7875dfbe09f..8a49e7c9edd 100644 --- a/drivers/media/pci/cx23885/cx23885-input.c +++ b/drivers/media/pci/cx23885/cx23885-input.c @@ -90,6 +90,8 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events) case CX23885_BOARD_TEVII_S470: case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_MYGICA_X8507: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: /* * The only boards we handle right now. 
However other boards * using the CX2388x integrated IR controller should be similar @@ -168,6 +170,8 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev) break; case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: case CX23885_BOARD_TEVII_S470: + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: /* * The IR controller on this board only returns pulse widths. * Any other mode setting will fail to set up the device. @@ -298,6 +302,14 @@ int cx23885_input_init(struct cx23885_dev *dev) /* A guess at the remote */ rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02; break; + case CX23885_BOARD_TBS_6980: + case CX23885_BOARD_TBS_6981: + /* Integrated CX23885 IR controller */ + driver_type = RC_DRIVER_IR_RAW; + allowed_protos = RC_BIT_ALL; + /* A guess at the remote */ + rc_map = RC_MAP_TBS_NEC; + break; default: return -ENODEV; } diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index 161686832b2..7891f34157d 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -1865,7 +1865,8 @@ int cx23885_video_register(struct cx23885_dev *dev) v4l2_subdev_call(sd, tuner, s_type_addr, &tun_setup); - if (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) { + if ((dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXTV1200) || + (dev->board == CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200)) { struct xc2028_ctrl ctrl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64 diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index 038caf53908..0fa4048ab87 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -93,6 +93,9 @@ #define CX23885_BOARD_PROF_8000 37 #define CX23885_BOARD_HAUPPAUGE_HVR4400 38 #define CX23885_BOARD_AVERMEDIA_HC81R 39 +#define CX23885_BOARD_TBS_6981 40 +#define CX23885_BOARD_TBS_6980 41 +#define CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200 42 #define GPIO_0 0x00000001 #define GPIO_1 0x00000002 diff --git a/drivers/media/pci/cx25821/cx25821-cards.c b/drivers/media/pci/cx25821/cx25821-cards.c index 3b409feb03d..f2ebc989b30 100644 --- a/drivers/media/pci/cx25821/cx25821-cards.c +++ b/drivers/media/pci/cx25821/cx25821-cards.c @@ -45,5 +45,3 @@ struct cx25821_board cx25821_boards[] = { }, }; - -const unsigned int cx25821_bcount = ARRAY_SIZE(cx25821_boards); diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.c b/drivers/media/pci/cx25821/cx25821-medusa-video.c index 22fa04415cc..43bdfa4dfba 100644 --- a/drivers/media/pci/cx25821/cx25821-medusa-video.c +++ b/drivers/media/pci/cx25821/cx25821-medusa-video.c @@ -438,7 +438,7 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width, decoder_count = decoder_select + 1; } else { decoder = 0; - decoder_count = _num_decoders; + decoder_count = dev->_max_num_decoders; } switch (width) { @@ -506,8 +506,6 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder, break; } - _display_field_cnt[decoder] = duration; - /* update hardware */ fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp); @@ -667,8 +665,6 @@ int medusa_video_init(struct cx25821_dev *dev) int ret_val = 0; int i = 0; - _num_decoders = dev->_max_num_decoders; - /* disable Auto source selection on all video decoders */ value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp); value &= 0xFFFFF0FF; @@ -685,8 +681,14 @@ int medusa_video_init(struct cx25821_dev *dev) if (ret_val < 0) goto error; - for (i = 0; i < _num_decoders; i++) - medusa_set_decoderduration(dev, i, 
_display_field_cnt[i]); + /* + * FIXME: due to a coding bug the duration was always 0. It's + * likely that it really should be something else, but due to the + * lack of documentation I have no idea what it should be. For + * now just fill in 0 as the duration. + */ + for (i = 0; i < dev->_max_num_decoders; i++) + medusa_set_decoderduration(dev, i, 0); /* Select monitor as DENC A input, power up the DAC */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp); @@ -717,7 +719,7 @@ int medusa_video_init(struct cx25821_dev *dev) /* Turn on all of the data out and control output pins. */ value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp); value &= 0xFEF0FE00; - if (_num_decoders == MAX_DECODERS) { + if (dev->_max_num_decoders == MAX_DECODERS) { /* * Note: The octal board does not support control pins(bit16-19) * These bits are ignored in the octal board. diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.h b/drivers/media/pci/cx25821/cx25821-medusa-video.h index 6175e096185..8bf602ff27b 100644 --- a/drivers/media/pci/cx25821/cx25821-medusa-video.h +++ b/drivers/media/pci/cx25821/cx25821-medusa-video.h @@ -40,10 +40,4 @@ #define CONTRAST_DEFAULT 5000 #define HUE_DEFAULT 5000 -unsigned short _num_decoders; -unsigned short _num_cameras; - -unsigned int _video_standard; -int _display_field_cnt[MAX_DECODERS]; - #endif diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c index 88ffef410c5..1f43be0b04c 100644 --- a/drivers/media/pci/cx25821/cx25821-video-upstream.c +++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c @@ -159,10 +159,10 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_channel *chan, __le32 * For the upstream video channel, the risc engine will enable * the FIFO. 
*/ if (fifo_enable && line == 3) { - *(rp++) = RISC_WRITECR; - *(rp++) = sram_ch->dma_ctl; - *(rp++) = FLD_VID_FIFO_EN; - *(rp++) = 0x00000001; + *(rp++) = cpu_to_le32(RISC_WRITECR); + *(rp++) = cpu_to_le32(sram_ch->dma_ctl); + *(rp++) = cpu_to_le32(FLD_VID_FIFO_EN); + *(rp++) = cpu_to_le32(0x00000001); } } diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index aba5b1c649e..400eb1c42d3 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -834,7 +834,7 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci, /* get irq */ err = request_irq(chip->pci->irq, cx8801_irq, - IRQF_SHARED | IRQF_DISABLED, chip->core->name, chip); + IRQF_SHARED, chip->core->name, chip); if (err < 0) { dprintk(0, "%s: can't get IRQ %d\n", chip->core->name, chip->pci->irq); @@ -935,8 +935,6 @@ static void cx88_audio_finidev(struct pci_dev *pci) snd_card_free((void *)card); - pci_set_drvdata(pci, NULL); - devno--; } @@ -951,27 +949,4 @@ static struct pci_driver cx88_audio_pci_driver = { .remove = cx88_audio_finidev, }; -/**************************************************************************** - LINUX MODULE INIT - ****************************************************************************/ - -/* - * module init - */ -static int __init cx88_audio_init(void) -{ - printk(KERN_INFO "cx2388x alsa driver version %s loaded\n", - CX88_VERSION); - return pci_register_driver(&cx88_audio_pci_driver); -} - -/* - * module remove - */ -static void __exit cx88_audio_fini(void) -{ - pci_unregister_driver(&cx88_audio_pci_driver); -} - -module_init(cx88_audio_init); -module_exit(cx88_audio_fini); +module_pci_driver(cx88_audio_pci_driver); diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index 2d3507eb489..74b7b8614c2 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c @@ -499,7 +499,7 @@ static int cx8802_init_common(struct cx8802_dev *dev) /* get irq */ err = request_irq(dev->pci->irq, cx8802_irq, - IRQF_SHARED | IRQF_DISABLED, dev->core->name, dev); + IRQF_SHARED, dev->core->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->core->name, dev->pci->irq); @@ -520,7 +520,6 @@ static void cx8802_fini_common(struct cx8802_dev *dev) /* unregister stuff */ free_irq(dev->pci->irq, dev); - pci_set_drvdata(dev->pci, NULL); /* free memory */ btcx_riscmem_free(dev->pci,&dev->mpegq.stopper); @@ -903,20 +902,8 @@ static struct pci_driver cx8802_pci_driver = { .remove = cx8802_remove, }; -static int __init cx8802_init(void) -{ - printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %s loaded\n", - CX88_VERSION); - return pci_register_driver(&cx8802_pci_driver); -} - -static void __exit cx8802_fini(void) -{ - pci_unregister_driver(&cx8802_pci_driver); -} +module_pci_driver(cx8802_pci_driver); -module_init(cx8802_init); -module_exit(cx8802_fini); EXPORT_SYMBOL(cx8802_buf_prepare); EXPORT_SYMBOL(cx8802_buf_queue); EXPORT_SYMBOL(cx8802_cancel_buffers); diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index ecf21d9f1f3..ed8cb9037b6 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1738,7 +1738,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, /* get irq */ err = request_irq(pci_dev->irq, cx8800_irq, - IRQF_SHARED | IRQF_DISABLED, core->name, dev); + IRQF_SHARED, core->name, dev); if (err < 0) { printk(KERN_ERR "%s/0: can't get IRQ %d\n", core->name,pci_dev->irq); @@ 
-1922,7 +1922,6 @@ static void cx8800_finidev(struct pci_dev *pci_dev) free_irq(pci_dev->irq, dev); cx8800_unregister_video(dev); - pci_set_drvdata(pci_dev, NULL); /* free memory */ btcx_riscmem_free(dev->pci,&dev->vidq.stopper); @@ -2039,17 +2038,4 @@ static struct pci_driver cx8800_pci_driver = { #endif }; -static int __init cx8800_init(void) -{ - printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %s loaded\n", - CX88_VERSION); - return pci_register_driver(&cx8800_pci_driver); -} - -static void __exit cx8800_fini(void) -{ - pci_unregister_driver(&cx8800_pci_driver); -} - -module_init(cx8800_init); -module_exit(cx8800_fini); +module_pci_driver(cx8800_pci_driver); diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 36e34522b9a..9375f30d9a8 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -1544,7 +1544,7 @@ static void ddb_unmap(struct ddb *dev) static void ddb_remove(struct pci_dev *pdev) { - struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev); + struct ddb *dev = pci_get_drvdata(pdev); ddb_ports_detach(dev); ddb_i2c_release(dev); diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c index ab797fe466d..e60ac35fc10 100644 --- a/drivers/media/pci/dm1105/dm1105.c +++ b/drivers/media/pci/dm1105/dm1105.c @@ -1178,7 +1178,6 @@ err_pci_release_regions: err_pci_disable_device: pci_disable_device(pdev); err_kfree: - pci_set_drvdata(pdev, NULL); kfree(dev); return ret; } @@ -1202,8 +1201,7 @@ static void dm1105_remove(struct pci_dev *pdev) dvb_dmxdev_release(&dev->dmxdev); dvb_dmx_release(dvbdemux); dvb_unregister_adapter(dvb_adapter); - if (&dev->i2c_adap) - i2c_del_adapter(&dev->i2c_adap); + i2c_del_adapter(&dev->i2c_adap); dm1105_hw_exit(dev); synchronize_irq(pdev->irq); @@ -1211,7 +1209,6 @@ static void dm1105_remove(struct pci_dev *pdev) pci_iounmap(pdev, dev->io_mem); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); dm1105_devcount--; kfree(dev); } diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index c08ae3eb955..802642d2664 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -1261,7 +1261,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) /* Register IRQ */ retval = request_irq(itv->pdev->irq, ivtv_irq_handler, - IRQF_SHARED | IRQF_DISABLED, itv->v4l2_dev.name, (void *)itv); + IRQF_SHARED, itv->v4l2_dev.name, (void *)itv); if (retval) { IVTV_ERR("Failed to register irq %d\n", retval); goto free_i2c; diff --git a/drivers/media/pci/mantis/mantis_pci.c b/drivers/media/pci/mantis/mantis_pci.c index a846036ea02..9e89e045213 100644 --- a/drivers/media/pci/mantis/mantis_pci.c +++ b/drivers/media/pci/mantis/mantis_pci.c @@ -143,7 +143,6 @@ fail1: fail0: dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret); - pci_set_drvdata(pdev, NULL); return ret; } EXPORT_SYMBOL_GPL(mantis_pci_init); @@ -161,7 +160,6 @@ void mantis_pci_exit(struct mantis_pci *mantis) } pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } EXPORT_SYMBOL_GPL(mantis_pci_exit); diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index 2381b05432e..54d5c821007 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -1698,7 +1698,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) meye.mchip_irq = pcidev->irq; if (request_irq(meye.mchip_irq, meye_irq, - 
IRQF_DISABLED | IRQF_SHARED, "meye", meye_irq)) { + IRQF_SHARED, "meye", meye_irq)) { v4l2_err(v4l2_dev, "request_irq failed\n"); goto outreqirq; } diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 37ebc42392a..970e8330852 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -1622,7 +1622,7 @@ static void ngene_unlink(struct ngene *dev) void ngene_shutdown(struct pci_dev *pdev) { - struct ngene *dev = (struct ngene *)pci_get_drvdata(pdev); + struct ngene *dev = pci_get_drvdata(pdev); if (!dev || !shutdown_workaround) return; @@ -1648,7 +1648,6 @@ void ngene_remove(struct pci_dev *pdev) cxd_detach(dev); ngene_stop(dev); ngene_release_buffers(dev); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); } @@ -1702,6 +1701,5 @@ fail1: ngene_release_buffers(dev); fail0: pci_disable_device(pci_dev); - pci_set_drvdata(pci_dev, NULL); return stat; } diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c index 49382850005..8164d74b46a 100644 --- a/drivers/media/pci/pluto2/pluto2.c +++ b/drivers/media/pci/pluto2/pluto2.c @@ -736,7 +736,6 @@ err_pci_release_regions: err_pci_disable_device: pci_disable_device(pdev); err_kfree: - pci_set_drvdata(pdev, NULL); kfree(pluto); goto out; } @@ -765,7 +764,6 @@ static void pluto2_remove(struct pci_dev *pdev) pci_iounmap(pdev, pluto->io_mem); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); kfree(pluto); } diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c index 75ce14229e0..db887b0c37b 100644 --- a/drivers/media/pci/pt1/pt1.c +++ b/drivers/media/pci/pt1/pt1.c @@ -1076,7 +1076,6 @@ static void pt1_remove(struct pci_dev *pdev) pt1_update_power(pt1); pt1_cleanup_adapters(pt1); i2c_del_adapter(&pt1->i2c_adap); - pci_set_drvdata(pdev, NULL); kfree(pt1); pci_iounmap(pdev, regs); pci_release_regions(pdev); @@ -1198,7 +1197,6 @@ err_i2c_del_adapter: err_pt1_cleanup_adapters: pt1_cleanup_adapters(pt1); err_kfree: - pci_set_drvdata(pdev, NULL); kfree(pt1); err_pci_iounmap: pci_iounmap(pdev, regs); diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index dbcdfbf8aed..dd67c8a400c 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -1096,7 +1096,7 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum) err = request_irq(dev->pci->irq, saa7134_alsa_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, + IRQF_SHARED, dev->name, (void*) &dev->dmasound); if (err < 0) { diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index 45f0aca597a..27d7ee709c5 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c @@ -992,7 +992,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev, /* get irq */ err = request_irq(pci_dev->irq, saa7134_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, dev); + IRQF_SHARED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name,pci_dev->irq); diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index d37ee37aaef..57ef5456f1e 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1232,7 +1232,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev, } err = request_irq(pci_dev->irq, saa7164_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, dev); + IRQF_SHARED, dev->name, 
dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name, pci_dev->irq); @@ -1439,7 +1439,6 @@ static void saa7164_finidev(struct pci_dev *pci_dev) /* unregister stuff */ free_irq(pci_dev->irq, dev); - pci_set_drvdata(pci_dev, NULL); mutex_lock(&devlist); list_del(&dev->devlist); diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c index f1cbfe52698..6299d5dadb8 100644 --- a/drivers/media/pci/ttpci/av7110_hw.c +++ b/drivers/media/pci/ttpci/av7110_hw.c @@ -22,7 +22,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * - * the project's page is at http://www.linuxtv.org/ + * the project's page is at http://www.linuxtv.org/ */ /* for debugging ARM communication: */ @@ -40,6 +40,14 @@ #define _NOHANDSHAKE +/* + * Max transfer size done by av7110_fw_cmd() + * + * The maximum size passed to this function is 6 bytes. The buffer also + * uses two additional ones for type and size. So, 8 bytes is enough. + */ +#define MAX_XFER_SIZE 8 + /**************************************************************************** * DEBI functions ****************************************************************************/ @@ -488,11 +496,18 @@ static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length) int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...) { va_list args; - u16 buf[num + 2]; + u16 buf[MAX_XFER_SIZE]; int i, ret; // dprintk(4, "%p\n", av7110); + if (2 + num > sizeof(buf)) { + printk(KERN_WARNING + "%s: %s len=%d is too big!\n", + KBUILD_MODNAME, __func__, num); + return -EINVAL; + } + buf[0] = ((type << 8) | com); buf[1] = num; diff --git a/drivers/media/pci/zoran/Kconfig b/drivers/media/pci/zoran/Kconfig index 26ca8702e33..39ec35bd21a 100644 --- a/drivers/media/pci/zoran/Kconfig +++ b/drivers/media/pci/zoran/Kconfig @@ -1,6 +1,7 @@ config VIDEO_ZORAN tristate "Zoran ZR36057/36067 Video For Linux" depends on PCI && I2C_ALGOBIT && VIDEO_V4L2 && VIRT_TO_BUS + depends on !ALPHA help Say Y for support for MJPEG capture cards based on the Zoran 36057/36067 PCI controller chipset. This includes the Iomega diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c index 923d59a321f..cec5b7553f2 100644 --- a/drivers/media/pci/zoran/zoran_card.c +++ b/drivers/media/pci/zoran/zoran_card.c @@ -1293,7 +1293,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } result = request_irq(zr->pci_dev->irq, zoran_irq, - IRQF_SHARED | IRQF_DISABLED, ZR_DEVNAME(zr), zr); + IRQF_SHARED, ZR_DEVNAME(zr), zr); if (result < 0) { if (result == -EINVAL) { dprintk(1, diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index eb70dda8cbf..d7f0249e405 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -143,6 +143,7 @@ if V4L_MEM2MEM_DRIVERS config VIDEO_CODA tristate "Chips&Media Coda multi-standard codec IP" depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC + select SRAM select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV ---help--- @@ -212,7 +213,7 @@ config VIDEO_SH_VEU config VIDEO_RENESAS_VSP1 tristate "Renesas VSP1 Video Processing Engine" - depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA select VIDEOBUF2_DMA_CONTIG ---help--- This is a V4L2 driver for the Renesas VSP1 video processing engine. 
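Editor's aside (not part of the patch series): in the av7110_fw_cmd() hunk above, buf is an array of u16, so the guard "2 + num > sizeof(buf)" compares an element count against a size in bytes (16). Callers currently pass at most 6 words, as the comment notes, but the check on its own would still admit counts large enough to overflow the 8-element buffer. A tighter sketch bounds against the element count instead:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

	u16 buf[MAX_XFER_SIZE];

	/* num counts 16-bit words, so compare against elements, not bytes */
	if (2 + num > ARRAY_SIZE(buf)) {
		printk(KERN_WARNING "%s: %s len=%d is too big!\n",
		       KBUILD_MODNAME, __func__, num);
		return -EINVAL;
	}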
@@ -220,6 +221,22 @@ config VIDEO_RENESAS_VSP1 To compile this driver as a module, choose M here: the module will be called vsp1. +config VIDEO_TI_VPE + tristate "TI VPE (Video Processing Engine) driver" + depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX + select VIDEOBUF2_DMA_CONTIG + select V4L2_MEM2MEM_DEV + default n + ---help--- + Support for the TI VPE(Video Processing Engine) block + found on DRA7XX SoC. + +config VIDEO_TI_VPE_DEBUG + bool "VPE debug messages" + depends on VIDEO_TI_VPE + ---help--- + Enable debug messages on VPE driver. + endif # V4L_MEM2MEM_DRIVERS menuconfig V4L_TEST_DRIVERS diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 4e4da482c52..1348ba1faf9 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o +obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/ + obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o obj-$(CONFIG_VIDEO_CODA) += coda.o diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index 4993610051e..bd72fb97fea 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -39,7 +39,7 @@ #define CODA_NAME "coda" -#define CODA_MAX_INSTANCES 4 +#define CODADX6_MAX_INSTANCES 4 #define CODA_FMO_BUF_SIZE 32 #define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024) @@ -54,8 +54,6 @@ #define CODA_MAX_FRAMEBUFFERS 8 -#define MAX_W 8192 -#define MAX_H 8192 #define CODA_MAX_FRAME_SIZE 0x100000 #define FMO_SLICE_SAVE_BUF_SIZE (32) #define CODA_DEFAULT_GAMMA 4096 @@ -394,14 +392,57 @@ static struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc, return &codecs[k]; } +static void coda_get_max_dimensions(struct coda_dev *dev, + struct coda_codec *codec, + int *max_w, int *max_h) +{ + struct coda_codec *codecs = dev->devtype->codecs; + int num_codecs = dev->devtype->num_codecs; + unsigned int w, h; + int k; + + if (codec) { + w = codec->max_w; + h = codec->max_h; + } else { + for (k = 0, w = 0, h = 0; k < num_codecs; k++) { + w = max(w, codecs[k].max_w); + h = max(h, codecs[k].max_h); + } + } + + if (max_w) + *max_w = w; + if (max_h) + *max_h = h; +} + +static char *coda_product_name(int product) +{ + static char buf[9]; + + switch (product) { + case CODA_DX6: + return "CodaDx6"; + case CODA_7541: + return "CODA7541"; + default: + snprintf(buf, sizeof(buf), "(0x%04x)", product); + return buf; + } +} + /* * V4L2 ioctl() operations. */ -static int vidioc_querycap(struct file *file, void *priv, - struct v4l2_capability *cap) +static int coda_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) { + struct coda_ctx *ctx = fh_to_ctx(priv); + strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver)); - strlcpy(cap->card, CODA_NAME, sizeof(cap->card)); + strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product), + sizeof(cap->card)); strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info)); /* * This is only a mem-to-mem video device. 
The capture and output @@ -457,6 +498,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f, fmt = &formats[i]; strlcpy(f->description, fmt->name, sizeof(f->description)); f->pixelformat = fmt->fourcc; + if (!coda_format_is_yuv(fmt->fourcc)) + f->flags |= V4L2_FMT_FLAG_COMPRESSED; return 0; } @@ -464,8 +507,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f, return -EINVAL; } -static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_fmtdesc *f) +static int coda_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct vb2_queue *src_vq; @@ -483,13 +526,14 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0); } -static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, - struct v4l2_fmtdesc *f) +static int coda_enum_fmt_vid_out(struct file *file, void *priv, + struct v4l2_fmtdesc *f) { return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0); } -static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) +static int coda_g_fmt(struct file *file, void *priv, + struct v4l2_format *f) { struct vb2_queue *vq; struct coda_q_data *q_data; @@ -516,8 +560,11 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) return 0; } -static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f) +static int coda_try_fmt(struct coda_ctx *ctx, struct coda_codec *codec, + struct v4l2_format *f) { + struct coda_dev *dev = ctx->dev; + struct coda_q_data *q_data; unsigned int max_w, max_h; enum v4l2_field field; @@ -531,32 +578,48 @@ static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f) * if any of the dimensions is unsupported */ f->fmt.pix.field = field; - if (codec) { - max_w = codec->max_w; - max_h = codec->max_h; - } else { - max_w = MAX_W; - max_h = MAX_H; + coda_get_max_dimensions(dev, codec, &max_w, &max_h); + v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN, + &f->fmt.pix.height, MIN_H, max_h, H_ALIGN, + S_ALIGN); + + switch (f->fmt.pix.pixelformat) { + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_MPEG4: + case V4L2_PIX_FMT_JPEG: + break; + default: + q_data = get_q_data(ctx, f->type); + f->fmt.pix.pixelformat = q_data->fourcc; } - v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, - W_ALIGN, &f->fmt.pix.height, - MIN_H, max_h, H_ALIGN, S_ALIGN); - if (coda_format_is_yuv(f->fmt.pix.pixelformat)) { + switch (f->fmt.pix.pixelformat) { + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: /* Frame stride must be multiple of 8 */ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 8); f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height * 3 / 2; - } else { /*encoded formats h.264/mpeg4 */ + break; + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_MPEG4: + case V4L2_PIX_FMT_JPEG: f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE; + break; + default: + BUG(); } + f->fmt.pix.priv = 0; + return 0; } -static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct coda_codec *codec; @@ -584,7 +647,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, f->fmt.pix.colorspace = ctx->colorspace; - ret = vidioc_try_fmt(codec, f); + ret = coda_try_fmt(ctx, codec, f); if (ret < 0) 
return ret; @@ -600,8 +663,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, return 0; } -static int vidioc_try_fmt_vid_out(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_try_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); struct coda_codec *codec; @@ -613,10 +676,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv, if (!f->fmt.pix.colorspace) f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; - return vidioc_try_fmt(codec, f); + return coda_try_fmt(ctx, codec, f); } -static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) +static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) { struct coda_q_data *q_data; struct vb2_queue *vq; @@ -646,61 +709,62 @@ static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) return 0; } -static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; - ret = vidioc_try_fmt_vid_cap(file, priv, f); + ret = coda_try_fmt_vid_cap(file, priv, f); if (ret) return ret; - return vidioc_s_fmt(ctx, f); + return coda_s_fmt(ctx, f); } -static int vidioc_s_fmt_vid_out(struct file *file, void *priv, - struct v4l2_format *f) +static int coda_s_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *f) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; - ret = vidioc_try_fmt_vid_out(file, priv, f); + ret = coda_try_fmt_vid_out(file, priv, f); if (ret) return ret; - ret = vidioc_s_fmt(ctx, f); + ret = coda_s_fmt(ctx, f); if (ret) ctx->colorspace = f->fmt.pix.colorspace; return ret; } -static int vidioc_reqbufs(struct file *file, void *priv, - struct v4l2_requestbuffers *reqbufs) +static int coda_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *reqbufs) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } -static int vidioc_querybuf(struct file *file, void *priv, - struct v4l2_buffer *buf) +static int coda_querybuf(struct file *file, void *priv, + struct v4l2_buffer *buf) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } -static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +static int coda_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } -static int vidioc_expbuf(struct file *file, void *priv, - struct v4l2_exportbuffer *eb) +static int coda_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *eb) { struct coda_ctx *ctx = fh_to_ctx(priv); @@ -718,7 +782,8 @@ static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx, (buf->sequence == (ctx->qsequence - 1))); } -static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +static int coda_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; @@ -738,24 +803,24 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) return ret; } -static int vidioc_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *create) +static int coda_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create); } -static int 
vidioc_streamon(struct file *file, void *priv, - enum v4l2_buf_type type) +static int coda_streamon(struct file *file, void *priv, + enum v4l2_buf_type type) { struct coda_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } -static int vidioc_streamoff(struct file *file, void *priv, - enum v4l2_buf_type type) +static int coda_streamoff(struct file *file, void *priv, + enum v4l2_buf_type type) { struct coda_ctx *ctx = fh_to_ctx(priv); int ret; @@ -772,23 +837,34 @@ static int vidioc_streamoff(struct file *file, void *priv, return ret; } -static int vidioc_decoder_cmd(struct file *file, void *fh, - struct v4l2_decoder_cmd *dc) +static int coda_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) { - struct coda_ctx *ctx = fh_to_ctx(fh); - if (dc->cmd != V4L2_DEC_CMD_STOP) return -EINVAL; - if ((dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) || - (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY)) + if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) return -EINVAL; - if (dc->stop.pts != 0) + if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0)) return -EINVAL; + return 0; +} + +static int coda_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc) +{ + struct coda_ctx *ctx = fh_to_ctx(fh); + int ret; + + ret = coda_try_decoder_cmd(file, fh, dc); + if (ret < 0) + return ret; + + /* Ignore decoder stop command silently in encoder context */ if (ctx->inst_type != CODA_INST_DECODER) - return -EINVAL; + return 0; /* Set the strem-end flag on this context */ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG; @@ -796,8 +872,8 @@ static int vidioc_decoder_cmd(struct file *file, void *fh, return 0; } -static int vidioc_subscribe_event(struct v4l2_fh *fh, - const struct v4l2_event_subscription *sub) +static int coda_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_EOS: @@ -808,32 +884,33 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh, } static const struct v4l2_ioctl_ops coda_ioctl_ops = { - .vidioc_querycap = vidioc_querycap, + .vidioc_querycap = coda_querycap, - .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, - .vidioc_g_fmt_vid_cap = vidioc_g_fmt, - .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, + .vidioc_enum_fmt_vid_cap = coda_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = coda_g_fmt, + .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap, - .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, - .vidioc_g_fmt_vid_out = vidioc_g_fmt, - .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, - .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, + .vidioc_enum_fmt_vid_out = coda_enum_fmt_vid_out, + .vidioc_g_fmt_vid_out = coda_g_fmt, + .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out, + .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out, - .vidioc_reqbufs = vidioc_reqbufs, - .vidioc_querybuf = vidioc_querybuf, + .vidioc_reqbufs = coda_reqbufs, + .vidioc_querybuf = coda_querybuf, - .vidioc_qbuf = vidioc_qbuf, - .vidioc_expbuf = vidioc_expbuf, - .vidioc_dqbuf = vidioc_dqbuf, - .vidioc_create_bufs = vidioc_create_bufs, + .vidioc_qbuf = coda_qbuf, + .vidioc_expbuf = coda_expbuf, + .vidioc_dqbuf = coda_dqbuf, + .vidioc_create_bufs = coda_create_bufs, - .vidioc_streamon = vidioc_streamon, - .vidioc_streamoff = vidioc_streamoff, + .vidioc_streamon = coda_streamon, + .vidioc_streamoff = coda_streamoff, - .vidioc_decoder_cmd = vidioc_decoder_cmd, + .vidioc_try_decoder_cmd = coda_try_decoder_cmd, 
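(Aside on the hunk above, which splits decoder-command handling into a pure validation helper and an execution path that reuses it — the usual V4L2 try/execute pattern. The sketch below is not part of the patch; the foo_* names are hypothetical and only the STOP check is shown.)

static int foo_try_decoder_cmd(struct file *file, void *fh,
			       struct v4l2_decoder_cmd *dc)
{
	/* validate only; no driver state is touched here */
	if (dc->cmd != V4L2_DEC_CMD_STOP)
		return -EINVAL;
	return 0;
}

static int foo_decoder_cmd(struct file *file, void *fh,
			   struct v4l2_decoder_cmd *dc)
{
	int ret;

	/* reuse the validation helper, then act on the command */
	ret = foo_try_decoder_cmd(file, fh, dc);
	if (ret < 0)
		return ret;

	/* ... perform the stop here ... */
	return 0;
}

(The ops table continues below.)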
+ .vidioc_decoder_cmd = coda_decoder_cmd, - .vidioc_subscribe_event = vidioc_subscribe_event, + .vidioc_subscribe_event = coda_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; @@ -1928,8 +2005,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count) if (!(ctx->streamon_out & ctx->streamon_cap)) return 0; - /* Allow device_run with no buffers queued and after streamoff */ - v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true); + /* Allow decoder device_run with no new buffers queued */ + if (ctx->inst_type == CODA_INST_DECODER) + v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true); ctx->gopcounter = ctx->params.gop_size - 1; buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); @@ -2071,10 +2149,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count) coda_setup_iram(ctx); if (dst_fourcc == V4L2_PIX_FMT_H264) { - value = (FMO_SLICE_SAVE_BUF_SIZE << 7); - value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET; - value |= 0 & CODA_FMOPARAM_SLICENUM_MASK; if (dev->devtype->product == CODA_DX6) { + value = FMO_SLICE_SAVE_BUF_SIZE << 7; coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO); } else { coda_write(dev, ctx->iram_info.search_ram_paddr, @@ -2371,7 +2447,13 @@ static int coda_queue_init(void *priv, struct vb2_queue *src_vq, static int coda_next_free_instance(struct coda_dev *dev) { - return ffz(dev->instance_mask); + int idx = ffz(dev->instance_mask); + + if ((idx < 0) || + (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES)) + return -EBUSY; + + return idx; } static int coda_open(struct file *file) @@ -2386,8 +2468,8 @@ static int coda_open(struct file *file) return -ENOMEM; idx = coda_next_free_instance(dev); - if (idx >= CODA_MAX_INSTANCES) { - ret = -EBUSY; + if (idx < 0) { + ret = idx; goto err_coda_max; } set_bit(idx, &dev->instance_mask); @@ -2719,7 +2801,6 @@ static void coda_finish_encode(struct coda_ctx *ctx) dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); /* Get results from the coda */ - coda_read(dev, CODA_RET_ENC_PIC_TYPE); start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START); wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); @@ -2739,7 +2820,7 @@ static void coda_finish_encode(struct coda_ctx *ctx) coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM); coda_read(dev, CODA_RET_ENC_PIC_FLAG); - if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) { + if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) { dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME; } else { @@ -2861,21 +2942,6 @@ static bool coda_firmware_supported(u32 vernum) return false; } -static char *coda_product_name(int product) -{ - static char buf[9]; - - switch (product) { - case CODA_DX6: - return "CodaDx6"; - case CODA_7541: - return "CODA7541"; - default: - snprintf(buf, sizeof(buf), "(0x%04x)", product); - return buf; - } -} - static int coda_hw_init(struct coda_dev *dev) { u16 product, major, minor, release; diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index 04609cc6eba..eac472b5ae8 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -1785,7 +1785,7 @@ static int vpbe_display_probe(struct platform_device *pdev) } irq = res->start; - err = devm_request_irq(&pdev->dev, irq, venc_isr, IRQF_DISABLED, + err = devm_request_irq(&pdev->dev, irq, venc_isr, 0, VPBE_DISPLAY_DRIVER, disp_dev); if (err) { v4l2_err(&disp_dev->vpbe_dev->v4l2_dev, diff --git 
a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 93609091cb2..d762246eabf 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -688,7 +688,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev) frame_format = ccdc_dev->hw_ops.get_frame_format(); if (frame_format == CCDC_FRMFMT_PROGRESSIVE) { return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr, - IRQF_DISABLED, "vpfe_capture1", + 0, "vpfe_capture1", vpfe_dev); } return 0; @@ -1863,7 +1863,7 @@ static int vpfe_probe(struct platform_device *pdev) } vpfe_dev->ccdc_irq1 = res1->start; - ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED, + ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0, "vpfe_capture0", vpfe_dev); if (0 != ret) { diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 1089834a4ef..52ac5e6c862 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -2154,7 +2154,7 @@ static __init int vpif_probe(struct platform_device *pdev) if (!vpif_obj.sd[i]) { vpif_err("Error registering v4l2 subdevice\n"); - err = -ENOMEM; + err = -ENODEV; goto probe_subdev_out; } v4l2_info(&vpif_obj.v4l2_dev, diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h index 76435d3bf62..ef0a6564cef 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.h +++ b/drivers/media/platform/exynos-gsc/gsc-core.h @@ -45,6 +45,7 @@ #define GSC_DST_FMT (1 << 2) #define GSC_CTX_M2M (1 << 3) #define GSC_CTX_STOP_REQ (1 << 6) +#define GSC_CTX_ABORT (1 << 7) enum gsc_dev_flags { /* for global */ diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index e576ff2de3d..810c3e13970 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -46,6 +46,17 @@ static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx) return ret == 0 ? -ETIMEDOUT : ret; } +static void __gsc_m2m_job_abort(struct gsc_ctx *ctx) +{ + int ret; + + ret = gsc_m2m_ctx_stop_req(ctx); + if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) { + gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx); + gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + } +} + static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count) { struct gsc_ctx *ctx = q->drv_priv; @@ -58,11 +69,8 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count) static int gsc_m2m_stop_streaming(struct vb2_queue *q) { struct gsc_ctx *ctx = q->drv_priv; - int ret; - ret = gsc_m2m_ctx_stop_req(ctx); - if (ret == -ETIMEDOUT) - gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + __gsc_m2m_job_abort(ctx); pm_runtime_put(&ctx->gsc_dev->pdev->dev); @@ -91,15 +99,9 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state) } } - static void gsc_m2m_job_abort(void *priv) { - struct gsc_ctx *ctx = priv; - int ret; - - ret = gsc_m2m_ctx_stop_req(ctx); - if (ret == -ETIMEDOUT) - gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + __gsc_m2m_job_abort((struct gsc_ctx *)priv); } static int gsc_get_bufs(struct gsc_ctx *ctx) @@ -150,9 +152,10 @@ static void gsc_m2m_device_run(void *priv) gsc->m2m.ctx = ctx; } - is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 
1 : 0; - ctx->state &= ~GSC_CTX_STOP_REQ; + is_set = ctx->state & GSC_CTX_STOP_REQ; if (is_set) { + ctx->state &= ~GSC_CTX_STOP_REQ; + ctx->state |= GSC_CTX_ABORT; wake_up(&gsc->irq_queue); goto put_device; } diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c index d2e6cba3566..f3c6136aa5b 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.c +++ b/drivers/media/platform/exynos4-is/fimc-isp.c @@ -511,7 +511,7 @@ static int __ctrl_set_metering(struct fimc_is *is, unsigned int value) break; default: return -EINVAL; - }; + } __is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val); return 0; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index a8351127831..7a4ee4c0449 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -411,8 +411,8 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd, device_lock(&client->dev); - if (!client->driver || - !try_module_get(client->driver->driver.owner)) { + if (!client->dev.driver || + !try_module_get(client->dev.driver->owner)) { ret = -EPROBE_DEFER; v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n", node->full_name); @@ -442,7 +442,7 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd, fmd->num_sensors++; mod_put: - module_put(client->driver->driver.owner); + module_put(client->dev.driver->owner); dev_put: device_unlock(&client->dev); put_device(&client->dev); diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 540516ca872..65cab70fefc 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c @@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op, ctx->xt->dir = DMA_MEM_TO_MEM; ctx->xt->src_sgl = false; ctx->xt->dst_sgl = true; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); if (tx == NULL) { @@ -1084,8 +1083,7 @@ free_dev: static int deinterlace_remove(struct platform_device *pdev) { - struct deinterlace_dev *pcdev = - (struct deinterlace_dev *)platform_get_drvdata(pdev); + struct deinterlace_dev *pcdev = platform_get_drvdata(pdev); v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); v4l2_m2m_release(pcdev->m2m_dev); diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 5184887b155..32fab30a910 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) { struct mcam_vb_buffer *mvb = vb_to_mvb(vb); struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); - struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); struct mcam_dma_desc *desc = mvb->dma_desc; struct scatterlist *sg; int i; - mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages, - DMA_FROM_DEVICE); + mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl, + sg_table->nents, DMA_FROM_DEVICE); if (mvb->dma_desc_nent <= 0) return -EIO; /* Not sure what's right here */ - for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) { + for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) { desc->dma_addr = sg_dma_address(sg); desc->segment_len = 
sg_dma_len(sg); desc++; @@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb) { struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); - struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); - dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE); + if (sg_table) + dma_unmap_sg(cam->dev, sg_table->sgl, + sg_table->nents, DMA_FROM_DEVICE); return 0; } diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index b5a19af5c58..3458fa0e2fd 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -481,7 +481,6 @@ static int mmpcam_remove(struct mmp_camera *cam) struct mmp_camera_platform_data *pdata; mmpcam_remove_device(cam); - free_irq(cam->irq, mcam); mccic_shutdown(mcam); mmpcam_power_down(mcam); pdata = cam->pdev->dev.platform_data; diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c index 6a17676f9d7..8df5975b700 100644 --- a/drivers/media/platform/mem2mem_testdev.c +++ b/drivers/media/platform/mem2mem_testdev.c @@ -1090,8 +1090,7 @@ unreg_dev: static int m2mtest_remove(struct platform_device *pdev) { - struct m2mtest_dev *dev = - (struct m2mtest_dev *)platform_get_drvdata(pdev); + struct m2mtest_dev *dev = platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); v4l2_m2m_release(dev->m2m_dev); diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index fd6289d60cd..0b2948376ae 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -840,7 +840,7 @@ put_clk: static int g2d_remove(struct platform_device *pdev) { - struct g2d_dev *dev = (struct g2d_dev *)platform_get_drvdata(pdev); + struct g2d_dev *dev = platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME); v4l2_m2m_release(dev->m2m_dev); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 084263dd126..5f2c4ad6c2c 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -404,7 +404,11 @@ leave_handle_frame: if (test_and_clear_bit(0, &dev->hw_lock) == 0) BUG(); s5p_mfc_clock_off(); - s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); + /* if suspending, wake up device and do not try_run again*/ + if (test_bit(0, &dev->enter_suspend)) + wake_up_dev(dev, reason, err); + else + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } /* Error handling for interrupt */ @@ -1101,7 +1105,7 @@ static int s5p_mfc_probe(struct platform_device *pdev) } dev->irq = res->start; ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq, - IRQF_DISABLED, pdev->name, dev); + 0, pdev->name, dev); if (ret) { dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret); goto err_res; @@ -1286,9 +1290,7 @@ static int s5p_mfc_suspend(struct device *dev) /* Try and lock the HW */ /* Wait on the interrupt waitqueue */ ret = wait_event_interruptible_timeout(m_dev->queue, - m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond, - msecs_to_jiffies(MFC_INT_TIMEOUT)); - + m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT)); if (ret == 0) { mfc_err("Waiting for hardware to finish timed out\n"); return -EIO; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c index 
ad4f1df0a18..9a6efd6c132 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c @@ -111,7 +111,7 @@ static int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx) break; default: h2r_args.arg[0] = S5P_FIMV_CODEC_NONE; - }; + } h2r_args.arg[1] = 0; /* no crc & no pixelcache */ h2r_args.arg[2] = ctx->ctx.ofs; h2r_args.arg[3] = ctx->ctx.size; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c index db796c8e787..ec1a5947ed7 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c @@ -113,7 +113,7 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx) break; default: codec_type = S5P_FIMV_CODEC_NONE_V6; - }; + } mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6); mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6); mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 41f5a3c10db..4ff3b6cd684 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -113,7 +113,7 @@ static struct mfc_control controls[] = { .minimum = 0, .maximum = (1 << 16) - 1, .step = 1, - .default_value = 0, + .default_value = 12, }, { .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, @@ -356,7 +356,7 @@ static struct mfc_control controls[] = { .minimum = 0, .maximum = 51, .step = 1, - .default_value = 1, + .default_value = 51, }, { .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, @@ -399,7 +399,7 @@ static struct mfc_control controls[] = { .minimum = 1, .maximum = 31, .step = 1, - .default_value = 1, + .default_value = 31, }, { .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP, @@ -444,7 +444,7 @@ static struct mfc_control controls[] = { .minimum = 0, .maximum = 51, .step = 1, - .default_value = 1, + .default_value = 51, }, { .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 368582b091b..58ec7bb26eb 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1582,7 +1582,7 @@ static int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev) break; default: reason = S5P_MFC_R2H_CMD_EMPTY; - }; + } return reason; } diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c index b93a21f5aa1..74344c764da 100644 --- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c +++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c @@ -226,7 +226,7 @@ static void mxr_graph_fix_geometry(struct mxr_layer *layer, src->width + src->x_offset, 32767); src->full_height = clamp_val(src->full_height, src->height + src->y_offset, 2047); - }; + } } /* PUBLIC API */ diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c index 3d13a636877..c9388c45ad7 100644 --- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c +++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c @@ -197,7 +197,7 @@ static void mxr_vp_fix_geometry(struct mxr_layer *layer, ALIGN(src->width + src->x_offset, 8), 8192U); src->full_height = clamp(src->full_height, src->height + src->y_offset, 8192U); - }; + } } /* PUBLIC API */ diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index d02a7e0b773..6866bb4fbeb 100644 --- 
a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_data/camera-rcar.h> @@ -105,6 +106,7 @@ #define VIN_MAX_HEIGHT 2048 enum chip_id { + RCAR_H2, RCAR_H1, RCAR_M1, RCAR_E1, @@ -300,7 +302,8 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv) dmr = 0; break; case V4L2_PIX_FMT_RGB32: - if (priv->chip == RCAR_H1 || priv->chip == RCAR_E1) { + if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 || + priv->chip == RCAR_E1) { dmr = VNDMR_EXRGB; break; } @@ -1381,6 +1384,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = { }; static struct platform_device_id rcar_vin_id_table[] = { + { "r8a7790-vin", RCAR_H2 }, { "r8a7779-vin", RCAR_H1 }, { "r8a7778-vin", RCAR_M1 }, { "uPD35004-vin", RCAR_E1 }, diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 8df22f77917..150bd4df413 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -1800,7 +1800,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) /* request irq */ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, - IRQF_DISABLED, dev_name(&pdev->dev), pcdev); + 0, dev_name(&pdev->dev), pcdev); if (err) { dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); goto exit_release_mem; diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index 387a232d95a..4b8c024fc48 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -71,13 +71,23 @@ static int video_dev_create(struct soc_camera_device *icd); int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, struct v4l2_clk *clk) { - int ret = clk ? v4l2_clk_enable(clk) : 0; - if (ret < 0) { - dev_err(dev, "Cannot enable clock: %d\n", ret); - return ret; + int ret; + bool clock_toggle; + + if (clk && (!ssdd->unbalanced_power || + !test_and_set_bit(0, &ssdd->clock_state))) { + ret = v4l2_clk_enable(clk); + if (ret < 0) { + dev_err(dev, "Cannot enable clock: %d\n", ret); + return ret; + } + clock_toggle = true; + } else { + clock_toggle = false; } - ret = regulator_bulk_enable(ssdd->num_regulators, - ssdd->regulators); + + ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); if (ret < 0) { dev_err(dev, "Cannot enable regulators\n"); goto eregenable; @@ -95,10 +105,10 @@ int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, return 0; epwron: - regulator_bulk_disable(ssdd->num_regulators, - ssdd->regulators); + regulator_bulk_disable(ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); eregenable: - if (clk) + if (clock_toggle) v4l2_clk_disable(clk); return ret; @@ -120,14 +130,14 @@ int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd } } - err = regulator_bulk_disable(ssdd->num_regulators, - ssdd->regulators); + err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); if (err < 0) { dev_err(dev, "Cannot disable regulators\n"); ret = ret ? 
: err; } - if (clk) + if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state))) v4l2_clk_disable(clk); return ret; @@ -137,8 +147,8 @@ EXPORT_SYMBOL(soc_camera_power_off); int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd) { /* Should not have any effect in synchronous case */ - return devm_regulator_bulk_get(dev, ssdd->num_regulators, - ssdd->regulators); + return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); } EXPORT_SYMBOL(soc_camera_power_init); @@ -1346,8 +1356,8 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd, * soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try * to allocate them again. */ - ssdd->num_regulators = 0; - ssdd->regulators = NULL; + ssdd->sd_pdata.num_regulators = 0; + ssdd->sd_pdata.regulators = NULL; shd->board_info->platform_data = ssdd; snprintf(clk_name, sizeof(clk_name), "%d-%04x", @@ -2020,8 +2030,8 @@ static int soc_camera_pdrv_probe(struct platform_device *pdev) * that case regulators are attached to the I2C device and not to the * camera platform device. */ - ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators, - ssdd->regulators); + ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators, + ssdd->sd_pdata.regulators); if (ret < 0) return ret; diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile new file mode 100644 index 00000000000..cbf0a806ba1 --- /dev/null +++ b/drivers/media/platform/ti-vpe/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o + +ti-vpe-y := vpe.o vpdma.o + +ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c new file mode 100644 index 00000000000..af0a5ffcaa9 --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpdma.c @@ -0,0 +1,846 @@ +/* + * VPDMA helper library + * + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/firmware.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/videodev2.h> + +#include "vpdma.h" +#include "vpdma_priv.h" + +#define VPDMA_FIRMWARE "vpdma-1b8.bin" + +const struct vpdma_data_format vpdma_yuv_fmts[] = { + [VPDMA_DATA_FMT_Y444] = { + .data_type = DATA_TYPE_Y444, + .depth = 8, + }, + [VPDMA_DATA_FMT_Y422] = { + .data_type = DATA_TYPE_Y422, + .depth = 8, + }, + [VPDMA_DATA_FMT_Y420] = { + .data_type = DATA_TYPE_Y420, + .depth = 8, + }, + [VPDMA_DATA_FMT_C444] = { + .data_type = DATA_TYPE_C444, + .depth = 8, + }, + [VPDMA_DATA_FMT_C422] = { + .data_type = DATA_TYPE_C422, + .depth = 8, + }, + [VPDMA_DATA_FMT_C420] = { + .data_type = DATA_TYPE_C420, + .depth = 4, + }, + [VPDMA_DATA_FMT_YC422] = { + .data_type = DATA_TYPE_YC422, + .depth = 16, + }, + [VPDMA_DATA_FMT_YC444] = { + .data_type = DATA_TYPE_YC444, + .depth = 24, + }, + [VPDMA_DATA_FMT_CY422] = { + .data_type = DATA_TYPE_CY422, + .depth = 16, + }, +}; + +const struct vpdma_data_format vpdma_rgb_fmts[] = { + [VPDMA_DATA_FMT_RGB565] = { + .data_type = DATA_TYPE_RGB16_565, + .depth = 16, + }, + [VPDMA_DATA_FMT_ARGB16_1555] = { + .data_type = DATA_TYPE_ARGB_1555, + .depth = 16, + }, + [VPDMA_DATA_FMT_ARGB16] = { + .data_type = DATA_TYPE_ARGB_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_RGBA16_5551] = { + .data_type = DATA_TYPE_RGBA_5551, + .depth = 16, + }, + [VPDMA_DATA_FMT_RGBA16] = { + .data_type = DATA_TYPE_RGBA_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_ARGB24] = { + .data_type = DATA_TYPE_ARGB24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_RGB24] = { + .data_type = DATA_TYPE_RGB24_888, + .depth = 24, + }, + [VPDMA_DATA_FMT_ARGB32] = { + .data_type = DATA_TYPE_ARGB32_8888, + .depth = 32, + }, + [VPDMA_DATA_FMT_RGBA24] = { + .data_type = DATA_TYPE_RGBA24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_RGBA32] = { + .data_type = DATA_TYPE_RGBA32_8888, + .depth = 32, + }, + [VPDMA_DATA_FMT_BGR565] = { + .data_type = DATA_TYPE_BGR16_565, + .depth = 16, + }, + [VPDMA_DATA_FMT_ABGR16_1555] = { + .data_type = DATA_TYPE_ABGR_1555, + .depth = 16, + }, + [VPDMA_DATA_FMT_ABGR16] = { + .data_type = DATA_TYPE_ABGR_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_BGRA16_5551] = { + .data_type = DATA_TYPE_BGRA_5551, + .depth = 16, + }, + [VPDMA_DATA_FMT_BGRA16] = { + .data_type = DATA_TYPE_BGRA_4444, + .depth = 16, + }, + [VPDMA_DATA_FMT_ABGR24] = { + .data_type = DATA_TYPE_ABGR24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_BGR24] = { + .data_type = DATA_TYPE_BGR24_888, + .depth = 24, + }, + [VPDMA_DATA_FMT_ABGR32] = { + .data_type = DATA_TYPE_ABGR32_8888, + .depth = 32, + }, + [VPDMA_DATA_FMT_BGRA24] = { + .data_type = DATA_TYPE_BGRA24_6666, + .depth = 24, + }, + [VPDMA_DATA_FMT_BGRA32] = { + .data_type = DATA_TYPE_BGRA32_8888, + .depth = 32, + }, +}; + +const struct vpdma_data_format vpdma_misc_fmts[] = { + [VPDMA_DATA_FMT_MV] = { + .data_type = DATA_TYPE_MV, + .depth = 4, + }, +}; + +struct vpdma_channel_info { + int num; /* VPDMA channel number */ + int cstat_offset; /* client CSTAT register offset */ +}; + +static const struct vpdma_channel_info chan_info[] = { + [VPE_CHAN_LUMA1_IN] = { + .num = VPE_CHAN_NUM_LUMA1_IN, + .cstat_offset = VPDMA_DEI_LUMA1_CSTAT, + }, + [VPE_CHAN_CHROMA1_IN] = { + .num = VPE_CHAN_NUM_CHROMA1_IN, + .cstat_offset = VPDMA_DEI_CHROMA1_CSTAT, + }, + [VPE_CHAN_LUMA2_IN] = { + .num = VPE_CHAN_NUM_LUMA2_IN, + 
.cstat_offset = VPDMA_DEI_LUMA2_CSTAT, + }, + [VPE_CHAN_CHROMA2_IN] = { + .num = VPE_CHAN_NUM_CHROMA2_IN, + .cstat_offset = VPDMA_DEI_CHROMA2_CSTAT, + }, + [VPE_CHAN_LUMA3_IN] = { + .num = VPE_CHAN_NUM_LUMA3_IN, + .cstat_offset = VPDMA_DEI_LUMA3_CSTAT, + }, + [VPE_CHAN_CHROMA3_IN] = { + .num = VPE_CHAN_NUM_CHROMA3_IN, + .cstat_offset = VPDMA_DEI_CHROMA3_CSTAT, + }, + [VPE_CHAN_MV_IN] = { + .num = VPE_CHAN_NUM_MV_IN, + .cstat_offset = VPDMA_DEI_MV_IN_CSTAT, + }, + [VPE_CHAN_MV_OUT] = { + .num = VPE_CHAN_NUM_MV_OUT, + .cstat_offset = VPDMA_DEI_MV_OUT_CSTAT, + }, + [VPE_CHAN_LUMA_OUT] = { + .num = VPE_CHAN_NUM_LUMA_OUT, + .cstat_offset = VPDMA_VIP_UP_Y_CSTAT, + }, + [VPE_CHAN_CHROMA_OUT] = { + .num = VPE_CHAN_NUM_CHROMA_OUT, + .cstat_offset = VPDMA_VIP_UP_UV_CSTAT, + }, + [VPE_CHAN_RGB_OUT] = { + .num = VPE_CHAN_NUM_RGB_OUT, + .cstat_offset = VPDMA_VIP_UP_Y_CSTAT, + }, +}; + +static u32 read_reg(struct vpdma_data *vpdma, int offset) +{ + return ioread32(vpdma->base + offset); +} + +static void write_reg(struct vpdma_data *vpdma, int offset, u32 value) +{ + iowrite32(value, vpdma->base + offset); +} + +static int read_field_reg(struct vpdma_data *vpdma, int offset, + u32 mask, int shift) +{ + return (read_reg(vpdma, offset) & (mask << shift)) >> shift; +} + +static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field, + u32 mask, int shift) +{ + u32 val = read_reg(vpdma, offset); + + val &= ~(mask << shift); + val |= (field & mask) << shift; + + write_reg(vpdma, offset, val); +} + +void vpdma_dump_regs(struct vpdma_data *vpdma) +{ + struct device *dev = &vpdma->pdev->dev; + +#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r)) + + dev_dbg(dev, "VPDMA Registers:\n"); + + DUMPREG(PID); + DUMPREG(LIST_ADDR); + DUMPREG(LIST_ATTR); + DUMPREG(LIST_STAT_SYNC); + DUMPREG(BG_RGB); + DUMPREG(BG_YUV); + DUMPREG(SETUP); + DUMPREG(MAX_SIZE1); + DUMPREG(MAX_SIZE2); + DUMPREG(MAX_SIZE3); + + /* + * dumping registers of only group0 and group3, because VPE channels + * lie within group0 and group3 registers + */ + DUMPREG(INT_CHAN_STAT(0)); + DUMPREG(INT_CHAN_MASK(0)); + DUMPREG(INT_CHAN_STAT(3)); + DUMPREG(INT_CHAN_MASK(3)); + DUMPREG(INT_CLIENT0_STAT); + DUMPREG(INT_CLIENT0_MASK); + DUMPREG(INT_CLIENT1_STAT); + DUMPREG(INT_CLIENT1_MASK); + DUMPREG(INT_LIST0_STAT); + DUMPREG(INT_LIST0_MASK); + + /* + * these are registers specific to VPE clients, we can make this + * function dump client registers specific to VPE or VIP based on + * who is using it + */ + DUMPREG(DEI_CHROMA1_CSTAT); + DUMPREG(DEI_LUMA1_CSTAT); + DUMPREG(DEI_CHROMA2_CSTAT); + DUMPREG(DEI_LUMA2_CSTAT); + DUMPREG(DEI_CHROMA3_CSTAT); + DUMPREG(DEI_LUMA3_CSTAT); + DUMPREG(DEI_MV_IN_CSTAT); + DUMPREG(DEI_MV_OUT_CSTAT); + DUMPREG(VIP_UP_Y_CSTAT); + DUMPREG(VIP_UP_UV_CSTAT); + DUMPREG(VPI_CTL_CSTAT); +} + +/* + * Allocate a DMA buffer + */ +int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size) +{ + buf->size = size; + buf->mapped = false; + buf->addr = kzalloc(size, GFP_KERNEL); + if (!buf->addr) + return -ENOMEM; + + WARN_ON((u32) buf->addr & VPDMA_DESC_ALIGN); + + return 0; +} + +void vpdma_free_desc_buf(struct vpdma_buf *buf) +{ + WARN_ON(buf->mapped); + kfree(buf->addr); + buf->addr = NULL; + buf->size = 0; +} + +/* + * map descriptor/payload DMA buffer, enabling DMA access + */ +int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) +{ + struct device *dev = &vpdma->pdev->dev; + + WARN_ON(buf->mapped); + buf->dma_addr = dma_map_single(dev, buf->addr, buf->size, + DMA_TO_DEVICE); 
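(Orientation note on how these buffer helpers fit together: a descriptor list is built on the CPU, mapped for DMA, submitted, and then unmapped and reset once the list-complete interrupt fires. The example_* functions below are hypothetical and error handling is trimmed; only the vpdma_* calls come from this patch.)

static int example_submit(struct vpdma_data *vpdma,
			  struct vpdma_desc_list *list)
{
	int ret;

	/* make the CPU-written descriptors visible to the VPDMA engine */
	ret = vpdma_map_desc_buf(vpdma, &list->buf);
	if (ret)
		return ret;

	/* returns -EBUSY while list 0 is still being parsed */
	return vpdma_submit_descs(vpdma, list);
}

static void example_on_list_complete(struct vpdma_data *vpdma,
				     struct vpdma_desc_list *list)
{
	/* called from the driver's list-complete interrupt handling */
	vpdma_unmap_desc_buf(vpdma, &list->buf);
	vpdma_reset_desc_list(list);
}

(The mapping helper in the patch continues below with its dma_mapping_error() check.)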
+ if (dma_mapping_error(dev, buf->dma_addr)) { + dev_err(dev, "failed to map buffer\n"); + return -EINVAL; + } + + buf->mapped = true; + + return 0; +} + +/* + * unmap descriptor/payload DMA buffer, disabling DMA access and + * allowing the main processor to acces the data + */ +void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) +{ + struct device *dev = &vpdma->pdev->dev; + + if (buf->mapped) + dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE); + + buf->mapped = false; +} + +/* + * create a descriptor list, the user of this list will append configuration, + * control and data descriptors to this list, this list will be submitted to + * VPDMA. VPDMA's list parser will go through each descriptor and perform the + * required DMA operations + */ +int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type) +{ + int r; + + r = vpdma_alloc_desc_buf(&list->buf, size); + if (r) + return r; + + list->next = list->buf.addr; + + list->type = type; + + return 0; +} + +/* + * once a descriptor list is parsed by VPDMA, we reset the list by emptying it, + * to allow new descriptors to be added to the list. + */ +void vpdma_reset_desc_list(struct vpdma_desc_list *list) +{ + list->next = list->buf.addr; +} + +/* + * free the buffer allocated fot the VPDMA descriptor list, this should be + * called when the user doesn't want to use VPDMA any more. + */ +void vpdma_free_desc_list(struct vpdma_desc_list *list) +{ + vpdma_free_desc_buf(&list->buf); + + list->next = NULL; +} + +static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num) +{ + return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16); +} + +/* + * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion + */ +int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list) +{ + /* we always use the first list */ + int list_num = 0; + int list_size; + + if (vpdma_list_busy(vpdma, list_num)) + return -EBUSY; + + /* 16-byte granularity */ + list_size = (list->next - list->buf.addr) >> 4; + + write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr); + + write_reg(vpdma, VPDMA_LIST_ATTR, + (list_num << VPDMA_LIST_NUM_SHFT) | + (list->type << VPDMA_LIST_TYPE_SHFT) | + list_size); + + return 0; +} + +static void dump_cfd(struct vpdma_cfd *cfd) +{ + int class; + + class = cfd_get_class(cfd); + + pr_debug("config descriptor of payload class: %s\n", + class == CFD_CLS_BLOCK ? 
"simple block" : + "address data block"); + + if (class == CFD_CLS_BLOCK) + pr_debug("word0: dst_addr_offset = 0x%08x\n", + cfd->dest_addr_offset); + + if (class == CFD_CLS_BLOCK) + pr_debug("word1: num_data_wrds = %d\n", cfd->block_len); + + pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr); + + pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, " + "payload_len = %d\n", cfd_get_pkt_type(cfd), + cfd_get_direct(cfd), class, cfd_get_dest(cfd), + cfd_get_payload_len(cfd)); +} + +/* + * append a configuration descriptor to the given descriptor list, where the + * payload is in the form of a simple data block specified in the descriptor + * header, this is used to upload scaler coefficients to the scaler module + */ +void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client, + struct vpdma_buf *blk, u32 dest_offset) +{ + struct vpdma_cfd *cfd; + int len = blk->size; + + WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN); + + cfd = list->next; + WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size)); + + cfd->dest_addr_offset = dest_offset; + cfd->block_len = len; + cfd->payload_addr = (u32) blk->dma_addr; + cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK, + client, len >> 4); + + list->next = cfd + 1; + + dump_cfd(cfd); +} + +/* + * append a configuration descriptor to the given descriptor list, where the + * payload is in the address data block format, this is used to a configure a + * discontiguous set of MMRs + */ +void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client, + struct vpdma_buf *adb) +{ + struct vpdma_cfd *cfd; + unsigned int len = adb->size; + + WARN_ON(len & VPDMA_ADB_SIZE_ALIGN); + WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN); + + cfd = list->next; + BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size)); + + cfd->w0 = 0; + cfd->w1 = 0; + cfd->payload_addr = (u32) adb->dma_addr; + cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB, + client, len >> 4); + + list->next = cfd + 1; + + dump_cfd(cfd); +}; + +/* + * control descriptor format change based on what type of control descriptor it + * is, we only use 'sync on channel' control descriptors for now, so assume it's + * that + */ +static void dump_ctd(struct vpdma_ctd *ctd) +{ + pr_debug("control descriptor\n"); + + pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n", + ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd)); +} + +/* + * append a 'sync on channel' type control descriptor to the given descriptor + * list, this descriptor stalls the VPDMA list till the time DMA is completed + * on the specified channel + */ +void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list, + enum vpdma_channel chan) +{ + struct vpdma_ctd *ctd; + + ctd = list->next; + WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size)); + + ctd->w0 = 0; + ctd->w1 = 0; + ctd->w2 = 0; + ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num, + CTD_TYPE_SYNC_ON_CHANNEL); + + list->next = ctd + 1; + + dump_ctd(ctd); +} + +static void dump_dtd(struct vpdma_dtd *dtd) +{ + int dir, chan; + + dir = dtd_get_dir(dtd); + chan = dtd_get_chan(dtd); + + pr_debug("%s data transfer descriptor for channel %d\n", + dir == DTD_DIR_OUT ? 
"outbound" : "inbound", chan); + + pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, " + "even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n", + dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd), + dtd_get_1d(dtd), dtd_get_even_line_skip(dtd), + dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd)); + + if (dir == DTD_DIR_IN) + pr_debug("word1: line_length = %d, xfer_height = %d\n", + dtd_get_line_length(dtd), dtd_get_xfer_height(dtd)); + + pr_debug("word2: start_addr = 0x%08x\n", dtd->start_addr); + + pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, " + "pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd), + dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd), + dtd_get_next_chan(dtd)); + + if (dir == DTD_DIR_IN) + pr_debug("word4: frame_width = %d, frame_height = %d\n", + dtd_get_frame_width(dtd), dtd_get_frame_height(dtd)); + else + pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, " + "drp_data = %d, use_desc_reg = %d\n", + dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd), + dtd_get_drop_data(dtd), dtd_get_use_desc(dtd)); + + if (dir == DTD_DIR_IN) + pr_debug("word5: hor_start = %d, ver_start = %d\n", + dtd_get_h_start(dtd), dtd_get_v_start(dtd)); + else + pr_debug("word5: max_width %d, max_height %d\n", + dtd_get_max_width(dtd), dtd_get_max_height(dtd)); + + pr_debug("word6: client specfic attr0 = 0x%08x\n", dtd->client_attr0); + pr_debug("word7: client specfic attr1 = 0x%08x\n", dtd->client_attr1); +} + +/* + * append an outbound data transfer descriptor to the given descriptor list, + * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel + */ +void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, u32 flags) +{ + int priority = 0; + int field = 0; + int notify = 1; + int channel, next_chan; + int depth = fmt->depth; + int stride; + struct vpdma_dtd *dtd; + + channel = next_chan = chan_info[chan].num; + + if (fmt->data_type == DATA_TYPE_C420) + depth = 8; + + stride = (depth * c_rect->width) >> 3; + dma_addr += (c_rect->left * depth) >> 3; + + dtd = list->next; + WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size)); + + dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type, + notify, + field, + !!(flags & VPDMA_DATA_FRAME_1D), + !!(flags & VPDMA_DATA_EVEN_LINE_SKIP), + !!(flags & VPDMA_DATA_ODD_LINE_SKIP), + stride); + dtd->w1 = 0; + dtd->start_addr = (u32) dma_addr; + dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED), + DTD_DIR_OUT, channel, priority, next_chan); + dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0); + dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920, + MAX_OUT_HEIGHT_1080); + dtd->client_attr0 = 0; + dtd->client_attr1 = 0; + + list->next = dtd + 1; + + dump_dtd(dtd); +} + +/* + * append an inbound data transfer descriptor to the given descriptor list, + * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel + */ +void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width, + int frame_height, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, int field, u32 flags) +{ + int priority = 0; + int notify = 1; + int depth = fmt->depth; + int channel, next_chan; + int stride; + int height = c_rect->height; + struct vpdma_dtd *dtd; + + channel = next_chan = chan_info[chan].num; + + if (fmt->data_type == DATA_TYPE_C420) { + height >>= 1; + 
frame_height >>= 1; + depth = 8; + } + + stride = (depth * c_rect->width) >> 3; + dma_addr += (c_rect->left * depth) >> 3; + + dtd = list->next; + WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size)); + + dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type, + notify, + field, + !!(flags & VPDMA_DATA_FRAME_1D), + !!(flags & VPDMA_DATA_EVEN_LINE_SKIP), + !!(flags & VPDMA_DATA_ODD_LINE_SKIP), + stride); + + dtd->xfer_length_height = dtd_xfer_length_height(c_rect->width, height); + dtd->start_addr = (u32) dma_addr; + dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED), + DTD_DIR_IN, channel, priority, next_chan); + dtd->frame_width_height = dtd_frame_width_height(frame_width, + frame_height); + dtd->start_h_v = dtd_start_h_v(c_rect->left, c_rect->top); + dtd->client_attr0 = 0; + dtd->client_attr1 = 0; + + list->next = dtd + 1; + + dump_dtd(dtd); +} + +/* set or clear the mask for list complete interrupt */ +void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num, + bool enable) +{ + u32 val; + + val = read_reg(vpdma, VPDMA_INT_LIST0_MASK); + if (enable) + val |= (1 << (list_num * 2)); + else + val &= ~(1 << (list_num * 2)); + write_reg(vpdma, VPDMA_INT_LIST0_MASK, val); +} + +/* clear previosuly occured list intterupts in the LIST_STAT register */ +void vpdma_clear_list_stat(struct vpdma_data *vpdma) +{ + write_reg(vpdma, VPDMA_INT_LIST0_STAT, + read_reg(vpdma, VPDMA_INT_LIST0_STAT)); +} + +/* + * configures the output mode of the line buffer for the given client, the + * line buffer content can either be mirrored(each line repeated twice) or + * passed to the client as is + */ +void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode, + enum vpdma_channel chan) +{ + int client_cstat = chan_info[chan].cstat_offset; + + write_field_reg(vpdma, client_cstat, line_mode, + VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT); +} + +/* + * configures the event which should trigger VPDMA transfer for the given + * client + */ +void vpdma_set_frame_start_event(struct vpdma_data *vpdma, + enum vpdma_frame_start_event fs_event, + enum vpdma_channel chan) +{ + int client_cstat = chan_info[chan].cstat_offset; + + write_field_reg(vpdma, client_cstat, fs_event, + VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT); +} + +static void vpdma_firmware_cb(const struct firmware *f, void *context) +{ + struct vpdma_data *vpdma = context; + struct vpdma_buf fw_dma_buf; + int i, r; + + dev_dbg(&vpdma->pdev->dev, "firmware callback\n"); + + if (!f || !f->data) { + dev_err(&vpdma->pdev->dev, "couldn't get firmware\n"); + return; + } + + /* already initialized */ + if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK, + VPDMA_LIST_RDY_SHFT)) { + vpdma->ready = true; + return; + } + + r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size); + if (r) { + dev_err(&vpdma->pdev->dev, + "failed to allocate dma buffer for firmware\n"); + goto rel_fw; + } + + memcpy(fw_dma_buf.addr, f->data, f->size); + + vpdma_map_desc_buf(vpdma, &fw_dma_buf); + + write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr); + + for (i = 0; i < 100; i++) { /* max 1 second */ + msleep_interruptible(10); + + if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK, + VPDMA_LIST_RDY_SHFT)) + break; + } + + if (i == 100) { + dev_err(&vpdma->pdev->dev, "firmware upload failed\n"); + goto free_buf; + } + + vpdma->ready = true; + +free_buf: + vpdma_unmap_desc_buf(vpdma, &fw_dma_buf); + + vpdma_free_desc_buf(&fw_dma_buf); +rel_fw: + release_firmware(f); +} + +static int 
vpdma_load_firmware(struct vpdma_data *vpdma) +{ + int r; + struct device *dev = &vpdma->pdev->dev; + + r = request_firmware_nowait(THIS_MODULE, 1, + (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma, + vpdma_firmware_cb); + if (r) { + dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE); + return r; + } else { + dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE); + } + + return 0; +} + +struct vpdma_data *vpdma_create(struct platform_device *pdev) +{ + struct resource *res; + struct vpdma_data *vpdma; + int r; + + dev_dbg(&pdev->dev, "vpdma_create\n"); + + vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL); + if (!vpdma) { + dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n"); + return ERR_PTR(-ENOMEM); + } + + vpdma->pdev = pdev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma"); + if (res == NULL) { + dev_err(&pdev->dev, "missing platform resources data\n"); + return ERR_PTR(-ENODEV); + } + + vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!vpdma->base) { + dev_err(&pdev->dev, "failed to ioremap\n"); + return ERR_PTR(-ENOMEM); + } + + r = vpdma_load_firmware(vpdma); + if (r) { + pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE); + return ERR_PTR(r); + } + + return vpdma; +} +MODULE_FIRMWARE(VPDMA_FIRMWARE); diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h new file mode 100644 index 00000000000..eaa2a71a5db --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpdma.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __TI_VPDMA_H_ +#define __TI_VPDMA_H_ + +/* + * A vpdma_buf tracks the size, DMA address and mapping status of each + * driver DMA area. 
+ */ +struct vpdma_buf { + void *addr; + dma_addr_t dma_addr; + size_t size; + bool mapped; +}; + +struct vpdma_desc_list { + struct vpdma_buf buf; + void *next; + int type; +}; + +struct vpdma_data { + void __iomem *base; + + struct platform_device *pdev; + + /* tells whether vpdma firmware is loaded or not */ + bool ready; +}; + +struct vpdma_data_format { + int data_type; + u8 depth; +}; + +#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */ + +#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */ +#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */ + +#define VPDMA_LIST_TYPE_NORMAL 0 +#define VPDMA_LIST_TYPE_SELF_MODIFYING 1 +#define VPDMA_LIST_TYPE_DOORBELL 2 + +enum vpdma_yuv_formats { + VPDMA_DATA_FMT_Y444 = 0, + VPDMA_DATA_FMT_Y422, + VPDMA_DATA_FMT_Y420, + VPDMA_DATA_FMT_C444, + VPDMA_DATA_FMT_C422, + VPDMA_DATA_FMT_C420, + VPDMA_DATA_FMT_YC422, + VPDMA_DATA_FMT_YC444, + VPDMA_DATA_FMT_CY422, +}; + +enum vpdma_rgb_formats { + VPDMA_DATA_FMT_RGB565 = 0, + VPDMA_DATA_FMT_ARGB16_1555, + VPDMA_DATA_FMT_ARGB16, + VPDMA_DATA_FMT_RGBA16_5551, + VPDMA_DATA_FMT_RGBA16, + VPDMA_DATA_FMT_ARGB24, + VPDMA_DATA_FMT_RGB24, + VPDMA_DATA_FMT_ARGB32, + VPDMA_DATA_FMT_RGBA24, + VPDMA_DATA_FMT_RGBA32, + VPDMA_DATA_FMT_BGR565, + VPDMA_DATA_FMT_ABGR16_1555, + VPDMA_DATA_FMT_ABGR16, + VPDMA_DATA_FMT_BGRA16_5551, + VPDMA_DATA_FMT_BGRA16, + VPDMA_DATA_FMT_ABGR24, + VPDMA_DATA_FMT_BGR24, + VPDMA_DATA_FMT_ABGR32, + VPDMA_DATA_FMT_BGRA24, + VPDMA_DATA_FMT_BGRA32, +}; + +enum vpdma_misc_formats { + VPDMA_DATA_FMT_MV = 0, +}; + +extern const struct vpdma_data_format vpdma_yuv_fmts[]; +extern const struct vpdma_data_format vpdma_rgb_fmts[]; +extern const struct vpdma_data_format vpdma_misc_fmts[]; + +enum vpdma_frame_start_event { + VPDMA_FSEVENT_HDMI_FID = 0, + VPDMA_FSEVENT_DVO2_FID, + VPDMA_FSEVENT_HDCOMP_FID, + VPDMA_FSEVENT_SD_FID, + VPDMA_FSEVENT_LM_FID0, + VPDMA_FSEVENT_LM_FID1, + VPDMA_FSEVENT_LM_FID2, + VPDMA_FSEVENT_CHANNEL_ACTIVE, +}; + +/* + * VPDMA channel numbers + */ +enum vpdma_channel { + VPE_CHAN_LUMA1_IN, + VPE_CHAN_CHROMA1_IN, + VPE_CHAN_LUMA2_IN, + VPE_CHAN_CHROMA2_IN, + VPE_CHAN_LUMA3_IN, + VPE_CHAN_CHROMA3_IN, + VPE_CHAN_MV_IN, + VPE_CHAN_MV_OUT, + VPE_CHAN_LUMA_OUT, + VPE_CHAN_CHROMA_OUT, + VPE_CHAN_RGB_OUT, +}; + +/* flags for VPDMA data descriptors */ +#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0) +#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1) +#define VPDMA_DATA_FRAME_1D (1 << 2) +#define VPDMA_DATA_MODE_TILED (1 << 3) + +/* + * client identifiers used for configuration descriptors + */ +#define CFD_MMR_CLIENT 0 +#define CFD_SC_CLIENT 4 + +/* Address data block header format */ +struct vpdma_adb_hdr { + u32 offset; + u32 nwords; + u32 reserved0; + u32 reserved1; +}; + +/* helpers for creating ADB headers for config descriptors MMRs as client */ +#define ADB_ADDR(dma_buf, str, fld) ((dma_buf)->addr + offsetof(str, fld)) +#define MMR_ADB_ADDR(buf, str, fld) ADB_ADDR(&(buf), struct str, fld) + +#define VPDMA_SET_MMR_ADB_HDR(buf, str, hdr, regs, offset_a) \ + do { \ + struct vpdma_adb_hdr *h; \ + struct str *adb = NULL; \ + h = MMR_ADB_ADDR(buf, str, hdr); \ + h->offset = (offset_a); \ + h->nwords = sizeof(adb->regs) >> 2; \ + } while (0) + +/* vpdma descriptor buffer allocation and management */ +int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size); +void vpdma_free_desc_buf(struct vpdma_buf *buf); +int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf); +void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf); + +/* vpdma descriptor list funcs */ +int 
vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type); +void vpdma_reset_desc_list(struct vpdma_desc_list *list); +void vpdma_free_desc_list(struct vpdma_desc_list *list); +int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list); + +/* helpers for creating vpdma descriptors */ +void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client, + struct vpdma_buf *blk, u32 dest_offset); +void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client, + struct vpdma_buf *adb); +void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list, + enum vpdma_channel chan); +void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, u32 flags); +void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width, + int frame_height, struct v4l2_rect *c_rect, + const struct vpdma_data_format *fmt, dma_addr_t dma_addr, + enum vpdma_channel chan, int field, u32 flags); + +/* vpdma list interrupt management */ +void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num, + bool enable); +void vpdma_clear_list_stat(struct vpdma_data *vpdma); + +/* vpdma client configuration */ +void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode, + enum vpdma_channel chan); +void vpdma_set_frame_start_event(struct vpdma_data *vpdma, + enum vpdma_frame_start_event fs_event, enum vpdma_channel chan); + +void vpdma_dump_regs(struct vpdma_data *vpdma); + +/* initialize vpdma, passed with VPE's platform device pointer */ +struct vpdma_data *vpdma_create(struct platform_device *pdev); + +#endif diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h new file mode 100644 index 00000000000..f0e9a8038c1 --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpdma_priv.h @@ -0,0 +1,641 @@ +/* + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#ifndef _TI_VPDMA_PRIV_H_ +#define _TI_VPDMA_PRIV_H_ + +/* + * VPDMA Register offsets + */ + +/* Top level */ +#define VPDMA_PID 0x00 +#define VPDMA_LIST_ADDR 0x04 +#define VPDMA_LIST_ATTR 0x08 +#define VPDMA_LIST_STAT_SYNC 0x0c +#define VPDMA_BG_RGB 0x18 +#define VPDMA_BG_YUV 0x1c +#define VPDMA_SETUP 0x30 +#define VPDMA_MAX_SIZE1 0x34 +#define VPDMA_MAX_SIZE2 0x38 +#define VPDMA_MAX_SIZE3 0x3c + +/* Interrupts */ +#define VPDMA_INT_CHAN_STAT(grp) (0x40 + grp * 8) +#define VPDMA_INT_CHAN_MASK(grp) (VPDMA_INT_CHAN_STAT(grp) + 4) +#define VPDMA_INT_CLIENT0_STAT 0x78 +#define VPDMA_INT_CLIENT0_MASK 0x7c +#define VPDMA_INT_CLIENT1_STAT 0x80 +#define VPDMA_INT_CLIENT1_MASK 0x84 +#define VPDMA_INT_LIST0_STAT 0x88 +#define VPDMA_INT_LIST0_MASK 0x8c + +#define VPDMA_PERFMON(i) (0x200 + i * 4) + +/* VPE specific client registers */ +#define VPDMA_DEI_CHROMA1_CSTAT 0x0300 +#define VPDMA_DEI_LUMA1_CSTAT 0x0304 +#define VPDMA_DEI_LUMA2_CSTAT 0x0308 +#define VPDMA_DEI_CHROMA2_CSTAT 0x030c +#define VPDMA_DEI_LUMA3_CSTAT 0x0310 +#define VPDMA_DEI_CHROMA3_CSTAT 0x0314 +#define VPDMA_DEI_MV_IN_CSTAT 0x0330 +#define VPDMA_DEI_MV_OUT_CSTAT 0x033c +#define VPDMA_VIP_UP_Y_CSTAT 0x0390 +#define VPDMA_VIP_UP_UV_CSTAT 0x0394 +#define VPDMA_VPI_CTL_CSTAT 0x03d0 + +/* Reg field info for VPDMA_CLIENT_CSTAT registers */ +#define VPDMA_CSTAT_LINE_MODE_MASK 0x03 +#define VPDMA_CSTAT_LINE_MODE_SHIFT 8 +#define VPDMA_CSTAT_FRAME_START_MASK 0xf +#define VPDMA_CSTAT_FRAME_START_SHIFT 10 + +#define VPDMA_LIST_NUM_MASK 0x07 +#define VPDMA_LIST_NUM_SHFT 24 +#define VPDMA_LIST_STOP_SHFT 20 +#define VPDMA_LIST_RDY_MASK 0x01 +#define VPDMA_LIST_RDY_SHFT 19 +#define VPDMA_LIST_TYPE_MASK 0x03 +#define VPDMA_LIST_TYPE_SHFT 16 +#define VPDMA_LIST_SIZE_MASK 0xffff + +/* VPDMA data type values for data formats */ +#define DATA_TYPE_Y444 0x0 +#define DATA_TYPE_Y422 0x1 +#define DATA_TYPE_Y420 0x2 +#define DATA_TYPE_C444 0x4 +#define DATA_TYPE_C422 0x5 +#define DATA_TYPE_C420 0x6 +#define DATA_TYPE_YC422 0x7 +#define DATA_TYPE_YC444 0x8 +#define DATA_TYPE_CY422 0x23 + +#define DATA_TYPE_RGB16_565 0x0 +#define DATA_TYPE_ARGB_1555 0x1 +#define DATA_TYPE_ARGB_4444 0x2 +#define DATA_TYPE_RGBA_5551 0x3 +#define DATA_TYPE_RGBA_4444 0x4 +#define DATA_TYPE_ARGB24_6666 0x5 +#define DATA_TYPE_RGB24_888 0x6 +#define DATA_TYPE_ARGB32_8888 0x7 +#define DATA_TYPE_RGBA24_6666 0x8 +#define DATA_TYPE_RGBA32_8888 0x9 +#define DATA_TYPE_BGR16_565 0x10 +#define DATA_TYPE_ABGR_1555 0x11 +#define DATA_TYPE_ABGR_4444 0x12 +#define DATA_TYPE_BGRA_5551 0x13 +#define DATA_TYPE_BGRA_4444 0x14 +#define DATA_TYPE_ABGR24_6666 0x15 +#define DATA_TYPE_BGR24_888 0x16 +#define DATA_TYPE_ABGR32_8888 0x17 +#define DATA_TYPE_BGRA24_6666 0x18 +#define DATA_TYPE_BGRA32_8888 0x19 + +#define DATA_TYPE_MV 0x3 + +/* VPDMA channel numbers(only VPE channels for now) */ +#define VPE_CHAN_NUM_LUMA1_IN 0 +#define VPE_CHAN_NUM_CHROMA1_IN 1 +#define VPE_CHAN_NUM_LUMA2_IN 2 +#define VPE_CHAN_NUM_CHROMA2_IN 3 +#define VPE_CHAN_NUM_LUMA3_IN 4 +#define VPE_CHAN_NUM_CHROMA3_IN 5 +#define VPE_CHAN_NUM_MV_IN 12 +#define VPE_CHAN_NUM_MV_OUT 15 +#define VPE_CHAN_NUM_LUMA_OUT 102 +#define VPE_CHAN_NUM_CHROMA_OUT 103 +#define VPE_CHAN_NUM_RGB_OUT 106 + +/* + * a VPDMA address data block payload for a configuration descriptor needs to + * have each sub block length as a multiple of 16 bytes. Therefore, the overall + * size of the payload also needs to be a multiple of 16 bytes. The sub block + * lengths should be ensured to be aligned by the VPDMA user. 
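(To make the alignment rule above concrete: a caller could round each sub-block up with the kernel's ALIGN() helper from <linux/kernel.h> before building the payload. The example_ name below is hypothetical; this is only a sketch, not part of the patch.)

/* VPDMA_ADB_SIZE_ALIGN is a mask (0x0f), so the required multiple is mask + 1 */
static inline size_t example_adb_subblock_size(size_t raw_len)
{
	return ALIGN(raw_len, VPDMA_ADB_SIZE_ALIGN + 1);	/* 16-byte multiple */
}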
+ */ +#define VPDMA_ADB_SIZE_ALIGN 0x0f + +/* + * data transfer descriptor + */ +struct vpdma_dtd { + u32 type_ctl_stride; + union { + u32 xfer_length_height; + u32 w1; + }; + dma_addr_t start_addr; + u32 pkt_ctl; + union { + u32 frame_width_height; /* inbound */ + dma_addr_t desc_write_addr; /* outbound */ + }; + union { + u32 start_h_v; /* inbound */ + u32 max_width_height; /* outbound */ + }; + u32 client_attr0; + u32 client_attr1; +}; + +/* Data Transfer Descriptor specifics */ +#define DTD_NO_NOTIFY 0 +#define DTD_NOTIFY 1 + +#define DTD_PKT_TYPE 0xa +#define DTD_DIR_IN 0 +#define DTD_DIR_OUT 1 + +/* type_ctl_stride */ +#define DTD_DATA_TYPE_MASK 0x3f +#define DTD_DATA_TYPE_SHFT 26 +#define DTD_NOTIFY_MASK 0x01 +#define DTD_NOTIFY_SHFT 25 +#define DTD_FIELD_MASK 0x01 +#define DTD_FIELD_SHFT 24 +#define DTD_1D_MASK 0x01 +#define DTD_1D_SHFT 23 +#define DTD_EVEN_LINE_SKIP_MASK 0x01 +#define DTD_EVEN_LINE_SKIP_SHFT 20 +#define DTD_ODD_LINE_SKIP_MASK 0x01 +#define DTD_ODD_LINE_SKIP_SHFT 16 +#define DTD_LINE_STRIDE_MASK 0xffff +#define DTD_LINE_STRIDE_SHFT 0 + +/* xfer_length_height */ +#define DTD_LINE_LENGTH_MASK 0xffff +#define DTD_LINE_LENGTH_SHFT 16 +#define DTD_XFER_HEIGHT_MASK 0xffff +#define DTD_XFER_HEIGHT_SHFT 0 + +/* pkt_ctl */ +#define DTD_PKT_TYPE_MASK 0x1f +#define DTD_PKT_TYPE_SHFT 27 +#define DTD_MODE_MASK 0x01 +#define DTD_MODE_SHFT 26 +#define DTD_DIR_MASK 0x01 +#define DTD_DIR_SHFT 25 +#define DTD_CHAN_MASK 0x01ff +#define DTD_CHAN_SHFT 16 +#define DTD_PRI_MASK 0x0f +#define DTD_PRI_SHFT 9 +#define DTD_NEXT_CHAN_MASK 0x01ff +#define DTD_NEXT_CHAN_SHFT 0 + +/* frame_width_height */ +#define DTD_FRAME_WIDTH_MASK 0xffff +#define DTD_FRAME_WIDTH_SHFT 16 +#define DTD_FRAME_HEIGHT_MASK 0xffff +#define DTD_FRAME_HEIGHT_SHFT 0 + +/* start_h_v */ +#define DTD_H_START_MASK 0xffff +#define DTD_H_START_SHFT 16 +#define DTD_V_START_MASK 0xffff +#define DTD_V_START_SHFT 0 + +#define DTD_DESC_START_SHIFT 5 +#define DTD_WRITE_DESC_MASK 0x01 +#define DTD_WRITE_DESC_SHIFT 2 +#define DTD_DROP_DATA_MASK 0x01 +#define DTD_DROP_DATA_SHIFT 1 +#define DTD_USE_DESC_MASK 0x01 +#define DTD_USE_DESC_SHIFT 0 + +/* max_width_height */ +#define DTD_MAX_WIDTH_MASK 0x07 +#define DTD_MAX_WIDTH_SHFT 4 +#define DTD_MAX_HEIGHT_MASK 0x07 +#define DTD_MAX_HEIGHT_SHFT 0 + +/* max width configurations */ + /* unlimited width */ +#define MAX_OUT_WIDTH_UNLIMITED 0 +/* as specified in max_size1 reg */ +#define MAX_OUT_WIDTH_REG1 1 +/* as specified in max_size2 reg */ +#define MAX_OUT_WIDTH_REG2 2 +/* as specified in max_size3 reg */ +#define MAX_OUT_WIDTH_REG3 3 +/* maximum of 352 pixels as width */ +#define MAX_OUT_WIDTH_352 4 +/* maximum of 768 pixels as width */ +#define MAX_OUT_WIDTH_768 5 +/* maximum of 1280 pixels width */ +#define MAX_OUT_WIDTH_1280 6 +/* maximum of 1920 pixels as width */ +#define MAX_OUT_WIDTH_1920 7 + +/* max height configurations */ + /* unlimited height */ +#define MAX_OUT_HEIGHT_UNLIMITED 0 +/* as specified in max_size1 reg */ +#define MAX_OUT_HEIGHT_REG1 1 +/* as specified in max_size2 reg */ +#define MAX_OUT_HEIGHT_REG2 2 +/* as specified in max_size3 reg */ +#define MAX_OUT_HEIGHT_REG3 3 +/* maximum of 288 lines as height */ +#define MAX_OUT_HEIGHT_288 4 +/* maximum of 576 lines as height */ +#define MAX_OUT_HEIGHT_576 5 +/* maximum of 720 lines as height */ +#define MAX_OUT_HEIGHT_720 6 +/* maximum of 1080 lines as height */ +#define MAX_OUT_HEIGHT_1080 7 + +static inline u32 dtd_type_ctl_stride(int type, bool notify, int field, + bool one_d, bool even_line_skip, bool 
odd_line_skip, + int line_stride) +{ + return (type << DTD_DATA_TYPE_SHFT) | (notify << DTD_NOTIFY_SHFT) | + (field << DTD_FIELD_SHFT) | (one_d << DTD_1D_SHFT) | + (even_line_skip << DTD_EVEN_LINE_SKIP_SHFT) | + (odd_line_skip << DTD_ODD_LINE_SKIP_SHFT) | + line_stride; +} + +static inline u32 dtd_xfer_length_height(int line_length, int xfer_height) +{ + return (line_length << DTD_LINE_LENGTH_SHFT) | xfer_height; +} + +static inline u32 dtd_pkt_ctl(bool mode, bool dir, int chan, int pri, + int next_chan) +{ + return (DTD_PKT_TYPE << DTD_PKT_TYPE_SHFT) | (mode << DTD_MODE_SHFT) | + (dir << DTD_DIR_SHFT) | (chan << DTD_CHAN_SHFT) | + (pri << DTD_PRI_SHFT) | next_chan; +} + +static inline u32 dtd_frame_width_height(int width, int height) +{ + return (width << DTD_FRAME_WIDTH_SHFT) | height; +} + +static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc, + bool drop_data, bool use_desc) +{ + return (addr << DTD_DESC_START_SHIFT) | + (write_desc << DTD_WRITE_DESC_SHIFT) | + (drop_data << DTD_DROP_DATA_SHIFT) | + use_desc; +} + +static inline u32 dtd_start_h_v(int h_start, int v_start) +{ + return (h_start << DTD_H_START_SHFT) | v_start; +} + +static inline u32 dtd_max_width_height(int max_width, int max_height) +{ + return (max_width << DTD_MAX_WIDTH_SHFT) | max_height; +} + +static inline int dtd_get_data_type(struct vpdma_dtd *dtd) +{ + return dtd->type_ctl_stride >> DTD_DATA_TYPE_SHFT; +} + +static inline bool dtd_get_notify(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_NOTIFY_SHFT) & DTD_NOTIFY_MASK; +} + +static inline int dtd_get_field(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_FIELD_SHFT) & DTD_FIELD_MASK; +} + +static inline bool dtd_get_1d(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_1D_SHFT) & DTD_1D_MASK; +} + +static inline bool dtd_get_even_line_skip(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_EVEN_LINE_SKIP_SHFT) + & DTD_EVEN_LINE_SKIP_MASK; +} + +static inline bool dtd_get_odd_line_skip(struct vpdma_dtd *dtd) +{ + return (dtd->type_ctl_stride >> DTD_ODD_LINE_SKIP_SHFT) + & DTD_ODD_LINE_SKIP_MASK; +} + +static inline int dtd_get_line_stride(struct vpdma_dtd *dtd) +{ + return dtd->type_ctl_stride & DTD_LINE_STRIDE_MASK; +} + +static inline int dtd_get_line_length(struct vpdma_dtd *dtd) +{ + return dtd->xfer_length_height >> DTD_LINE_LENGTH_SHFT; +} + +static inline int dtd_get_xfer_height(struct vpdma_dtd *dtd) +{ + return dtd->xfer_length_height & DTD_XFER_HEIGHT_MASK; +} + +static inline int dtd_get_pkt_type(struct vpdma_dtd *dtd) +{ + return dtd->pkt_ctl >> DTD_PKT_TYPE_SHFT; +} + +static inline bool dtd_get_mode(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_MODE_SHFT) & DTD_MODE_MASK; +} + +static inline bool dtd_get_dir(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_DIR_SHFT) & DTD_DIR_MASK; +} + +static inline int dtd_get_chan(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_CHAN_SHFT) & DTD_CHAN_MASK; +} + +static inline int dtd_get_priority(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_PRI_SHFT) & DTD_PRI_MASK; +} + +static inline int dtd_get_next_chan(struct vpdma_dtd *dtd) +{ + return (dtd->pkt_ctl >> DTD_NEXT_CHAN_SHFT) & DTD_NEXT_CHAN_MASK; +} + +static inline int dtd_get_frame_width(struct vpdma_dtd *dtd) +{ + return dtd->frame_width_height >> DTD_FRAME_WIDTH_SHFT; +} + +static inline int dtd_get_frame_height(struct vpdma_dtd *dtd) +{ + return dtd->frame_width_height & DTD_FRAME_HEIGHT_MASK; +} + +static inline int 
dtd_get_desc_write_addr(struct vpdma_dtd *dtd) +{ + return dtd->desc_write_addr >> DTD_DESC_START_SHIFT; +} + +static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd) +{ + return (dtd->desc_write_addr >> DTD_WRITE_DESC_SHIFT) & + DTD_WRITE_DESC_MASK; +} + +static inline bool dtd_get_drop_data(struct vpdma_dtd *dtd) +{ + return (dtd->desc_write_addr >> DTD_DROP_DATA_SHIFT) & + DTD_DROP_DATA_MASK; +} + +static inline bool dtd_get_use_desc(struct vpdma_dtd *dtd) +{ + return dtd->desc_write_addr & DTD_USE_DESC_MASK; +} + +static inline int dtd_get_h_start(struct vpdma_dtd *dtd) +{ + return dtd->start_h_v >> DTD_H_START_SHFT; +} + +static inline int dtd_get_v_start(struct vpdma_dtd *dtd) +{ + return dtd->start_h_v & DTD_V_START_MASK; +} + +static inline int dtd_get_max_width(struct vpdma_dtd *dtd) +{ + return (dtd->max_width_height >> DTD_MAX_WIDTH_SHFT) & + DTD_MAX_WIDTH_MASK; +} + +static inline int dtd_get_max_height(struct vpdma_dtd *dtd) +{ + return (dtd->max_width_height >> DTD_MAX_HEIGHT_SHFT) & + DTD_MAX_HEIGHT_MASK; +} + +/* + * configuration descriptor + */ +struct vpdma_cfd { + union { + u32 dest_addr_offset; + u32 w0; + }; + union { + u32 block_len; /* in words */ + u32 w1; + }; + u32 payload_addr; + u32 ctl_payload_len; /* in words */ +}; + +/* Configuration descriptor specifics */ + +#define CFD_PKT_TYPE 0xb + +#define CFD_DIRECT 1 +#define CFD_INDIRECT 0 +#define CFD_CLS_ADB 0 +#define CFD_CLS_BLOCK 1 + +/* block_len */ +#define CFD__BLOCK_LEN_MASK 0xffff +#define CFD__BLOCK_LEN_SHFT 0 + +/* ctl_payload_len */ +#define CFD_PKT_TYPE_MASK 0x1f +#define CFD_PKT_TYPE_SHFT 27 +#define CFD_DIRECT_MASK 0x01 +#define CFD_DIRECT_SHFT 26 +#define CFD_CLASS_MASK 0x03 +#define CFD_CLASS_SHFT 24 +#define CFD_DEST_MASK 0xff +#define CFD_DEST_SHFT 16 +#define CFD_PAYLOAD_LEN_MASK 0xffff +#define CFD_PAYLOAD_LEN_SHFT 0 + +static inline u32 cfd_pkt_payload_len(bool direct, int cls, int dest, + int payload_len) +{ + return (CFD_PKT_TYPE << CFD_PKT_TYPE_SHFT) | + (direct << CFD_DIRECT_SHFT) | + (cls << CFD_CLASS_SHFT) | + (dest << CFD_DEST_SHFT) | + payload_len; +} + +static inline int cfd_get_pkt_type(struct vpdma_cfd *cfd) +{ + return cfd->ctl_payload_len >> CFD_PKT_TYPE_SHFT; +} + +static inline bool cfd_get_direct(struct vpdma_cfd *cfd) +{ + return (cfd->ctl_payload_len >> CFD_DIRECT_SHFT) & CFD_DIRECT_MASK; +} + +static inline bool cfd_get_class(struct vpdma_cfd *cfd) +{ + return (cfd->ctl_payload_len >> CFD_CLASS_SHFT) & CFD_CLASS_MASK; +} + +static inline int cfd_get_dest(struct vpdma_cfd *cfd) +{ + return (cfd->ctl_payload_len >> CFD_DEST_SHFT) & CFD_DEST_MASK; +} + +static inline int cfd_get_payload_len(struct vpdma_cfd *cfd) +{ + return cfd->ctl_payload_len & CFD_PAYLOAD_LEN_MASK; +} + +/* + * control descriptor + */ +struct vpdma_ctd { + union { + u32 timer_value; + u32 list_addr; + u32 w0; + }; + union { + u32 pixel_line_count; + u32 list_size; + u32 w1; + }; + union { + u32 event; + u32 fid_ctl; + u32 w2; + }; + u32 type_source_ctl; +}; + +/* control descriptor types */ +#define CTD_TYPE_SYNC_ON_CLIENT 0 +#define CTD_TYPE_SYNC_ON_LIST 1 +#define CTD_TYPE_SYNC_ON_EXT 2 +#define CTD_TYPE_SYNC_ON_LM_TIMER 3 +#define CTD_TYPE_SYNC_ON_CHANNEL 4 +#define CTD_TYPE_CHNG_CLIENT_IRQ 5 +#define CTD_TYPE_SEND_IRQ 6 +#define CTD_TYPE_RELOAD_LIST 7 +#define CTD_TYPE_ABORT_CHANNEL 8 + +#define CTD_PKT_TYPE 0xc + +/* timer_value */ +#define CTD_TIMER_VALUE_MASK 0xffff +#define CTD_TIMER_VALUE_SHFT 0 + +/* pixel_line_count */ +#define CTD_PIXEL_COUNT_MASK 0xffff +#define CTD_PIXEL_COUNT_SHFT 16 
+#define CTD_LINE_COUNT_MASK 0xffff +#define CTD_LINE_COUNT_SHFT 0 + +/* list_size */ +#define CTD_LIST_SIZE_MASK 0xffff +#define CTD_LIST_SIZE_SHFT 0 + +/* event */ +#define CTD_EVENT_MASK 0x0f +#define CTD_EVENT_SHFT 0 + +/* fid_ctl */ +#define CTD_FID2_MASK 0x03 +#define CTD_FID2_SHFT 4 +#define CTD_FID1_MASK 0x03 +#define CTD_FID1_SHFT 2 +#define CTD_FID0_MASK 0x03 +#define CTD_FID0_SHFT 0 + +/* type_source_ctl */ +#define CTD_PKT_TYPE_MASK 0x1f +#define CTD_PKT_TYPE_SHFT 27 +#define CTD_SOURCE_MASK 0xff +#define CTD_SOURCE_SHFT 16 +#define CTD_CONTROL_MASK 0x0f +#define CTD_CONTROL_SHFT 0 + +static inline u32 ctd_pixel_line_count(int pixel_count, int line_count) +{ + return (pixel_count << CTD_PIXEL_COUNT_SHFT) | line_count; +} + +static inline u32 ctd_set_fid_ctl(int fid0, int fid1, int fid2) +{ + return (fid2 << CTD_FID2_SHFT) | (fid1 << CTD_FID1_SHFT) | fid0; +} + +static inline u32 ctd_type_source_ctl(int source, int control) +{ + return (CTD_PKT_TYPE << CTD_PKT_TYPE_SHFT) | + (source << CTD_SOURCE_SHFT) | control; +} + +static inline u32 ctd_get_pixel_count(struct vpdma_ctd *ctd) +{ + return ctd->pixel_line_count >> CTD_PIXEL_COUNT_SHFT; +} + +static inline int ctd_get_line_count(struct vpdma_ctd *ctd) +{ + return ctd->pixel_line_count & CTD_LINE_COUNT_MASK; +} + +static inline int ctd_get_event(struct vpdma_ctd *ctd) +{ + return ctd->event & CTD_EVENT_MASK; +} + +static inline int ctd_get_fid2_ctl(struct vpdma_ctd *ctd) +{ + return (ctd->fid_ctl >> CTD_FID2_SHFT) & CTD_FID2_MASK; +} + +static inline int ctd_get_fid1_ctl(struct vpdma_ctd *ctd) +{ + return (ctd->fid_ctl >> CTD_FID1_SHFT) & CTD_FID1_MASK; +} + +static inline int ctd_get_fid0_ctl(struct vpdma_ctd *ctd) +{ + return ctd->fid_ctl & CTD_FID2_MASK; +} + +static inline int ctd_get_pkt_type(struct vpdma_ctd *ctd) +{ + return ctd->type_source_ctl >> CTD_PKT_TYPE_SHFT; +} + +static inline int ctd_get_source(struct vpdma_ctd *ctd) +{ + return (ctd->type_source_ctl >> CTD_SOURCE_SHFT) & CTD_SOURCE_MASK; +} + +static inline int ctd_get_ctl(struct vpdma_ctd *ctd) +{ + return ctd->type_source_ctl & CTD_CONTROL_MASK; +} + +#endif diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c new file mode 100644 index 00000000000..4e58069e24f --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -0,0 +1,2099 @@ +/* + * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver + * + * Copyright (c) 2013 Texas Instruments Inc. + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 
+ * Pawel Osciak, <pawel@osciak.com> + * Marek Szyprowski, <m.szyprowski@samsung.com> + * + * Based on the virtual v4l2-mem2mem example device + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioctl.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/videodev2.h> + +#include <media/v4l2-common.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-core.h> +#include <media/videobuf2-dma-contig.h> + +#include "vpdma.h" +#include "vpe_regs.h" + +#define VPE_MODULE_NAME "vpe" + +/* minimum and maximum frame sizes */ +#define MIN_W 128 +#define MIN_H 128 +#define MAX_W 1920 +#define MAX_H 1080 + +/* required alignments */ +#define S_ALIGN 0 /* multiple of 1 */ +#define H_ALIGN 1 /* multiple of 2 */ +#define W_ALIGN 1 /* multiple of 2 */ + +/* multiple of 128 bits, line stride, 16 bytes */ +#define L_ALIGN 4 + +/* flags that indicate a format can be used for capture/output */ +#define VPE_FMT_TYPE_CAPTURE (1 << 0) +#define VPE_FMT_TYPE_OUTPUT (1 << 1) + +/* used as plane indices */ +#define VPE_MAX_PLANES 2 +#define VPE_LUMA 0 +#define VPE_CHROMA 1 + +/* per m2m context info */ +#define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */ + +#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */ + +/* + * each VPE context can need up to 3 config desciptors, 7 input descriptors, + * 3 output descriptors, and 10 control descriptors + */ +#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \ + 13 * VPDMA_CFD_CTD_DESC_SIZE) + +#define vpe_dbg(vpedev, fmt, arg...) \ + dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg) +#define vpe_err(vpedev, fmt, arg...) \ + dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg) + +struct vpe_us_coeffs { + unsigned short anchor_fid0_c0; + unsigned short anchor_fid0_c1; + unsigned short anchor_fid0_c2; + unsigned short anchor_fid0_c3; + unsigned short interp_fid0_c0; + unsigned short interp_fid0_c1; + unsigned short interp_fid0_c2; + unsigned short interp_fid0_c3; + unsigned short anchor_fid1_c0; + unsigned short anchor_fid1_c1; + unsigned short anchor_fid1_c2; + unsigned short anchor_fid1_c3; + unsigned short interp_fid1_c0; + unsigned short interp_fid1_c1; + unsigned short interp_fid1_c2; + unsigned short interp_fid1_c3; +}; + +/* + * Default upsampler coefficients + */ +static const struct vpe_us_coeffs us_coeffs[] = { + { + /* Coefficients for progressive input */ + 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, + 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, + }, + { + /* Coefficients for Top Field Interlaced input */ + 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3, + /* Coefficients for Bottom Field Interlaced input */ + 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9, + }, +}; + +/* + * the following registers are for configuring some of the parameters of the + * motion and edge detection blocks inside DEI, these generally remain the same, + * these could be passed later via userspace if some one needs to tweak these. 
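+ *
+ * The values end up in VPE_DEI_REG2..VPE_DEI_REG7: set_dei_shadow_registers()
+ * below copies them into dei_regs[2]..dei_regs[7] of the shadow register
+ * address/data block, which VPDMA then loads along with the other MMRs.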
+ */ +struct vpe_dei_regs { + unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */ + unsigned long edi_config_reg; /* VPE_DEI_REG3 */ + unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */ + unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */ + unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */ + unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */ +}; + +/* + * default expert DEI register values, unlikely to be modified. + */ +static const struct vpe_dei_regs dei_regs = { + 0x020C0804u, + 0x0118100Fu, + 0x08040200u, + 0x1010100Cu, + 0x10101010u, + 0x10101010u, +}; + +/* + * The port_data structure contains per-port data. + */ +struct vpe_port_data { + enum vpdma_channel channel; /* VPDMA channel */ + u8 vb_index; /* input frame f, f-1, f-2 index */ + u8 vb_part; /* plane index for co-panar formats */ +}; + +/* + * Define indices into the port_data tables + */ +#define VPE_PORT_LUMA1_IN 0 +#define VPE_PORT_CHROMA1_IN 1 +#define VPE_PORT_LUMA2_IN 2 +#define VPE_PORT_CHROMA2_IN 3 +#define VPE_PORT_LUMA3_IN 4 +#define VPE_PORT_CHROMA3_IN 5 +#define VPE_PORT_MV_IN 6 +#define VPE_PORT_MV_OUT 7 +#define VPE_PORT_LUMA_OUT 8 +#define VPE_PORT_CHROMA_OUT 9 +#define VPE_PORT_RGB_OUT 10 + +static const struct vpe_port_data port_data[11] = { + [VPE_PORT_LUMA1_IN] = { + .channel = VPE_CHAN_LUMA1_IN, + .vb_index = 0, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA1_IN] = { + .channel = VPE_CHAN_CHROMA1_IN, + .vb_index = 0, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_LUMA2_IN] = { + .channel = VPE_CHAN_LUMA2_IN, + .vb_index = 1, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA2_IN] = { + .channel = VPE_CHAN_CHROMA2_IN, + .vb_index = 1, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_LUMA3_IN] = { + .channel = VPE_CHAN_LUMA3_IN, + .vb_index = 2, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA3_IN] = { + .channel = VPE_CHAN_CHROMA3_IN, + .vb_index = 2, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_MV_IN] = { + .channel = VPE_CHAN_MV_IN, + }, + [VPE_PORT_MV_OUT] = { + .channel = VPE_CHAN_MV_OUT, + }, + [VPE_PORT_LUMA_OUT] = { + .channel = VPE_CHAN_LUMA_OUT, + .vb_part = VPE_LUMA, + }, + [VPE_PORT_CHROMA_OUT] = { + .channel = VPE_CHAN_CHROMA_OUT, + .vb_part = VPE_CHROMA, + }, + [VPE_PORT_RGB_OUT] = { + .channel = VPE_CHAN_RGB_OUT, + .vb_part = VPE_LUMA, + }, +}; + + +/* driver info for each of the supported video formats */ +struct vpe_fmt { + char *name; /* human-readable name */ + u32 fourcc; /* standard format identifier */ + u8 types; /* CAPTURE and/or OUTPUT */ + u8 coplanar; /* set for unpacked Luma and Chroma */ + /* vpdma format info for each plane */ + struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES]; +}; + +static struct vpe_fmt vpe_formats[] = { + { + .name = "YUV 422 co-planar", + .fourcc = V4L2_PIX_FMT_NV16, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 1, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444], + &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444], + }, + }, + { + .name = "YUV 420 co-planar", + .fourcc = V4L2_PIX_FMT_NV12, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 1, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], + &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420], + }, + }, + { + .name = "YUYV 422 packed", + .fourcc = V4L2_PIX_FMT_YUYV, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 0, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422], + }, + }, + { + .name = "UYVY 422 packed", + .fourcc = V4L2_PIX_FMT_UYVY, + .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, + .coplanar = 0, + .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422], + }, + }, 
+}; + +/* + * per-queue, driver-specific private data. + * there is one source queue and one destination queue for each m2m context. + */ +struct vpe_q_data { + unsigned int width; /* frame width */ + unsigned int height; /* frame height */ + unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */ + enum v4l2_colorspace colorspace; + enum v4l2_field field; /* supported field value */ + unsigned int flags; + unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */ + struct v4l2_rect c_rect; /* crop/compose rectangle */ + struct vpe_fmt *fmt; /* format info */ +}; + +/* vpe_q_data flag bits */ +#define Q_DATA_FRAME_1D (1 << 0) +#define Q_DATA_MODE_TILED (1 << 1) +#define Q_DATA_INTERLACED (1 << 2) + +enum { + Q_DATA_SRC = 0, + Q_DATA_DST = 1, +}; + +/* find our format description corresponding to the passed v4l2_format */ +static struct vpe_fmt *find_format(struct v4l2_format *f) +{ + struct vpe_fmt *fmt; + unsigned int k; + + for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) { + fmt = &vpe_formats[k]; + if (fmt->fourcc == f->fmt.pix.pixelformat) + return fmt; + } + + return NULL; +} + +/* + * there is one vpe_dev structure in the driver, it is shared by + * all instances. + */ +struct vpe_dev { + struct v4l2_device v4l2_dev; + struct video_device vfd; + struct v4l2_m2m_dev *m2m_dev; + + atomic_t num_instances; /* count of driver instances */ + dma_addr_t loaded_mmrs; /* shadow mmrs in device */ + struct mutex dev_mutex; + spinlock_t lock; + + int irq; + void __iomem *base; + + struct vb2_alloc_ctx *alloc_ctx; + struct vpdma_data *vpdma; /* vpdma data handle */ +}; + +/* + * There is one vpe_ctx structure for each m2m context. + */ +struct vpe_ctx { + struct v4l2_fh fh; + struct vpe_dev *dev; + struct v4l2_m2m_ctx *m2m_ctx; + struct v4l2_ctrl_handler hdl; + + unsigned int field; /* current field */ + unsigned int sequence; /* current frame/field seq */ + unsigned int aborting; /* abort after next irq */ + + unsigned int bufs_per_job; /* input buffers per batch */ + unsigned int bufs_completed; /* bufs done in this batch */ + + struct vpe_q_data q_data[2]; /* src & dst queue data */ + struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS]; + struct vb2_buffer *dst_vb; + + dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */ + void *mv_buf[2]; /* virtual addrs of motion vector bufs */ + size_t mv_buf_size; /* current motion vector buffer size */ + struct vpdma_buf mmr_adb; /* shadow reg addr/data block */ + struct vpdma_desc_list desc_list; /* DMA descriptor list */ + + bool deinterlacing; /* using de-interlacer */ + bool load_mmrs; /* have new shadow reg values */ + + unsigned int src_mv_buf_selector; +}; + + +/* + * M2M devices get 2 queues. + * Return the queue given the type. 
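+ *
+ * The ioctl and queue-setup helpers below look up per-queue state this way;
+ * vpe_g_fmt(), for example, fetches the source queue's colorspace with
+ * get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) when a capture format
+ * is queried.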
+ */ +static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx, + enum v4l2_buf_type type) +{ + switch (type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return &ctx->q_data[Q_DATA_SRC]; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + return &ctx->q_data[Q_DATA_DST]; + default: + BUG(); + } + return NULL; +} + +static u32 read_reg(struct vpe_dev *dev, int offset) +{ + return ioread32(dev->base + offset); +} + +static void write_reg(struct vpe_dev *dev, int offset, u32 value) +{ + iowrite32(value, dev->base + offset); +} + +/* register field read/write helpers */ +static int get_field(u32 value, u32 mask, int shift) +{ + return (value & (mask << shift)) >> shift; +} + +static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift) +{ + return get_field(read_reg(dev, offset), mask, shift); +} + +static void write_field(u32 *valp, u32 field, u32 mask, int shift) +{ + u32 val = *valp; + + val &= ~(mask << shift); + val |= (field & mask) << shift; + *valp = val; +} + +static void write_field_reg(struct vpe_dev *dev, int offset, u32 field, + u32 mask, int shift) +{ + u32 val = read_reg(dev, offset); + + write_field(&val, field, mask, shift); + + write_reg(dev, offset, val); +} + +/* + * DMA address/data block for the shadow registers + */ +struct vpe_mmr_adb { + struct vpdma_adb_hdr out_fmt_hdr; + u32 out_fmt_reg[1]; + u32 out_fmt_pad[3]; + struct vpdma_adb_hdr us1_hdr; + u32 us1_regs[8]; + struct vpdma_adb_hdr us2_hdr; + u32 us2_regs[8]; + struct vpdma_adb_hdr us3_hdr; + u32 us3_regs[8]; + struct vpdma_adb_hdr dei_hdr; + u32 dei_regs[8]; + struct vpdma_adb_hdr sc_hdr; + u32 sc_regs[1]; + u32 sc_pad[3]; + struct vpdma_adb_hdr csc_hdr; + u32 csc_regs[6]; + u32 csc_pad[2]; +}; + +#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \ + VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a) +/* + * Set the headers for all of the address/data block structures. + */ +static void init_adb_hdrs(struct vpe_ctx *ctx) +{ + VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT); + VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0); + VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0); + VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0); + VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE); + VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0); + VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00); +}; + +/* + * Allocate or re-allocate the motion vector DMA buffers + * There are two buffers, one for input and one for output. + * However, the roles are reversed after each field is processed. + * In other words, after each field is processed, the previous + * output (dst) MV buffer becomes the new input (src) MV buffer. 
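+ *
+ * The swap is tracked with ctx->src_mv_buf_selector: add_in_dtd() reads from
+ * mv_buf_dma[selector], add_out_dtd() writes to mv_buf_dma[!selector], and
+ * the interrupt handler flips the selector once a field has been processed.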
+ */ +static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size) +{ + struct device *dev = ctx->dev->v4l2_dev.dev; + + if (ctx->mv_buf_size == size) + return 0; + + if (ctx->mv_buf[0]) + dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0], + ctx->mv_buf_dma[0]); + + if (ctx->mv_buf[1]) + dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1], + ctx->mv_buf_dma[1]); + + if (size == 0) + return 0; + + ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0], + GFP_KERNEL); + if (!ctx->mv_buf[0]) { + vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); + return -ENOMEM; + } + + ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1], + GFP_KERNEL); + if (!ctx->mv_buf[1]) { + vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); + dma_free_coherent(dev, size, ctx->mv_buf[0], + ctx->mv_buf_dma[0]); + + return -ENOMEM; + } + + ctx->mv_buf_size = size; + ctx->src_mv_buf_selector = 0; + + return 0; +} + +static void free_mv_buffers(struct vpe_ctx *ctx) +{ + realloc_mv_buffers(ctx, 0); +} + +/* + * While de-interlacing, we keep the two most recent input buffers + * around. This function frees those two buffers when we have + * finished processing the current stream. + */ +static void free_vbs(struct vpe_ctx *ctx) +{ + struct vpe_dev *dev = ctx->dev; + unsigned long flags; + + if (ctx->src_vbs[2] == NULL) + return; + + spin_lock_irqsave(&dev->lock, flags); + if (ctx->src_vbs[2]) { + v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE); + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* + * Enable or disable the VPE clocks + */ +static void vpe_set_clock_enable(struct vpe_dev *dev, bool on) +{ + u32 val = 0; + + if (on) + val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE; + write_reg(dev, VPE_CLK_ENABLE, val); +} + +static void vpe_top_reset(struct vpe_dev *dev) +{ + + write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK, + VPE_DATA_PATH_CLK_RESET_SHIFT); + + usleep_range(100, 150); + + write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK, + VPE_DATA_PATH_CLK_RESET_SHIFT); +} + +static void vpe_top_vpdma_reset(struct vpe_dev *dev) +{ + write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK, + VPE_VPDMA_CLK_RESET_SHIFT); + + usleep_range(100, 150); + + write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK, + VPE_VPDMA_CLK_RESET_SHIFT); +} + +/* + * Load the correct of upsampler coefficients into the shadow MMRs + */ +static void set_us_coefficients(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; + u32 *us1_reg = &mmr_adb->us1_regs[0]; + u32 *us2_reg = &mmr_adb->us2_regs[0]; + u32 *us3_reg = &mmr_adb->us3_regs[0]; + const unsigned short *cp, *end_cp; + + cp = &us_coeffs[0].anchor_fid0_c0; + + if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */ + cp += sizeof(us_coeffs[0]) / sizeof(*cp); + + end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp); + + while (cp < end_cp) { + write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT); + write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT); + *us2_reg++ = *us1_reg; + *us3_reg++ = *us1_reg++; + } + ctx->load_mmrs = true; +} + +/* + * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs. 
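+ *
+ * For NV12 (YUV420) sources the chroma plane carries half the lines of the
+ * luma plane, so the chroma upsampler is enabled (cfg mode 0) and the chroma
+ * VPDMA clients use line mode 0 ("double lines to line buffer"); 422 sources
+ * keep cfg mode 1 with the upsampler disabled.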
+ */ +static void set_cfg_and_line_modes(struct vpe_ctx *ctx) +{ + struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt; + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *us1_reg0 = &mmr_adb->us1_regs[0]; + u32 *us2_reg0 = &mmr_adb->us2_regs[0]; + u32 *us3_reg0 = &mmr_adb->us3_regs[0]; + int line_mode = 1; + int cfg_mode = 1; + + /* + * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing. + * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing. + */ + + if (fmt->fourcc == V4L2_PIX_FMT_NV12) { + cfg_mode = 0; + line_mode = 0; /* double lines to line buffer */ + } + + write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); + write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); + write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); + + /* regs for now */ + vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN); + vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN); + vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN); + + /* frame start for input luma */ + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_LUMA1_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_LUMA2_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_LUMA3_IN); + + /* frame start for input chroma */ + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_CHROMA1_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_CHROMA2_IN); + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_CHROMA3_IN); + + /* frame start for MV in client */ + vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, + VPE_CHAN_MV_IN); + + ctx->load_mmrs = true; +} + +/* + * Set the shadow registers that are modified when the source + * format changes. + */ +static void set_src_registers(struct vpe_ctx *ctx) +{ + set_us_coefficients(ctx); +} + +/* + * Set the shadow registers that are modified when the destination + * format changes. + */ +static void set_dst_registers(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt; + u32 val = 0; + + /* select RGB path when color space conversion is supported in future */ + if (fmt->fourcc == V4L2_PIX_FMT_RGB24) + val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER; + else if (fmt->fourcc == V4L2_PIX_FMT_NV16) + val |= VPE_COLOR_SEPARATE_422; + + /* The source of CHR_DS is always the scaler, whether it's used or not */ + val |= VPE_DS_SRC_DEI_SCALER; + + if (fmt->fourcc != V4L2_PIX_FMT_NV12) + val |= VPE_DS_BYPASS; + + mmr_adb->out_fmt_reg[0] = val; + + ctx->load_mmrs = true; +} + +/* + * Set the de-interlacer shadow register values + */ +static void set_dei_regs(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; + unsigned int src_h = s_q_data->c_rect.height; + unsigned int src_w = s_q_data->c_rect.width; + u32 *dei_mmr0 = &mmr_adb->dei_regs[0]; + bool deinterlace = true; + u32 val = 0; + + /* + * according to TRM, we should set DEI in progressive bypass mode when + * the input content is progressive, however, DEI is bypassed correctly + * for both progressive and interlace content in interlace bypass mode. + * It has been recommended not to use progressive bypass mode. 
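+ *
+ * Concretely, the code below selects VPE_DEI_INTERLACE_BYPASS whenever the
+ * source is progressive or de-interlacing is disabled, and doubles the
+ * programmed frame height only when the de-interlacer actually runs.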
+ */ + if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) || + !(s_q_data->flags & Q_DATA_INTERLACED)) { + deinterlace = false; + val = VPE_DEI_INTERLACE_BYPASS; + } + + src_h = deinterlace ? src_h * 2 : src_h; + + val |= (src_h << VPE_DEI_HEIGHT_SHIFT) | + (src_w << VPE_DEI_WIDTH_SHIFT) | + VPE_DEI_FIELD_FLUSH; + + *dei_mmr0 = val; + + ctx->load_mmrs = true; +} + +static void set_dei_shadow_registers(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *dei_mmr = &mmr_adb->dei_regs[0]; + const struct vpe_dei_regs *cur = &dei_regs; + + dei_mmr[2] = cur->mdt_spacial_freq_thr_reg; + dei_mmr[3] = cur->edi_config_reg; + dei_mmr[4] = cur->edi_lut_reg0; + dei_mmr[5] = cur->edi_lut_reg1; + dei_mmr[6] = cur->edi_lut_reg2; + dei_mmr[7] = cur->edi_lut_reg3; + + ctx->load_mmrs = true; +} + +static void set_csc_coeff_bypass(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5]; + + *shadow_csc_reg5 |= VPE_CSC_BYPASS; + + ctx->load_mmrs = true; +} + +static void set_sc_regs_bypass(struct vpe_ctx *ctx) +{ + struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; + u32 *sc_reg0 = &mmr_adb->sc_regs[0]; + u32 val = 0; + + val |= VPE_SC_BYPASS; + *sc_reg0 = val; + + ctx->load_mmrs = true; +} + +/* + * Set the shadow registers whose values are modified when either the + * source or destination format is changed. + */ +static int set_srcdst_params(struct vpe_ctx *ctx) +{ + struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; + struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; + size_t mv_buf_size; + int ret; + + ctx->sequence = 0; + ctx->field = V4L2_FIELD_TOP; + + if ((s_q_data->flags & Q_DATA_INTERLACED) && + !(d_q_data->flags & Q_DATA_INTERLACED)) { + const struct vpdma_data_format *mv = + &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; + + ctx->deinterlacing = 1; + mv_buf_size = + (s_q_data->width * s_q_data->height * mv->depth) >> 3; + } else { + ctx->deinterlacing = 0; + mv_buf_size = 0; + } + + free_vbs(ctx); + + ret = realloc_mv_buffers(ctx, mv_buf_size); + if (ret) + return ret; + + set_cfg_and_line_modes(ctx); + set_dei_regs(ctx); + set_csc_coeff_bypass(ctx); + set_sc_regs_bypass(ctx); + + return 0; +} + +/* + * Return the vpe_ctx structure for a given struct file + */ +static struct vpe_ctx *file2ctx(struct file *file) +{ + return container_of(file->private_data, struct vpe_ctx, fh); +} + +/* + * mem2mem callbacks + */ + +/** + * job_ready() - check whether an instance is ready to be scheduled to run + */ +static int job_ready(void *priv) +{ + struct vpe_ctx *ctx = priv; + int needed = ctx->bufs_per_job; + + if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) + needed += 2; /* need additional two most recent fields */ + + if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed) + return 0; + + return 1; +} + +static void job_abort(void *priv) +{ + struct vpe_ctx *ctx = priv; + + /* Will cancel the transaction in the next interrupt handler */ + ctx->aborting = 1; +} + +/* + * Lock access to the device + */ +static void vpe_lock(void *priv) +{ + struct vpe_ctx *ctx = priv; + struct vpe_dev *dev = ctx->dev; + mutex_lock(&dev->dev_mutex); +} + +static void vpe_unlock(void *priv) +{ + struct vpe_ctx *ctx = priv; + struct vpe_dev *dev = ctx->dev; + mutex_unlock(&dev->dev_mutex); +} + +static void vpe_dump_regs(struct vpe_dev *dev) +{ +#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r)) + + vpe_dbg(dev, "VPE Registers:\n"); + + DUMPREG(PID); + DUMPREG(SYSCONFIG); + 
DUMPREG(INT0_STATUS0_RAW); + DUMPREG(INT0_STATUS0); + DUMPREG(INT0_ENABLE0); + DUMPREG(INT0_STATUS1_RAW); + DUMPREG(INT0_STATUS1); + DUMPREG(INT0_ENABLE1); + DUMPREG(CLK_ENABLE); + DUMPREG(CLK_RESET); + DUMPREG(CLK_FORMAT_SELECT); + DUMPREG(CLK_RANGE_MAP); + DUMPREG(US1_R0); + DUMPREG(US1_R1); + DUMPREG(US1_R2); + DUMPREG(US1_R3); + DUMPREG(US1_R4); + DUMPREG(US1_R5); + DUMPREG(US1_R6); + DUMPREG(US1_R7); + DUMPREG(US2_R0); + DUMPREG(US2_R1); + DUMPREG(US2_R2); + DUMPREG(US2_R3); + DUMPREG(US2_R4); + DUMPREG(US2_R5); + DUMPREG(US2_R6); + DUMPREG(US2_R7); + DUMPREG(US3_R0); + DUMPREG(US3_R1); + DUMPREG(US3_R2); + DUMPREG(US3_R3); + DUMPREG(US3_R4); + DUMPREG(US3_R5); + DUMPREG(US3_R6); + DUMPREG(US3_R7); + DUMPREG(DEI_FRAME_SIZE); + DUMPREG(MDT_BYPASS); + DUMPREG(MDT_SF_THRESHOLD); + DUMPREG(EDI_CONFIG); + DUMPREG(DEI_EDI_LUT_R0); + DUMPREG(DEI_EDI_LUT_R1); + DUMPREG(DEI_EDI_LUT_R2); + DUMPREG(DEI_EDI_LUT_R3); + DUMPREG(DEI_FMD_WINDOW_R0); + DUMPREG(DEI_FMD_WINDOW_R1); + DUMPREG(DEI_FMD_CONTROL_R0); + DUMPREG(DEI_FMD_CONTROL_R1); + DUMPREG(DEI_FMD_STATUS_R0); + DUMPREG(DEI_FMD_STATUS_R1); + DUMPREG(DEI_FMD_STATUS_R2); + DUMPREG(SC_MP_SC0); + DUMPREG(SC_MP_SC1); + DUMPREG(SC_MP_SC2); + DUMPREG(SC_MP_SC3); + DUMPREG(SC_MP_SC4); + DUMPREG(SC_MP_SC5); + DUMPREG(SC_MP_SC6); + DUMPREG(SC_MP_SC8); + DUMPREG(SC_MP_SC9); + DUMPREG(SC_MP_SC10); + DUMPREG(SC_MP_SC11); + DUMPREG(SC_MP_SC12); + DUMPREG(SC_MP_SC13); + DUMPREG(SC_MP_SC17); + DUMPREG(SC_MP_SC18); + DUMPREG(SC_MP_SC19); + DUMPREG(SC_MP_SC20); + DUMPREG(SC_MP_SC21); + DUMPREG(SC_MP_SC22); + DUMPREG(SC_MP_SC23); + DUMPREG(SC_MP_SC24); + DUMPREG(SC_MP_SC25); + DUMPREG(CSC_CSC00); + DUMPREG(CSC_CSC01); + DUMPREG(CSC_CSC02); + DUMPREG(CSC_CSC03); + DUMPREG(CSC_CSC04); + DUMPREG(CSC_CSC05); +#undef DUMPREG +} + +static void add_out_dtd(struct vpe_ctx *ctx, int port) +{ + struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST]; + const struct vpe_port_data *p_data = &port_data[port]; + struct vb2_buffer *vb = ctx->dst_vb; + struct v4l2_rect *c_rect = &q_data->c_rect; + struct vpe_fmt *fmt = q_data->fmt; + const struct vpdma_data_format *vpdma_fmt; + int mv_buf_selector = !ctx->src_mv_buf_selector; + dma_addr_t dma_addr; + u32 flags = 0; + + if (port == VPE_PORT_MV_OUT) { + vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; + dma_addr = ctx->mv_buf_dma[mv_buf_selector]; + } else { + /* to incorporate interleaved formats */ + int plane = fmt->coplanar ? 
p_data->vb_part : 0; + + vpdma_fmt = fmt->vpdma_fmt[plane]; + dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); + if (!dma_addr) { + vpe_err(ctx->dev, + "acquiring output buffer(%d) dma_addr failed\n", + port); + return; + } + } + + if (q_data->flags & Q_DATA_FRAME_1D) + flags |= VPDMA_DATA_FRAME_1D; + if (q_data->flags & Q_DATA_MODE_TILED) + flags |= VPDMA_DATA_MODE_TILED; + + vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr, + p_data->channel, flags); +} + +static void add_in_dtd(struct vpe_ctx *ctx, int port) +{ + struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC]; + const struct vpe_port_data *p_data = &port_data[port]; + struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index]; + struct v4l2_rect *c_rect = &q_data->c_rect; + struct vpe_fmt *fmt = q_data->fmt; + const struct vpdma_data_format *vpdma_fmt; + int mv_buf_selector = ctx->src_mv_buf_selector; + int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM; + dma_addr_t dma_addr; + u32 flags = 0; + + if (port == VPE_PORT_MV_IN) { + vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; + dma_addr = ctx->mv_buf_dma[mv_buf_selector]; + } else { + /* to incorporate interleaved formats */ + int plane = fmt->coplanar ? p_data->vb_part : 0; + + vpdma_fmt = fmt->vpdma_fmt[plane]; + + dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); + if (!dma_addr) { + vpe_err(ctx->dev, + "acquiring input buffer(%d) dma_addr failed\n", + port); + return; + } + } + + if (q_data->flags & Q_DATA_FRAME_1D) + flags |= VPDMA_DATA_FRAME_1D; + if (q_data->flags & Q_DATA_MODE_TILED) + flags |= VPDMA_DATA_MODE_TILED; + + vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height, + c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags); +} + +/* + * Enable the expected IRQ sources + */ +static void enable_irqs(struct vpe_ctx *ctx) +{ + write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE); + write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT | + VPE_DS1_UV_ERROR_INT); + + vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true); +} + +static void disable_irqs(struct vpe_ctx *ctx) +{ + write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff); + write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff); + + vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false); +} + +/* device_run() - prepares and starts the device + * + * This function is only called when both the source and destination + * buffers are in place. 
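+ *
+ * job_ready() gates this on the source side: it requires bufs_per_job source
+ * buffers, plus two extra fields to prime the de-interlacer the first time a
+ * job runs for a stream, before the mem2mem framework calls device_run().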
+ */ +static void device_run(void *priv) +{ + struct vpe_ctx *ctx = priv; + struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; + + if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) { + ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->src_vbs[2] == NULL); + ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->src_vbs[1] == NULL); + } + + ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->src_vbs[0] == NULL); + ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); + WARN_ON(ctx->dst_vb == NULL); + + /* config descriptors */ + if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) { + vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb); + vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb); + ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr; + ctx->load_mmrs = false; + } + + /* output data descriptors */ + if (ctx->deinterlacing) + add_out_dtd(ctx, VPE_PORT_MV_OUT); + + add_out_dtd(ctx, VPE_PORT_LUMA_OUT); + if (d_q_data->fmt->coplanar) + add_out_dtd(ctx, VPE_PORT_CHROMA_OUT); + + /* input data descriptors */ + if (ctx->deinterlacing) { + add_in_dtd(ctx, VPE_PORT_LUMA3_IN); + add_in_dtd(ctx, VPE_PORT_CHROMA3_IN); + + add_in_dtd(ctx, VPE_PORT_LUMA2_IN); + add_in_dtd(ctx, VPE_PORT_CHROMA2_IN); + } + + add_in_dtd(ctx, VPE_PORT_LUMA1_IN); + add_in_dtd(ctx, VPE_PORT_CHROMA1_IN); + + if (ctx->deinterlacing) + add_in_dtd(ctx, VPE_PORT_MV_IN); + + /* sync on channel control descriptors for input ports */ + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN); + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN); + + if (ctx->deinterlacing) { + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_LUMA2_IN); + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_CHROMA2_IN); + + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_LUMA3_IN); + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, + VPE_CHAN_CHROMA3_IN); + + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN); + } + + /* sync on channel control descriptors for output ports */ + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT); + if (d_q_data->fmt->coplanar) + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT); + + if (ctx->deinterlacing) + vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT); + + enable_irqs(ctx); + + vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf); + vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list); +} + +static void dei_error(struct vpe_ctx *ctx) +{ + dev_warn(ctx->dev->v4l2_dev.dev, + "received DEI error interrupt\n"); +} + +static void ds1_uv_error(struct vpe_ctx *ctx) +{ + dev_warn(ctx->dev->v4l2_dev.dev, + "received downsampler error interrupt\n"); +} + +static irqreturn_t vpe_irq(int irq_vpe, void *data) +{ + struct vpe_dev *dev = (struct vpe_dev *)data; + struct vpe_ctx *ctx; + struct vpe_q_data *d_q_data; + struct vb2_buffer *s_vb, *d_vb; + struct v4l2_buffer *s_buf, *d_buf; + unsigned long flags; + u32 irqst0, irqst1; + + irqst0 = read_reg(dev, VPE_INT0_STATUS0); + if (irqst0) { + write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0); + vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0); + } + + irqst1 = read_reg(dev, VPE_INT0_STATUS1); + if (irqst1) { + write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1); + vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1); + } + + ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); + if (!ctx) { + vpe_err(dev, "instance released before end of transaction\n"); + goto handled; + } + + if (irqst1) { + if 
(irqst1 & VPE_DEI_ERROR_INT) { + irqst1 &= ~VPE_DEI_ERROR_INT; + dei_error(ctx); + } + if (irqst1 & VPE_DS1_UV_ERROR_INT) { + irqst1 &= ~VPE_DS1_UV_ERROR_INT; + ds1_uv_error(ctx); + } + } + + if (irqst0) { + if (irqst0 & VPE_INT0_LIST0_COMPLETE) + vpdma_clear_list_stat(ctx->dev->vpdma); + + irqst0 &= ~(VPE_INT0_LIST0_COMPLETE); + } + + if (irqst0 | irqst1) { + dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: " + "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n", + irqst0, irqst1); + } + + disable_irqs(ctx); + + vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb); + + vpdma_reset_desc_list(&ctx->desc_list); + + /* the previous dst mv buffer becomes the next src mv buffer */ + ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector; + + if (ctx->aborting) + goto finished; + + s_vb = ctx->src_vbs[0]; + d_vb = ctx->dst_vb; + s_buf = &s_vb->v4l2_buf; + d_buf = &d_vb->v4l2_buf; + + d_buf->timestamp = s_buf->timestamp; + if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) { + d_buf->flags |= V4L2_BUF_FLAG_TIMECODE; + d_buf->timecode = s_buf->timecode; + } + d_buf->sequence = ctx->sequence; + d_buf->field = ctx->field; + + d_q_data = &ctx->q_data[Q_DATA_DST]; + if (d_q_data->flags & Q_DATA_INTERLACED) { + if (ctx->field == V4L2_FIELD_BOTTOM) { + ctx->sequence++; + ctx->field = V4L2_FIELD_TOP; + } else { + WARN_ON(ctx->field != V4L2_FIELD_TOP); + ctx->field = V4L2_FIELD_BOTTOM; + } + } else { + ctx->sequence++; + } + + if (ctx->deinterlacing) + s_vb = ctx->src_vbs[2]; + + spin_lock_irqsave(&dev->lock, flags); + v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE); + spin_unlock_irqrestore(&dev->lock, flags); + + if (ctx->deinterlacing) { + ctx->src_vbs[2] = ctx->src_vbs[1]; + ctx->src_vbs[1] = ctx->src_vbs[0]; + } + + ctx->bufs_completed++; + if (ctx->bufs_completed < ctx->bufs_per_job) { + device_run(ctx); + goto handled; + } + +finished: + vpe_dbg(ctx->dev, "finishing transaction\n"); + ctx->bufs_completed = 0; + v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx); +handled: + return IRQ_HANDLED; +} + +/* + * video ioctls + */ +static int vpe_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1); + strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1); + strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info)); + cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; +} + +static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type) +{ + int i, index; + struct vpe_fmt *fmt = NULL; + + index = 0; + for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) { + if (vpe_formats[i].types & type) { + if (index == f->index) { + fmt = &vpe_formats[i]; + break; + } + index++; + } + } + + if (!fmt) + return -EINVAL; + + strncpy(f->description, fmt->name, sizeof(f->description) - 1); + f->pixelformat = fmt->fourcc; + return 0; +} + +static int vpe_enum_fmt(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + if (V4L2_TYPE_IS_OUTPUT(f->type)) + return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT); + + return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE); +} + +static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; + struct vpe_ctx *ctx = file2ctx(file); + struct vb2_queue *vq; + struct vpe_q_data *q_data; + int i; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) + return -EINVAL; + + q_data = 
get_q_data(ctx, f->type); + + pix->width = q_data->width; + pix->height = q_data->height; + pix->pixelformat = q_data->fmt->fourcc; + pix->field = q_data->field; + + if (V4L2_TYPE_IS_OUTPUT(f->type)) { + pix->colorspace = q_data->colorspace; + } else { + struct vpe_q_data *s_q_data; + + /* get colorspace from the source queue */ + s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + + pix->colorspace = s_q_data->colorspace; + } + + pix->num_planes = q_data->fmt->coplanar ? 2 : 1; + + for (i = 0; i < pix->num_planes; i++) { + pix->plane_fmt[i].bytesperline = q_data->bytesperline[i]; + pix->plane_fmt[i].sizeimage = q_data->sizeimage[i]; + } + + return 0; +} + +static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, + struct vpe_fmt *fmt, int type) +{ + struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *plane_fmt; + int i; + + if (!fmt || !(fmt->types & type)) { + vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n", + pix->pixelformat); + return -EINVAL; + } + + if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE) + pix->field = V4L2_FIELD_NONE; + + v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN, + &pix->height, MIN_H, MAX_H, H_ALIGN, + S_ALIGN); + + pix->num_planes = fmt->coplanar ? 2 : 1; + pix->pixelformat = fmt->fourcc; + + if (type == VPE_FMT_TYPE_CAPTURE) { + struct vpe_q_data *s_q_data; + + /* get colorspace from the source queue */ + s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + + pix->colorspace = s_q_data->colorspace; + } else { + if (!pix->colorspace) + pix->colorspace = V4L2_COLORSPACE_SMPTE240M; + } + + for (i = 0; i < pix->num_planes; i++) { + int depth; + + plane_fmt = &pix->plane_fmt[i]; + depth = fmt->vpdma_fmt[i]->depth; + + if (i == VPE_LUMA) + plane_fmt->bytesperline = + round_up((pix->width * depth) >> 3, + 1 << L_ALIGN); + else + plane_fmt->bytesperline = pix->width; + + plane_fmt->sizeimage = + (pix->height * pix->width * depth) >> 3; + } + + return 0; +} + +static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f) +{ + struct vpe_ctx *ctx = file2ctx(file); + struct vpe_fmt *fmt = find_format(f); + + if (V4L2_TYPE_IS_OUTPUT(f->type)) + return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT); + else + return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE); +} + +static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *plane_fmt; + struct vpe_q_data *q_data; + struct vb2_queue *vq; + int i; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) + return -EINVAL; + + if (vb2_is_busy(vq)) { + vpe_err(ctx->dev, "queue busy\n"); + return -EBUSY; + } + + q_data = get_q_data(ctx, f->type); + if (!q_data) + return -EINVAL; + + q_data->fmt = find_format(f); + q_data->width = pix->width; + q_data->height = pix->height; + q_data->colorspace = pix->colorspace; + q_data->field = pix->field; + + for (i = 0; i < pix->num_planes; i++) { + plane_fmt = &pix->plane_fmt[i]; + + q_data->bytesperline[i] = plane_fmt->bytesperline; + q_data->sizeimage[i] = plane_fmt->sizeimage; + } + + q_data->c_rect.left = 0; + q_data->c_rect.top = 0; + q_data->c_rect.width = q_data->width; + q_data->c_rect.height = q_data->height; + + if (q_data->field == V4L2_FIELD_ALTERNATE) + q_data->flags |= Q_DATA_INTERLACED; + else + q_data->flags &= ~Q_DATA_INTERLACED; + + vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d", + f->type, q_data->width, 
q_data->height, q_data->fmt->fourcc, + q_data->bytesperline[VPE_LUMA]); + if (q_data->fmt->coplanar) + vpe_dbg(ctx->dev, " bpl_uv %d\n", + q_data->bytesperline[VPE_CHROMA]); + + return 0; +} + +static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f) +{ + int ret; + struct vpe_ctx *ctx = file2ctx(file); + + ret = vpe_try_fmt(file, priv, f); + if (ret) + return ret; + + ret = __vpe_s_fmt(ctx, f); + if (ret) + return ret; + + if (V4L2_TYPE_IS_OUTPUT(f->type)) + set_src_registers(ctx); + else + set_dst_registers(ctx); + + return set_srcdst_params(ctx); +} + +static int vpe_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *reqbufs) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); +} + +static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); +} + +static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); +} + +static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); +} + +static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type) +{ + struct vpe_ctx *ctx = file2ctx(file); + + return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); +} + +static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) +{ + struct vpe_ctx *ctx = file2ctx(file); + + vpe_dump_regs(ctx->dev); + vpdma_dump_regs(ctx->dev->vpdma); + + return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); +} + +/* + * defines number of buffers/frames a context can process with VPE before + * switching to a different context. default value is 1 buffer per context + */ +#define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0) + +static int vpe_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct vpe_ctx *ctx = + container_of(ctrl->handler, struct vpe_ctx, hdl); + + switch (ctrl->id) { + case V4L2_CID_VPE_BUFS_PER_JOB: + ctx->bufs_per_job = ctrl->val; + break; + + default: + vpe_err(ctx->dev, "Invalid control\n"); + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops vpe_ctrl_ops = { + .s_ctrl = vpe_s_ctrl, +}; + +static const struct v4l2_ioctl_ops vpe_ioctl_ops = { + .vidioc_querycap = vpe_querycap, + + .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt, + .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt, + .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt, + .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt, + + .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt, + .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt, + .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt, + .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt, + + .vidioc_reqbufs = vpe_reqbufs, + .vidioc_querybuf = vpe_querybuf, + + .vidioc_qbuf = vpe_qbuf, + .vidioc_dqbuf = vpe_dqbuf, + + .vidioc_streamon = vpe_streamon, + .vidioc_streamoff = vpe_streamoff, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +/* + * Queue operations + */ +static int vpe_queue_setup(struct vb2_queue *vq, + const struct v4l2_format *fmt, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], void *alloc_ctxs[]) +{ + int i; + struct vpe_ctx *ctx = vb2_get_drv_priv(vq); + struct vpe_q_data *q_data; + + q_data = get_q_data(ctx, vq->type); + + *nplanes = q_data->fmt->coplanar ? 
2 : 1; + + for (i = 0; i < *nplanes; i++) { + sizes[i] = q_data->sizeimage[i]; + alloc_ctxs[i] = ctx->dev->alloc_ctx; + } + + vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers, + sizes[VPE_LUMA]); + if (q_data->fmt->coplanar) + vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]); + + return 0; +} + +static int vpe_buf_prepare(struct vb2_buffer *vb) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct vpe_q_data *q_data; + int i, num_planes; + + vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type); + + q_data = get_q_data(ctx, vb->vb2_queue->type); + num_planes = q_data->fmt->coplanar ? 2 : 1; + + for (i = 0; i < num_planes; i++) { + if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) { + vpe_err(ctx->dev, + "data will not fit into plane (%lu < %lu)\n", + vb2_plane_size(vb, i), + (long) q_data->sizeimage[i]); + return -EINVAL; + } + } + + for (i = 0; i < num_planes; i++) + vb2_set_plane_payload(vb, i, q_data->sizeimage[i]); + + return 0; +} + +static void vpe_buf_queue(struct vb2_buffer *vb) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); +} + +static void vpe_wait_prepare(struct vb2_queue *q) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(q); + vpe_unlock(ctx); +} + +static void vpe_wait_finish(struct vb2_queue *q) +{ + struct vpe_ctx *ctx = vb2_get_drv_priv(q); + vpe_lock(ctx); +} + +static struct vb2_ops vpe_qops = { + .queue_setup = vpe_queue_setup, + .buf_prepare = vpe_buf_prepare, + .buf_queue = vpe_buf_queue, + .wait_prepare = vpe_wait_prepare, + .wait_finish = vpe_wait_finish, +}; + +static int queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct vpe_ctx *ctx = priv; + int ret; + + memset(src_vq, 0, sizeof(*src_vq)); + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_MMAP; + src_vq->drv_priv = ctx; + src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + src_vq->ops = &vpe_qops; + src_vq->mem_ops = &vb2_dma_contig_memops; + src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; + + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + memset(dst_vq, 0, sizeof(*dst_vq)); + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + dst_vq->io_modes = VB2_MMAP; + dst_vq->drv_priv = ctx; + dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + dst_vq->ops = &vpe_qops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; + + return vb2_queue_init(dst_vq); +} + +static const struct v4l2_ctrl_config vpe_bufs_per_job = { + .ops = &vpe_ctrl_ops, + .id = V4L2_CID_VPE_BUFS_PER_JOB, + .name = "Buffers Per Transaction", + .type = V4L2_CTRL_TYPE_INTEGER, + .def = VPE_DEF_BUFS_PER_JOB, + .min = 1, + .max = VIDEO_MAX_FRAME, + .step = 1, +}; + +/* + * File operations + */ +static int vpe_open(struct file *file) +{ + struct vpe_dev *dev = video_drvdata(file); + struct vpe_ctx *ctx = NULL; + struct vpe_q_data *s_q_data; + struct v4l2_ctrl_handler *hdl; + int ret; + + vpe_dbg(dev, "vpe_open\n"); + + if (!dev->vpdma->ready) { + vpe_err(dev, "vpdma firmware not loaded\n"); + return -ENODEV; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + + if (mutex_lock_interruptible(&dev->dev_mutex)) { + ret = -ERESTARTSYS; + goto free_ctx; + } + + ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE, + VPDMA_LIST_TYPE_NORMAL); + if (ret != 0) + goto unlock; + + ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb)); + if (ret != 0) + goto free_desc_list; 
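+
+	/*
+	 * Both the descriptor list and the shadow register ADB buffer are now
+	 * allocated; any failure from here on unwinds through the exit_fh and
+	 * free_desc_list labels at the end of this function.
+	 */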
+ + init_adb_hdrs(ctx); + + v4l2_fh_init(&ctx->fh, video_devdata(file)); + file->private_data = &ctx->fh; + + hdl = &ctx->hdl; + v4l2_ctrl_handler_init(hdl, 1); + v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL); + if (hdl->error) { + ret = hdl->error; + goto exit_fh; + } + ctx->fh.ctrl_handler = hdl; + v4l2_ctrl_handler_setup(hdl); + + s_q_data = &ctx->q_data[Q_DATA_SRC]; + s_q_data->fmt = &vpe_formats[2]; + s_q_data->width = 1920; + s_q_data->height = 1080; + s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height * + s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3; + s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M; + s_q_data->field = V4L2_FIELD_NONE; + s_q_data->c_rect.left = 0; + s_q_data->c_rect.top = 0; + s_q_data->c_rect.width = s_q_data->width; + s_q_data->c_rect.height = s_q_data->height; + s_q_data->flags = 0; + + ctx->q_data[Q_DATA_DST] = *s_q_data; + + set_dei_shadow_registers(ctx); + set_src_registers(ctx); + set_dst_registers(ctx); + ret = set_srcdst_params(ctx); + if (ret) + goto exit_fh; + + ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); + + if (IS_ERR(ctx->m2m_ctx)) { + ret = PTR_ERR(ctx->m2m_ctx); + goto exit_fh; + } + + v4l2_fh_add(&ctx->fh); + + /* + * for now, just report the creation of the first instance, we can later + * optimize the driver to enable or disable clocks when the first + * instance is created or the last instance released + */ + if (atomic_inc_return(&dev->num_instances) == 1) + vpe_dbg(dev, "first instance created\n"); + + ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB; + + ctx->load_mmrs = true; + + vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n", + ctx, ctx->m2m_ctx); + + mutex_unlock(&dev->dev_mutex); + + return 0; +exit_fh: + v4l2_ctrl_handler_free(hdl); + v4l2_fh_exit(&ctx->fh); + vpdma_free_desc_buf(&ctx->mmr_adb); +free_desc_list: + vpdma_free_desc_list(&ctx->desc_list); +unlock: + mutex_unlock(&dev->dev_mutex); +free_ctx: + kfree(ctx); + return ret; +} + +static int vpe_release(struct file *file) +{ + struct vpe_dev *dev = video_drvdata(file); + struct vpe_ctx *ctx = file2ctx(file); + + vpe_dbg(dev, "releasing instance %p\n", ctx); + + mutex_lock(&dev->dev_mutex); + free_vbs(ctx); + free_mv_buffers(ctx); + vpdma_free_desc_list(&ctx->desc_list); + vpdma_free_desc_buf(&ctx->mmr_adb); + + v4l2_fh_del(&ctx->fh); + v4l2_fh_exit(&ctx->fh); + v4l2_ctrl_handler_free(&ctx->hdl); + v4l2_m2m_ctx_release(ctx->m2m_ctx); + + kfree(ctx); + + /* + * for now, just report the release of the last instance, we can later + * optimize the driver to enable or disable clocks when the first + * instance is created or the last instance released + */ + if (atomic_dec_return(&dev->num_instances) == 0) + vpe_dbg(dev, "last instance released\n"); + + mutex_unlock(&dev->dev_mutex); + + return 0; +} + +static unsigned int vpe_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct vpe_ctx *ctx = file2ctx(file); + struct vpe_dev *dev = ctx->dev; + int ret; + + mutex_lock(&dev->dev_mutex); + ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); + mutex_unlock(&dev->dev_mutex); + return ret; +} + +static int vpe_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct vpe_ctx *ctx = file2ctx(file); + struct vpe_dev *dev = ctx->dev; + int ret; + + if (mutex_lock_interruptible(&dev->dev_mutex)) + return -ERESTARTSYS; + ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); + mutex_unlock(&dev->dev_mutex); + return ret; +} + +static const struct v4l2_file_operations vpe_fops = { + .owner = THIS_MODULE, + .open = vpe_open, + .release = 
vpe_release, + .poll = vpe_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vpe_mmap, +}; + +static struct video_device vpe_videodev = { + .name = VPE_MODULE_NAME, + .fops = &vpe_fops, + .ioctl_ops = &vpe_ioctl_ops, + .minor = -1, + .release = video_device_release, + .vfl_dir = VFL_DIR_M2M, +}; + +static struct v4l2_m2m_ops m2m_ops = { + .device_run = device_run, + .job_ready = job_ready, + .job_abort = job_abort, + .lock = vpe_lock, + .unlock = vpe_unlock, +}; + +static int vpe_runtime_get(struct platform_device *pdev) +{ + int r; + + dev_dbg(&pdev->dev, "vpe_runtime_get\n"); + + r = pm_runtime_get_sync(&pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +static void vpe_runtime_put(struct platform_device *pdev) +{ + + int r; + + dev_dbg(&pdev->dev, "vpe_runtime_put\n"); + + r = pm_runtime_put_sync(&pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} + +static int vpe_probe(struct platform_device *pdev) +{ + struct vpe_dev *dev; + struct video_device *vfd; + struct resource *res; + int ret, irq, func; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + spin_lock_init(&dev->lock); + + ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); + if (ret) + return ret; + + atomic_set(&dev->num_instances, 0); + mutex_init(&dev->dev_mutex); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top"); + /* + * HACK: we get resource info from device tree in the form of a list of + * VPE sub blocks, the driver currently uses only the base of vpe_top + * for register access, the driver should be changed later to access + * registers based on the sub block base addresses + */ + dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K); + if (IS_ERR(dev->base)) { + ret = PTR_ERR(dev->base); + goto v4l2_dev_unreg; + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME, + dev); + if (ret) + goto v4l2_dev_unreg; + + platform_set_drvdata(pdev, dev); + + dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); + if (IS_ERR(dev->alloc_ctx)) { + vpe_err(dev, "Failed to alloc vb2 context\n"); + ret = PTR_ERR(dev->alloc_ctx); + goto v4l2_dev_unreg; + } + + dev->m2m_dev = v4l2_m2m_init(&m2m_ops); + if (IS_ERR(dev->m2m_dev)) { + vpe_err(dev, "Failed to init mem2mem device\n"); + ret = PTR_ERR(dev->m2m_dev); + goto rel_ctx; + } + + pm_runtime_enable(&pdev->dev); + + ret = vpe_runtime_get(pdev); + if (ret) + goto rel_m2m; + + /* Perform clk enable followed by reset */ + vpe_set_clock_enable(dev, 1); + + vpe_top_reset(dev); + + func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK, + VPE_PID_FUNC_SHIFT); + vpe_dbg(dev, "VPE PID function %x\n", func); + + vpe_top_vpdma_reset(dev); + + dev->vpdma = vpdma_create(pdev); + if (IS_ERR(dev->vpdma)) + goto runtime_put; + + vfd = &dev->vfd; + *vfd = vpe_videodev; + vfd->lock = &dev->dev_mutex; + vfd->v4l2_dev = &dev->v4l2_dev; + + ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); + if (ret) { + vpe_err(dev, "Failed to register video device\n"); + goto runtime_put; + } + + video_set_drvdata(vfd, dev); + snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name); + dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n", + vfd->num); + + return 0; + +runtime_put: + vpe_runtime_put(pdev); +rel_m2m: + pm_runtime_disable(&pdev->dev); + v4l2_m2m_release(dev->m2m_dev); +rel_ctx: + vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); +v4l2_dev_unreg: + v4l2_device_unregister(&dev->v4l2_dev); + + return ret; +} + +static int vpe_remove(struct 
platform_device *pdev) +{ + struct vpe_dev *dev = + (struct vpe_dev *) platform_get_drvdata(pdev); + + v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME); + + v4l2_m2m_release(dev->m2m_dev); + video_unregister_device(&dev->vfd); + v4l2_device_unregister(&dev->v4l2_dev); + vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); + + vpe_set_clock_enable(dev, 0); + vpe_runtime_put(pdev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#if defined(CONFIG_OF) +static const struct of_device_id vpe_of_match[] = { + { + .compatible = "ti,vpe", + }, + {}, +}; +#else +#define vpe_of_match NULL +#endif + +static struct platform_driver vpe_pdrv = { + .probe = vpe_probe, + .remove = vpe_remove, + .driver = { + .name = VPE_MODULE_NAME, + .owner = THIS_MODULE, + .of_match_table = vpe_of_match, + }, +}; + +static void __exit vpe_exit(void) +{ + platform_driver_unregister(&vpe_pdrv); +} + +static int __init vpe_init(void) +{ + return platform_driver_register(&vpe_pdrv); +} + +module_init(vpe_init); +module_exit(vpe_exit); + +MODULE_DESCRIPTION("TI VPE driver"); +MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h new file mode 100644 index 00000000000..ed214e82839 --- /dev/null +++ b/drivers/media/platform/ti-vpe/vpe_regs.h @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2013 Texas Instruments Inc. + * + * David Griego, <dagriego@biglakesoftware.com> + * Dale Farnsworth, <dale@farnsworth.org> + * Archit Taneja, <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __TI_VPE_REGS_H +#define __TI_VPE_REGS_H + +/* VPE register offsets and field selectors */ + +/* VPE top level regs */ +#define VPE_PID 0x0000 +#define VPE_PID_MINOR_MASK 0x3f +#define VPE_PID_MINOR_SHIFT 0 +#define VPE_PID_CUSTOM_MASK 0x03 +#define VPE_PID_CUSTOM_SHIFT 6 +#define VPE_PID_MAJOR_MASK 0x07 +#define VPE_PID_MAJOR_SHIFT 8 +#define VPE_PID_RTL_MASK 0x1f +#define VPE_PID_RTL_SHIFT 11 +#define VPE_PID_FUNC_MASK 0xfff +#define VPE_PID_FUNC_SHIFT 16 +#define VPE_PID_SCHEME_MASK 0x03 +#define VPE_PID_SCHEME_SHIFT 30 + +#define VPE_SYSCONFIG 0x0010 +#define VPE_SYSCONFIG_IDLE_MASK 0x03 +#define VPE_SYSCONFIG_IDLE_SHIFT 2 +#define VPE_SYSCONFIG_STANDBY_MASK 0x03 +#define VPE_SYSCONFIG_STANDBY_SHIFT 4 +#define VPE_FORCE_IDLE_MODE 0 +#define VPE_NO_IDLE_MODE 1 +#define VPE_SMART_IDLE_MODE 2 +#define VPE_SMART_IDLE_WAKEUP_MODE 3 +#define VPE_FORCE_STANDBY_MODE 0 +#define VPE_NO_STANDBY_MODE 1 +#define VPE_SMART_STANDBY_MODE 2 +#define VPE_SMART_STANDBY_WAKEUP_MODE 3 + +#define VPE_INT0_STATUS0_RAW_SET 0x0020 +#define VPE_INT0_STATUS0_RAW VPE_INT0_STATUS0_RAW_SET +#define VPE_INT0_STATUS0_CLR 0x0028 +#define VPE_INT0_STATUS0 VPE_INT0_STATUS0_CLR +#define VPE_INT0_ENABLE0_SET 0x0030 +#define VPE_INT0_ENABLE0 VPE_INT0_ENABLE0_SET +#define VPE_INT0_ENABLE0_CLR 0x0038 +#define VPE_INT0_LIST0_COMPLETE (1 << 0) +#define VPE_INT0_LIST0_NOTIFY (1 << 1) +#define VPE_INT0_LIST1_COMPLETE (1 << 2) +#define VPE_INT0_LIST1_NOTIFY (1 << 3) +#define VPE_INT0_LIST2_COMPLETE (1 << 4) +#define VPE_INT0_LIST2_NOTIFY (1 << 5) +#define VPE_INT0_LIST3_COMPLETE (1 << 6) +#define VPE_INT0_LIST3_NOTIFY (1 << 7) +#define VPE_INT0_LIST4_COMPLETE (1 << 8) +#define VPE_INT0_LIST4_NOTIFY (1 << 9) +#define VPE_INT0_LIST5_COMPLETE (1 << 10) +#define VPE_INT0_LIST5_NOTIFY (1 << 11) +#define 
VPE_INT0_LIST6_COMPLETE (1 << 12) +#define VPE_INT0_LIST6_NOTIFY (1 << 13) +#define VPE_INT0_LIST7_COMPLETE (1 << 14) +#define VPE_INT0_LIST7_NOTIFY (1 << 15) +#define VPE_INT0_DESCRIPTOR (1 << 16) +#define VPE_DEI_FMD_INT (1 << 18) + +#define VPE_INT0_STATUS1_RAW_SET 0x0024 +#define VPE_INT0_STATUS1_RAW VPE_INT0_STATUS1_RAW_SET +#define VPE_INT0_STATUS1_CLR 0x002c +#define VPE_INT0_STATUS1 VPE_INT0_STATUS1_CLR +#define VPE_INT0_ENABLE1_SET 0x0034 +#define VPE_INT0_ENABLE1 VPE_INT0_ENABLE1_SET +#define VPE_INT0_ENABLE1_CLR 0x003c +#define VPE_INT0_CHANNEL_GROUP0 (1 << 0) +#define VPE_INT0_CHANNEL_GROUP1 (1 << 1) +#define VPE_INT0_CHANNEL_GROUP2 (1 << 2) +#define VPE_INT0_CHANNEL_GROUP3 (1 << 3) +#define VPE_INT0_CHANNEL_GROUP4 (1 << 4) +#define VPE_INT0_CHANNEL_GROUP5 (1 << 5) +#define VPE_INT0_CLIENT (1 << 7) +#define VPE_DEI_ERROR_INT (1 << 16) +#define VPE_DS1_UV_ERROR_INT (1 << 22) + +#define VPE_INTC_EOI 0x00a0 + +#define VPE_CLK_ENABLE 0x0100 +#define VPE_VPEDMA_CLK_ENABLE (1 << 0) +#define VPE_DATA_PATH_CLK_ENABLE (1 << 1) + +#define VPE_CLK_RESET 0x0104 +#define VPE_VPDMA_CLK_RESET_MASK 0x1 +#define VPE_VPDMA_CLK_RESET_SHIFT 0 +#define VPE_DATA_PATH_CLK_RESET_MASK 0x1 +#define VPE_DATA_PATH_CLK_RESET_SHIFT 1 +#define VPE_MAIN_RESET_MASK 0x1 +#define VPE_MAIN_RESET_SHIFT 31 + +#define VPE_CLK_FORMAT_SELECT 0x010c +#define VPE_CSC_SRC_SELECT_MASK 0x03 +#define VPE_CSC_SRC_SELECT_SHIFT 0 +#define VPE_RGB_OUT_SELECT (1 << 8) +#define VPE_DS_SRC_SELECT_MASK 0x07 +#define VPE_DS_SRC_SELECT_SHIFT 9 +#define VPE_DS_BYPASS (1 << 16) +#define VPE_COLOR_SEPARATE_422 (1 << 18) + +#define VPE_DS_SRC_DEI_SCALER (5 << VPE_DS_SRC_SELECT_SHIFT) +#define VPE_CSC_SRC_DEI_SCALER (3 << VPE_CSC_SRC_SELECT_SHIFT) + +#define VPE_CLK_RANGE_MAP 0x011c +#define VPE_RANGE_RANGE_MAP_Y_MASK 0x07 +#define VPE_RANGE_RANGE_MAP_Y_SHIFT 0 +#define VPE_RANGE_RANGE_MAP_UV_MASK 0x07 +#define VPE_RANGE_RANGE_MAP_UV_SHIFT 3 +#define VPE_RANGE_MAP_ON (1 << 6) +#define VPE_RANGE_REDUCTION_ON (1 << 28) + +/* VPE chrominance upsampler regs */ +#define VPE_US1_R0 0x0304 +#define VPE_US2_R0 0x0404 +#define VPE_US3_R0 0x0504 +#define VPE_US_C1_MASK 0x3fff +#define VPE_US_C1_SHIFT 2 +#define VPE_US_C0_MASK 0x3fff +#define VPE_US_C0_SHIFT 18 +#define VPE_US_MODE_MASK 0x03 +#define VPE_US_MODE_SHIFT 16 +#define VPE_ANCHOR_FID0_C1_MASK 0x3fff +#define VPE_ANCHOR_FID0_C1_SHIFT 2 +#define VPE_ANCHOR_FID0_C0_MASK 0x3fff +#define VPE_ANCHOR_FID0_C0_SHIFT 18 + +#define VPE_US1_R1 0x0308 +#define VPE_US2_R1 0x0408 +#define VPE_US3_R1 0x0508 +#define VPE_ANCHOR_FID0_C3_MASK 0x3fff +#define VPE_ANCHOR_FID0_C3_SHIFT 2 +#define VPE_ANCHOR_FID0_C2_MASK 0x3fff +#define VPE_ANCHOR_FID0_C2_SHIFT 18 + +#define VPE_US1_R2 0x030c +#define VPE_US2_R2 0x040c +#define VPE_US3_R2 0x050c +#define VPE_INTERP_FID0_C1_MASK 0x3fff +#define VPE_INTERP_FID0_C1_SHIFT 2 +#define VPE_INTERP_FID0_C0_MASK 0x3fff +#define VPE_INTERP_FID0_C0_SHIFT 18 + +#define VPE_US1_R3 0x0310 +#define VPE_US2_R3 0x0410 +#define VPE_US3_R3 0x0510 +#define VPE_INTERP_FID0_C3_MASK 0x3fff +#define VPE_INTERP_FID0_C3_SHIFT 2 +#define VPE_INTERP_FID0_C2_MASK 0x3fff +#define VPE_INTERP_FID0_C2_SHIFT 18 + +#define VPE_US1_R4 0x0314 +#define VPE_US2_R4 0x0414 +#define VPE_US3_R4 0x0514 +#define VPE_ANCHOR_FID1_C1_MASK 0x3fff +#define VPE_ANCHOR_FID1_C1_SHIFT 2 +#define VPE_ANCHOR_FID1_C0_MASK 0x3fff +#define VPE_ANCHOR_FID1_C0_SHIFT 18 + +#define VPE_US1_R5 0x0318 +#define VPE_US2_R5 0x0418 +#define VPE_US3_R5 0x0518 +#define VPE_ANCHOR_FID1_C3_MASK 0x3fff +#define 
VPE_ANCHOR_FID1_C3_SHIFT 2 +#define VPE_ANCHOR_FID1_C2_MASK 0x3fff +#define VPE_ANCHOR_FID1_C2_SHIFT 18 + +#define VPE_US1_R6 0x031c +#define VPE_US2_R6 0x041c +#define VPE_US3_R6 0x051c +#define VPE_INTERP_FID1_C1_MASK 0x3fff +#define VPE_INTERP_FID1_C1_SHIFT 2 +#define VPE_INTERP_FID1_C0_MASK 0x3fff +#define VPE_INTERP_FID1_C0_SHIFT 18 + +#define VPE_US1_R7 0x0320 +#define VPE_US2_R7 0x0420 +#define VPE_US3_R7 0x0520 +#define VPE_INTERP_FID0_C3_MASK 0x3fff +#define VPE_INTERP_FID0_C3_SHIFT 2 +#define VPE_INTERP_FID0_C2_MASK 0x3fff +#define VPE_INTERP_FID0_C2_SHIFT 18 + +/* VPE de-interlacer regs */ +#define VPE_DEI_FRAME_SIZE 0x0600 +#define VPE_DEI_WIDTH_MASK 0x07ff +#define VPE_DEI_WIDTH_SHIFT 0 +#define VPE_DEI_HEIGHT_MASK 0x07ff +#define VPE_DEI_HEIGHT_SHIFT 16 +#define VPE_DEI_INTERLACE_BYPASS (1 << 29) +#define VPE_DEI_FIELD_FLUSH (1 << 30) +#define VPE_DEI_PROGRESSIVE (1 << 31) + +#define VPE_MDT_BYPASS 0x0604 +#define VPE_MDT_TEMPMAX_BYPASS (1 << 0) +#define VPE_MDT_SPATMAX_BYPASS (1 << 1) + +#define VPE_MDT_SF_THRESHOLD 0x0608 +#define VPE_MDT_SF_SC_THR1_MASK 0xff +#define VPE_MDT_SF_SC_THR1_SHIFT 0 +#define VPE_MDT_SF_SC_THR2_MASK 0xff +#define VPE_MDT_SF_SC_THR2_SHIFT 0 +#define VPE_MDT_SF_SC_THR3_MASK 0xff +#define VPE_MDT_SF_SC_THR3_SHIFT 0 + +#define VPE_EDI_CONFIG 0x060c +#define VPE_EDI_INP_MODE_MASK 0x03 +#define VPE_EDI_INP_MODE_SHIFT 0 +#define VPE_EDI_ENABLE_3D (1 << 2) +#define VPE_EDI_ENABLE_CHROMA_3D (1 << 3) +#define VPE_EDI_CHROMA3D_COR_THR_MASK 0xff +#define VPE_EDI_CHROMA3D_COR_THR_SHIFT 8 +#define VPE_EDI_DIR_COR_LOWER_THR_MASK 0xff +#define VPE_EDI_DIR_COR_LOWER_THR_SHIFT 16 +#define VPE_EDI_COR_SCALE_FACTOR_MASK 0xff +#define VPE_EDI_COR_SCALE_FACTOR_SHIFT 23 + +#define VPE_DEI_EDI_LUT_R0 0x0610 +#define VPE_EDI_LUT0_MASK 0x1f +#define VPE_EDI_LUT0_SHIFT 0 +#define VPE_EDI_LUT1_MASK 0x1f +#define VPE_EDI_LUT1_SHIFT 8 +#define VPE_EDI_LUT2_MASK 0x1f +#define VPE_EDI_LUT2_SHIFT 16 +#define VPE_EDI_LUT3_MASK 0x1f +#define VPE_EDI_LUT3_SHIFT 24 + +#define VPE_DEI_EDI_LUT_R1 0x0614 +#define VPE_EDI_LUT0_MASK 0x1f +#define VPE_EDI_LUT0_SHIFT 0 +#define VPE_EDI_LUT1_MASK 0x1f +#define VPE_EDI_LUT1_SHIFT 8 +#define VPE_EDI_LUT2_MASK 0x1f +#define VPE_EDI_LUT2_SHIFT 16 +#define VPE_EDI_LUT3_MASK 0x1f +#define VPE_EDI_LUT3_SHIFT 24 + +#define VPE_DEI_EDI_LUT_R2 0x0618 +#define VPE_EDI_LUT4_MASK 0x1f +#define VPE_EDI_LUT4_SHIFT 0 +#define VPE_EDI_LUT5_MASK 0x1f +#define VPE_EDI_LUT5_SHIFT 8 +#define VPE_EDI_LUT6_MASK 0x1f +#define VPE_EDI_LUT6_SHIFT 16 +#define VPE_EDI_LUT7_MASK 0x1f +#define VPE_EDI_LUT7_SHIFT 24 + +#define VPE_DEI_EDI_LUT_R3 0x061c +#define VPE_EDI_LUT8_MASK 0x1f +#define VPE_EDI_LUT8_SHIFT 0 +#define VPE_EDI_LUT9_MASK 0x1f +#define VPE_EDI_LUT9_SHIFT 8 +#define VPE_EDI_LUT10_MASK 0x1f +#define VPE_EDI_LUT10_SHIFT 16 +#define VPE_EDI_LUT11_MASK 0x1f +#define VPE_EDI_LUT11_SHIFT 24 + +#define VPE_DEI_FMD_WINDOW_R0 0x0620 +#define VPE_FMD_WINDOW_MINX_MASK 0x07ff +#define VPE_FMD_WINDOW_MINX_SHIFT 0 +#define VPE_FMD_WINDOW_MAXX_MASK 0x07ff +#define VPE_FMD_WINDOW_MAXX_SHIFT 16 +#define VPE_FMD_WINDOW_ENABLE (1 << 31) + +#define VPE_DEI_FMD_WINDOW_R1 0x0624 +#define VPE_FMD_WINDOW_MINY_MASK 0x07ff +#define VPE_FMD_WINDOW_MINY_SHIFT 0 +#define VPE_FMD_WINDOW_MAXY_MASK 0x07ff +#define VPE_FMD_WINDOW_MAXY_SHIFT 16 + +#define VPE_DEI_FMD_CONTROL_R0 0x0628 +#define VPE_FMD_ENABLE (1 << 0) +#define VPE_FMD_LOCK (1 << 1) +#define VPE_FMD_JAM_DIR (1 << 2) +#define VPE_FMD_BED_ENABLE (1 << 3) +#define VPE_FMD_CAF_FIELD_THR_MASK 0xff +#define 
VPE_FMD_CAF_FIELD_THR_SHIFT 16 +#define VPE_FMD_CAF_LINE_THR_MASK 0xff +#define VPE_FMD_CAF_LINE_THR_SHIFT 24 + +#define VPE_DEI_FMD_CONTROL_R1 0x062c +#define VPE_FMD_CAF_THR_MASK 0x000fffff +#define VPE_FMD_CAF_THR_SHIFT 0 + +#define VPE_DEI_FMD_STATUS_R0 0x0630 +#define VPE_FMD_CAF_MASK 0x000fffff +#define VPE_FMD_CAF_SHIFT 0 +#define VPE_FMD_RESET (1 << 24) + +#define VPE_DEI_FMD_STATUS_R1 0x0634 +#define VPE_FMD_FIELD_DIFF_MASK 0x0fffffff +#define VPE_FMD_FIELD_DIFF_SHIFT 0 + +#define VPE_DEI_FMD_STATUS_R2 0x0638 +#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff +#define VPE_FMD_FRAME_DIFF_SHIFT 0 + +/* VPE scaler regs */ +#define VPE_SC_MP_SC0 0x0700 +#define VPE_INTERLACE_O (1 << 0) +#define VPE_LINEAR (1 << 1) +#define VPE_SC_BYPASS (1 << 2) +#define VPE_INVT_FID (1 << 3) +#define VPE_USE_RAV (1 << 4) +#define VPE_ENABLE_EV (1 << 5) +#define VPE_AUTO_HS (1 << 6) +#define VPE_DCM_2X (1 << 7) +#define VPE_DCM_4X (1 << 8) +#define VPE_HP_BYPASS (1 << 9) +#define VPE_INTERLACE_I (1 << 10) +#define VPE_ENABLE_SIN2_VER_INTP (1 << 11) +#define VPE_Y_PK_EN (1 << 14) +#define VPE_TRIM (1 << 15) +#define VPE_SELFGEN_FID (1 << 16) + +#define VPE_SC_MP_SC1 0x0704 +#define VPE_ROW_ACC_INC_MASK 0x07ffffff +#define VPE_ROW_ACC_INC_SHIFT 0 + +#define VPE_SC_MP_SC2 0x0708 +#define VPE_ROW_ACC_OFFSET_MASK 0x0fffffff +#define VPE_ROW_ACC_OFFSET_SHIFT 0 + +#define VPE_SC_MP_SC3 0x070c +#define VPE_ROW_ACC_OFFSET_B_MASK 0x0fffffff +#define VPE_ROW_ACC_OFFSET_B_SHIFT 0 + +#define VPE_SC_MP_SC4 0x0710 +#define VPE_TAR_H_MASK 0x07ff +#define VPE_TAR_H_SHIFT 0 +#define VPE_TAR_W_MASK 0x07ff +#define VPE_TAR_W_SHIFT 12 +#define VPE_LIN_ACC_INC_U_MASK 0x07 +#define VPE_LIN_ACC_INC_U_SHIFT 24 +#define VPE_NLIN_ACC_INIT_U_MASK 0x07 +#define VPE_NLIN_ACC_INIT_U_SHIFT 28 + +#define VPE_SC_MP_SC5 0x0714 +#define VPE_SRC_H_MASK 0x07ff +#define VPE_SRC_H_SHIFT 0 +#define VPE_SRC_W_MASK 0x07ff +#define VPE_SRC_W_SHIFT 12 +#define VPE_NLIN_ACC_INC_U_MASK 0x07 +#define VPE_NLIN_ACC_INC_U_SHIFT 24 + +#define VPE_SC_MP_SC6 0x0718 +#define VPE_ROW_ACC_INIT_RAV_MASK 0x03ff +#define VPE_ROW_ACC_INIT_RAV_SHIFT 0 +#define VPE_ROW_ACC_INIT_RAV_B_MASK 0x03ff +#define VPE_ROW_ACC_INIT_RAV_B_SHIFT 10 + +#define VPE_SC_MP_SC8 0x0720 +#define VPE_NLIN_LEFT_MASK 0x07ff +#define VPE_NLIN_LEFT_SHIFT 0 +#define VPE_NLIN_RIGHT_MASK 0x07ff +#define VPE_NLIN_RIGHT_SHIFT 12 + +#define VPE_SC_MP_SC9 0x0724 +#define VPE_LIN_ACC_INC VPE_SC_MP_SC9 + +#define VPE_SC_MP_SC10 0x0728 +#define VPE_NLIN_ACC_INIT VPE_SC_MP_SC10 + +#define VPE_SC_MP_SC11 0x072c +#define VPE_NLIN_ACC_INC VPE_SC_MP_SC11 + +#define VPE_SC_MP_SC12 0x0730 +#define VPE_COL_ACC_OFFSET_MASK 0x01ffffff +#define VPE_COL_ACC_OFFSET_SHIFT 0 + +#define VPE_SC_MP_SC13 0x0734 +#define VPE_SC_FACTOR_RAV_MASK 0x03ff +#define VPE_SC_FACTOR_RAV_SHIFT 0 +#define VPE_CHROMA_INTP_THR_MASK 0x03ff +#define VPE_CHROMA_INTP_THR_SHIFT 12 +#define VPE_DELTA_CHROMA_THR_MASK 0x0f +#define VPE_DELTA_CHROMA_THR_SHIFT 24 + +#define VPE_SC_MP_SC17 0x0744 +#define VPE_EV_THR_MASK 0x03ff +#define VPE_EV_THR_SHIFT 12 +#define VPE_DELTA_LUMA_THR_MASK 0x0f +#define VPE_DELTA_LUMA_THR_SHIFT 24 +#define VPE_DELTA_EV_THR_MASK 0x0f +#define VPE_DELTA_EV_THR_SHIFT 28 + +#define VPE_SC_MP_SC18 0x0748 +#define VPE_HS_FACTOR_MASK 0x03ff +#define VPE_HS_FACTOR_SHIFT 0 +#define VPE_CONF_DEFAULT_MASK 0x01ff +#define VPE_CONF_DEFAULT_SHIFT 16 + +#define VPE_SC_MP_SC19 0x074c +#define VPE_HPF_COEFF0_MASK 0xff +#define VPE_HPF_COEFF0_SHIFT 0 +#define VPE_HPF_COEFF1_MASK 0xff +#define VPE_HPF_COEFF1_SHIFT 8 +#define 
VPE_HPF_COEFF2_MASK 0xff +#define VPE_HPF_COEFF2_SHIFT 16 +#define VPE_HPF_COEFF3_MASK 0xff +#define VPE_HPF_COEFF3_SHIFT 23 + +#define VPE_SC_MP_SC20 0x0750 +#define VPE_HPF_COEFF4_MASK 0xff +#define VPE_HPF_COEFF4_SHIFT 0 +#define VPE_HPF_COEFF5_MASK 0xff +#define VPE_HPF_COEFF5_SHIFT 8 +#define VPE_HPF_NORM_SHIFT_MASK 0x07 +#define VPE_HPF_NORM_SHIFT_SHIFT 16 +#define VPE_NL_LIMIT_MASK 0x1ff +#define VPE_NL_LIMIT_SHIFT 20 + +#define VPE_SC_MP_SC21 0x0754 +#define VPE_NL_LO_THR_MASK 0x01ff +#define VPE_NL_LO_THR_SHIFT 0 +#define VPE_NL_LO_SLOPE_MASK 0xff +#define VPE_NL_LO_SLOPE_SHIFT 16 + +#define VPE_SC_MP_SC22 0x0758 +#define VPE_NL_HI_THR_MASK 0x01ff +#define VPE_NL_HI_THR_SHIFT 0 +#define VPE_NL_HI_SLOPE_SH_MASK 0x07 +#define VPE_NL_HI_SLOPE_SH_SHIFT 16 + +#define VPE_SC_MP_SC23 0x075c +#define VPE_GRADIENT_THR_MASK 0x07ff +#define VPE_GRADIENT_THR_SHIFT 0 +#define VPE_GRADIENT_THR_RANGE_MASK 0x0f +#define VPE_GRADIENT_THR_RANGE_SHIFT 12 +#define VPE_MIN_GY_THR_MASK 0xff +#define VPE_MIN_GY_THR_SHIFT 16 +#define VPE_MIN_GY_THR_RANGE_MASK 0x0f +#define VPE_MIN_GY_THR_RANGE_SHIFT 28 + +#define VPE_SC_MP_SC24 0x0760 +#define VPE_ORG_H_MASK 0x07ff +#define VPE_ORG_H_SHIFT 0 +#define VPE_ORG_W_MASK 0x07ff +#define VPE_ORG_W_SHIFT 16 + +#define VPE_SC_MP_SC25 0x0764 +#define VPE_OFF_H_MASK 0x07ff +#define VPE_OFF_H_SHIFT 0 +#define VPE_OFF_W_MASK 0x07ff +#define VPE_OFF_W_SHIFT 16 + +/* VPE color space converter regs */ +#define VPE_CSC_CSC00 0x5700 +#define VPE_CSC_A0_MASK 0x1fff +#define VPE_CSC_A0_SHIFT 0 +#define VPE_CSC_B0_MASK 0x1fff +#define VPE_CSC_B0_SHIFT 16 + +#define VPE_CSC_CSC01 0x5704 +#define VPE_CSC_C0_MASK 0x1fff +#define VPE_CSC_C0_SHIFT 0 +#define VPE_CSC_A1_MASK 0x1fff +#define VPE_CSC_A1_SHIFT 16 + +#define VPE_CSC_CSC02 0x5708 +#define VPE_CSC_B1_MASK 0x1fff +#define VPE_CSC_B1_SHIFT 0 +#define VPE_CSC_C1_MASK 0x1fff +#define VPE_CSC_C1_SHIFT 16 + +#define VPE_CSC_CSC03 0x570c +#define VPE_CSC_A2_MASK 0x1fff +#define VPE_CSC_A2_SHIFT 0 +#define VPE_CSC_B2_MASK 0x1fff +#define VPE_CSC_B2_SHIFT 16 + +#define VPE_CSC_CSC04 0x5710 +#define VPE_CSC_C2_MASK 0x1fff +#define VPE_CSC_C2_SHIFT 0 +#define VPE_CSC_D0_MASK 0x0fff +#define VPE_CSC_D0_SHIFT 16 + +#define VPE_CSC_CSC05 0x5714 +#define VPE_CSC_D1_MASK 0x0fff +#define VPE_CSC_D1_SHIFT 0 +#define VPE_CSC_D2_MASK 0x0fff +#define VPE_CSC_D2_SHIFT 16 +#define VPE_CSC_BYPASS (1 << 28) + +#endif diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c index b557caf5b1a..ccdadd623a3 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/media/platform/timblogiw.c @@ -403,7 +403,7 @@ static int timblogiw_s_input(struct file *file, void *priv, unsigned int input) return 0; } -static int timblogiw_streamon(struct file *file, void *priv, unsigned int type) +static int timblogiw_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct video_device *vdev = video_devdata(file); struct timblogiw_fh *fh = priv; @@ -420,7 +420,7 @@ static int timblogiw_streamon(struct file *file, void *priv, unsigned int type) } static int timblogiw_streamoff(struct file *file, void *priv, - unsigned int type) + enum v4l2_buf_type type) { struct video_device *vdev = video_devdata(file); struct timblogiw_fh *fh = priv; @@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) desc = dmaengine_prep_slave_sg(fh->chan, buf->sg, sg_elems, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + DMA_PREP_INTERRUPT); if (!desc) { 
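		/*
		 * Descriptor preparation failed: take the buffer back off the
		 * driver's DMA queue under the queue lock.
		 */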
spin_lock_irq(&fh->queue_lock); list_del_init(&vb->queue); diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c index 21db23b196b..fa3964022b9 100644 --- a/drivers/media/radio/radio-keene.c +++ b/drivers/media/radio/radio-keene.c @@ -123,7 +123,7 @@ static int keene_cmd_set(struct keene_device *radio) /* If bit 0 is set, then transmit mono, otherwise stereo. If bit 2 is set, then enable 75 us preemphasis, otherwise it is 50 us. */ - radio->buffer[3] = (!radio->stereo) | (radio->preemph_75_us ? 4 : 0); + radio->buffer[3] = (radio->stereo ? 0 : 1) | (radio->preemph_75_us ? 4 : 0); radio->buffer[4] = 0x00; radio->buffer[5] = 0x00; radio->buffer[6] = 0x00; diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c index f1e3714b5f1..93d864eb830 100644 --- a/drivers/media/radio/radio-sf16fmr2.c +++ b/drivers/media/radio/radio-sf16fmr2.c @@ -74,8 +74,8 @@ static u8 fmr2_tea575x_get_pins(struct snd_tea575x *tea) struct fmr2 *fmr2 = tea->private_data; u8 bits = inb(fmr2->io); - return (bits & STR_DATA) ? TEA575X_DATA : 0 | - (bits & STR_MOST) ? TEA575X_MOST : 0; + return ((bits & STR_DATA) ? TEA575X_DATA : 0) | + ((bits & STR_MOST) ? TEA575X_MOST : 0); } static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output) @@ -295,7 +295,6 @@ static void fmr2_remove(struct fmr2 *fmr2) static int fmr2_isa_remove(struct device *pdev, unsigned int ndev) { fmr2_remove(dev_get_drvdata(pdev)); - dev_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index b9147721241..3db8a8cfe1a 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -271,6 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } +#ifdef CONFIG_PM static void shark_resume_leds(struct shark_device *shark) { if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) @@ -280,6 +281,7 @@ static void shark_resume_leds(struct shark_device *shark) set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } +#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index 9fb669721e6..d86d90dab8b 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -237,6 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } +#ifdef CONFIG_PM static void shark_resume_leds(struct shark_device *shark) { int i; @@ -246,6 +247,7 @@ static void shark_resume_leds(struct shark_device *shark) schedule_work(&shark->led_work); } +#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c index 0bd25006828..0e750aef656 100644 --- a/drivers/media/radio/si470x/radio-si470x-common.c +++ b/drivers/media/radio/si470x/radio-si470x-common.c @@ -254,7 +254,7 @@ static unsigned int si470x_get_step(struct si470x_device *radio) /* 2: 50 kHz */ default: return 50 * 16; - }; + } } diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index e5fc9acd0c4..2a497c80c77 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -463,7 +463,7 @@ static int si470x_i2c_remove(struct i2c_client 
*client) } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /* * si470x_i2c_suspend - suspend the device */ @@ -509,7 +509,7 @@ static struct i2c_driver si470x_i2c_driver = { .driver = { .name = "si470x", .owner = THIS_MODULE, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .pm = &si470x_i2c_pm, #endif }, diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c index fe160882ee1..9ec48ccbcf0 100644 --- a/drivers/media/radio/si4713-i2c.c +++ b/drivers/media/radio/si4713-i2c.c @@ -1456,7 +1456,7 @@ static int si4713_probe(struct i2c_client *client, if (client->irq) { rval = request_irq(client->irq, - si4713_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED, + si4713_handler, IRQF_TRIGGER_FALLING, client->name, sdev); if (rval < 0) { v4l2_err(&sdev->sd, "Could not request IRQ\n"); diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c index 06ac69245ca..69e3245a58a 100644 --- a/drivers/media/radio/tef6862.c +++ b/drivers/media/radio/tef6862.c @@ -48,15 +48,15 @@ #define WM_SUB_TEST 0xF /* Different modes of the MSA register */ -#define MODE_BUFFER 0x0 -#define MODE_PRESET 0x1 -#define MODE_SEARCH 0x2 -#define MODE_AF_UPDATE 0x3 -#define MODE_JUMP 0x4 -#define MODE_CHECK 0x5 -#define MODE_LOAD 0x6 -#define MODE_END 0x7 -#define MODE_SHIFT 5 +#define MSA_MODE_BUFFER 0x0 +#define MSA_MODE_PRESET 0x1 +#define MSA_MODE_SEARCH 0x2 +#define MSA_MODE_AF_UPDATE 0x3 +#define MSA_MODE_JUMP 0x4 +#define MSA_MODE_CHECK 0x5 +#define MSA_MODE_LOAD 0x6 +#define MSA_MODE_END 0x7 +#define MSA_MODE_SHIFT 5 struct tef6862_state { struct v4l2_subdev sd; @@ -114,7 +114,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; - i2cmsg[0] = (MODE_PRESET << MODE_SHIFT) | WM_SUB_PLLM; + i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; i2cmsg[1] = (pll >> 8) & 0xff; i2cmsg[2] = pll & 0xff; diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 253f307f0b3..4b2e9e8298e 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -175,7 +175,7 @@ static int_handler_prototype int_handler_table[] = { fm_irq_handle_intmsk_cmd_resp }; -long (*g_st_write) (struct sk_buff *skb); +static long (*g_st_write) (struct sk_buff *skb); static struct completion wait_for_fmdrv_reg_comp; static inline void fm_irq_call(struct fmdev *fmdev) diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 11e84bcc23a..904f11367c2 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -322,4 +322,14 @@ config IR_GPIO_CIR To compile this driver as a module, choose M here: the module will be called gpio-ir-recv. +config RC_ST + tristate "ST remote control receiver" + depends on ARCH_STI && RC_CORE + help + Say Y here if you want support for ST remote control driver + which allows both IR and UHF RX. + The driver passes raw pulse and space information to the LIRC decoder. + + If you're not sure, select N here. 
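(For reference, the "raw pulse and space information" mentioned above reaches the
LIRC decoder through the rc-core raw-event API. A minimal sketch of that reporting
path follows, using only calls that the st_rc.c driver added later in this patch
also uses; the helper name and the mark/space durations are illustrative and not
part of the patch itself.)

	#include <media/rc-core.h>

	/* Hand one mark/space pair to the rc-core raw decoders. */
	static void report_mark_space(struct rc_dev *rdev, u32 mark_us, u32 space_us)
	{
		DEFINE_IR_RAW_EVENT(ev);

		ev.pulse = true;			/* carrier present */
		ev.duration = US_TO_NS(mark_us);
		ir_raw_event_store(rdev, &ev);

		ev.pulse = false;			/* carrier absent */
		ev.duration = US_TO_NS(space_us);
		ir_raw_event_store(rdev, &ev);

		ir_raw_event_handle(rdev);		/* flush to the decoders */
	}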
+ endif #RC_DEVICES diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 56bacf07b36..f4eb32c0a45 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o obj-$(CONFIG_IR_IGUANA) += iguanair.o obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o +obj-$(CONFIG_RC_ST) += st_rc.o diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h index 82516a1d39b..b698f3d2ced 100644 --- a/drivers/media/rc/fintek-cir.h +++ b/drivers/media/rc/fintek-cir.h @@ -76,8 +76,8 @@ struct fintek_dev { } tx; /* Config register index/data port pair */ - u8 cr_ip; - u8 cr_dp; + u32 cr_ip; + u32 cr_dp; /* hardware I/O settings */ unsigned long cir_addr; diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index 07aacfa5903..80c611c2e8c 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/slab.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/irq.h> diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index b53626ba6f4..fdae05c4f37 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -308,22 +308,12 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier) cycles = DIV_ROUND_CLOSEST(24000000, carrier * 2) - ir->cycle_overhead; - /* make up the the remainer of 4-cycle blocks */ - switch (cycles & 3) { - case 0: - sevens = 0; - break; - case 1: - sevens = 3; - break; - case 2: - sevens = 2; - break; - case 3: - sevens = 1; - break; - } - + /* + * Calculate minimum number of 7 cycles needed so + * we are left with a multiple of 4; so we want to have + * (sevens * 7) & 3 == cycles & 3 + */ + sevens = (4 - cycles) & 3; fours = (cycles - sevens * 7) / 4; /* magic happens here */ diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c index 31b955bf766..b1e19a26208 100644 --- a/drivers/media/rc/ir-rx51.c +++ b/drivers/media/rc/ir-rx51.c @@ -201,8 +201,7 @@ static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51) lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer); retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler, - IRQF_DISABLED | IRQF_SHARED, - "lirc_pulse_timer", lirc_rx51); + IRQF_SHARED, "lirc_pulse_timer", lirc_rx51); if (retval) { dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n"); goto err2; diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 7c3674ff5ea..07e83108df0 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h @@ -84,8 +84,8 @@ struct nvt_dev { } tx; /* EFER Config register index/data pair */ - u8 cr_efir; - u8 cr_efdr; + u32 cr_efir; + u32 cr_efdr; /* hardware I/O settings */ unsigned long cir_addr; diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c new file mode 100644 index 00000000000..65120c2d47a --- /dev/null +++ b/drivers/media/rc/st_rc.c @@ -0,0 +1,395 @@ +/* + * Copyright (C) 2013 STMicroelectronics Limited + * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <media/rc-core.h> +#include <linux/pinctrl/consumer.h> + +struct st_rc_device { + struct device *dev; + int irq; + int irq_wake; + struct clk *sys_clock; + void *base; /* Register base address */ + void *rx_base;/* RX Register base address */ + struct rc_dev *rdev; + bool overclocking; + int sample_mult; + int sample_div; + bool rxuhfmode; +}; + +/* Registers */ +#define IRB_SAMPLE_RATE_COMM 0x64 /* sample freq divisor*/ +#define IRB_CLOCK_SEL 0x70 /* clock select */ +#define IRB_CLOCK_SEL_STATUS 0x74 /* clock status */ +/* IRB IR/UHF receiver registers */ +#define IRB_RX_ON 0x40 /* pulse time capture */ +#define IRB_RX_SYS 0X44 /* sym period capture */ +#define IRB_RX_INT_EN 0x48 /* IRQ enable (R/W) */ +#define IRB_RX_INT_STATUS 0x4c /* IRQ status (R/W) */ +#define IRB_RX_EN 0x50 /* Receive enable */ +#define IRB_MAX_SYM_PERIOD 0x54 /* max sym value */ +#define IRB_RX_INT_CLEAR 0x58 /* overrun status */ +#define IRB_RX_STATUS 0x6c /* receive status */ +#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */ +#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */ + +/** + * IRQ set: Enable full FIFO 1 -> bit 3; + * Enable overrun IRQ 1 -> bit 2; + * Enable last symbol IRQ 1 -> bit 1: + * Enable RX interrupt 1 -> bit 0; + */ +#define IRB_RX_INTS 0x0f +#define IRB_RX_OVERRUN_INT 0x04 + /* maximum symbol period (microsecs),timeout to detect end of symbol train */ +#define MAX_SYMB_TIME 0x5000 +#define IRB_SAMPLE_FREQ 10000000 +#define IRB_FIFO_NOT_EMPTY 0xff00 +#define IRB_OVERFLOW 0x4 +#define IRB_TIMEOUT 0xffff +#define IR_ST_NAME "st-rc" + +static void st_rc_send_lirc_timeout(struct rc_dev *rdev) +{ + DEFINE_IR_RAW_EVENT(ev); + ev.timeout = true; + ir_raw_event_store(rdev, &ev); +} + +/** + * RX graphical example to better understand the difference between ST IR block + * output and standard definition used by LIRC (and most of the world!) + * + * mark mark + * |-IRB_RX_ON-| |-IRB_RX_ON-| + * ___ ___ ___ ___ ___ ___ _ + * | | | | | | | | | | | | | + * | | | | | | space 0 | | | | | | space 1 | + * _____| |__| |__| |____________________________| |__| |__| |_____________| + * + * |--------------- IRB_RX_SYS -------------|------ IRB_RX_SYS -------| + * + * |------------- encoding bit 0 -----------|---- encoding bit 1 -----| + * + * ST hardware returns mark (IRB_RX_ON) and total symbol time (IRB_RX_SYS), so + * convert to standard mark/space we have to calculate space=(IRB_RX_SYS-mark) + * The mark time represents the amount of time the carrier (usually 36-40kHz) + * is detected.The above examples shows Pulse Width Modulation encoding where + * bit 0 is represented by space>mark. + */ + +static irqreturn_t st_rc_rx_interrupt(int irq, void *data) +{ + unsigned int symbol, mark = 0; + struct st_rc_device *dev = data; + int last_symbol = 0; + u32 status; + DEFINE_IR_RAW_EVENT(ev); + + if (dev->irq_wake) + pm_wakeup_event(dev->dev, 0); + + status = readl(dev->rx_base + IRB_RX_STATUS); + + while (status & (IRB_FIFO_NOT_EMPTY | IRB_OVERFLOW)) { + u32 int_status = readl(dev->rx_base + IRB_RX_INT_STATUS); + if (unlikely(int_status & IRB_RX_OVERRUN_INT)) { + /* discard the entire collection in case of errors! 
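			 (ir_raw_event_reset() below resets the raw-event stream
			 so the decoders do not act on a partial frame)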
*/ + ir_raw_event_reset(dev->rdev); + dev_info(dev->dev, "IR RX overrun\n"); + writel(IRB_RX_OVERRUN_INT, + dev->rx_base + IRB_RX_INT_CLEAR); + continue; + } + + symbol = readl(dev->rx_base + IRB_RX_SYS); + mark = readl(dev->rx_base + IRB_RX_ON); + + if (symbol == IRB_TIMEOUT) + last_symbol = 1; + + /* Ignore any noise */ + if ((mark > 2) && (symbol > 1)) { + symbol -= mark; + if (dev->overclocking) { /* adjustments to timings */ + symbol *= dev->sample_mult; + symbol /= dev->sample_div; + mark *= dev->sample_mult; + mark /= dev->sample_div; + } + + ev.duration = US_TO_NS(mark); + ev.pulse = true; + ir_raw_event_store(dev->rdev, &ev); + + if (!last_symbol) { + ev.duration = US_TO_NS(symbol); + ev.pulse = false; + ir_raw_event_store(dev->rdev, &ev); + } else { + st_rc_send_lirc_timeout(dev->rdev); + } + + } + last_symbol = 0; + status = readl(dev->rx_base + IRB_RX_STATUS); + } + + writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_CLEAR); + + /* Empty software fifo */ + ir_raw_event_handle(dev->rdev); + return IRQ_HANDLED; +} + +static void st_rc_hardware_init(struct st_rc_device *dev) +{ + int baseclock, freqdiff; + unsigned int rx_max_symbol_per = MAX_SYMB_TIME; + unsigned int rx_sampling_freq_div; + + clk_prepare_enable(dev->sys_clock); + baseclock = clk_get_rate(dev->sys_clock); + + /* IRB input pins are inverted internally from high to low. */ + writel(1, dev->rx_base + IRB_RX_POLARITY_INV); + + rx_sampling_freq_div = baseclock / IRB_SAMPLE_FREQ; + writel(rx_sampling_freq_div, dev->base + IRB_SAMPLE_RATE_COMM); + + freqdiff = baseclock - (rx_sampling_freq_div * IRB_SAMPLE_FREQ); + if (freqdiff) { /* over clocking, workout the adjustment factors */ + dev->overclocking = true; + dev->sample_mult = 1000; + dev->sample_div = baseclock / (10000 * rx_sampling_freq_div); + rx_max_symbol_per = (rx_max_symbol_per * 1000)/dev->sample_div; + } + + writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD); +} + +static int st_rc_remove(struct platform_device *pdev) +{ + struct st_rc_device *rc_dev = platform_get_drvdata(pdev); + clk_disable_unprepare(rc_dev->sys_clock); + rc_unregister_device(rc_dev->rdev); + return 0; +} + +static int st_rc_open(struct rc_dev *rdev) +{ + struct st_rc_device *dev = rdev->priv; + unsigned long flags; + local_irq_save(flags); + /* enable interrupts and receiver */ + writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_EN); + writel(0x01, dev->rx_base + IRB_RX_EN); + local_irq_restore(flags); + + return 0; +} + +static void st_rc_close(struct rc_dev *rdev) +{ + struct st_rc_device *dev = rdev->priv; + /* disable interrupts and receiver */ + writel(0x00, dev->rx_base + IRB_RX_EN); + writel(0x00, dev->rx_base + IRB_RX_INT_EN); +} + +static int st_rc_probe(struct platform_device *pdev) +{ + int ret = -EINVAL; + struct rc_dev *rdev; + struct device *dev = &pdev->dev; + struct resource *res; + struct st_rc_device *rc_dev; + struct device_node *np = pdev->dev.of_node; + const char *rx_mode; + + rc_dev = devm_kzalloc(dev, sizeof(struct st_rc_device), GFP_KERNEL); + + if (!rc_dev) + return -ENOMEM; + + rdev = rc_allocate_device(); + + if (!rdev) + return -ENOMEM; + + if (np && !of_property_read_string(np, "rx-mode", &rx_mode)) { + + if (!strcmp(rx_mode, "uhf")) { + rc_dev->rxuhfmode = true; + } else if (!strcmp(rx_mode, "infrared")) { + rc_dev->rxuhfmode = false; + } else { + dev_err(dev, "Unsupported rx mode [%s]\n", rx_mode); + goto err; + } + + } else { + goto err; + } + + rc_dev->sys_clock = devm_clk_get(dev, NULL); + if (IS_ERR(rc_dev->sys_clock)) { + dev_err(dev, "System clock 
not found\n"); + ret = PTR_ERR(rc_dev->sys_clock); + goto err; + } + + rc_dev->irq = platform_get_irq(pdev, 0); + if (rc_dev->irq < 0) { + ret = rc_dev->irq; + goto err; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + rc_dev->base = devm_ioremap_resource(dev, res); + if (IS_ERR(rc_dev->base)) { + ret = PTR_ERR(rc_dev->base); + goto err; + } + + if (rc_dev->rxuhfmode) + rc_dev->rx_base = rc_dev->base + 0x40; + else + rc_dev->rx_base = rc_dev->base; + + rc_dev->dev = dev; + platform_set_drvdata(pdev, rc_dev); + st_rc_hardware_init(rc_dev); + + rdev->driver_type = RC_DRIVER_IR_RAW; + rdev->allowed_protos = RC_BIT_ALL; + /* rx sampling rate is 10Mhz */ + rdev->rx_resolution = 100; + rdev->timeout = US_TO_NS(MAX_SYMB_TIME); + rdev->priv = rc_dev; + rdev->open = st_rc_open; + rdev->close = st_rc_close; + rdev->driver_name = IR_ST_NAME; + rdev->map_name = RC_MAP_LIRC; + rdev->input_name = "ST Remote Control Receiver"; + + /* enable wake via this device */ + device_set_wakeup_capable(dev, true); + device_set_wakeup_enable(dev, true); + + ret = rc_register_device(rdev); + if (ret < 0) + goto clkerr; + + rc_dev->rdev = rdev; + if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt, + IRQF_NO_SUSPEND, IR_ST_NAME, rc_dev) < 0) { + dev_err(dev, "IRQ %d register failed\n", rc_dev->irq); + ret = -EINVAL; + goto rcerr; + } + + /** + * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW + * lircd expects a long space first before a signal train to sync. + */ + st_rc_send_lirc_timeout(rdev); + + dev_info(dev, "setup in %s mode\n", rc_dev->rxuhfmode ? "UHF" : "IR"); + + return ret; +rcerr: + rc_unregister_device(rdev); + rdev = NULL; +clkerr: + clk_disable_unprepare(rc_dev->sys_clock); +err: + rc_free_device(rdev); + dev_err(dev, "Unable to register device (%d)\n", ret); + return ret; +} + +#ifdef CONFIG_PM +static int st_rc_suspend(struct device *dev) +{ + struct st_rc_device *rc_dev = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) { + if (!enable_irq_wake(rc_dev->irq)) + rc_dev->irq_wake = 1; + else + return -EINVAL; + } else { + pinctrl_pm_select_sleep_state(dev); + writel(0x00, rc_dev->rx_base + IRB_RX_EN); + writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN); + clk_disable_unprepare(rc_dev->sys_clock); + } + + return 0; +} + +static int st_rc_resume(struct device *dev) +{ + struct st_rc_device *rc_dev = dev_get_drvdata(dev); + struct rc_dev *rdev = rc_dev->rdev; + + if (rc_dev->irq_wake) { + disable_irq_wake(rc_dev->irq); + rc_dev->irq_wake = 0; + } else { + pinctrl_pm_select_default_state(dev); + st_rc_hardware_init(rc_dev); + if (rdev->users) { + writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN); + writel(0x01, rc_dev->rx_base + IRB_RX_EN); + } + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume); +#endif + +#ifdef CONFIG_OF +static struct of_device_id st_rc_match[] = { + { .compatible = "st,comms-irb", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, st_rc_match); +#endif + +static struct platform_driver st_rc_driver = { + .driver = { + .name = IR_ST_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(st_rc_match), +#ifdef CONFIG_PM + .pm = &st_rc_pm_ops, +#endif + }, + .probe = st_rc_probe, + .remove = st_rc_remove, +}; + +module_platform_driver(st_rc_driver); + +MODULE_DESCRIPTION("RC Transceiver driver for STMicroelectronics platforms"); +MODULE_AUTHOR("STMicroelectronics (R&D) Ltd"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c index 98bd4960c75..904baf4eec2 100644 --- 
a/drivers/media/rc/winbond-cir.c +++ b/drivers/media/rc/winbond-cir.c @@ -1110,7 +1110,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) } err = request_irq(data->irq, wbcir_irq_handler, - IRQF_DISABLED, DRVNAME, device); + 0, DRVNAME, device); if (err) { dev_err(dev, "Failed to claim IRQ %u\n", data->irq); err = -EBUSY; diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c index 6c96e489877..72971a8d3c3 100644 --- a/drivers/media/tuners/e4000.c +++ b/drivers/media/tuners/e4000.c @@ -21,20 +21,30 @@ #include "e4000_priv.h" #include <linux/math64.h> +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple registers */ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[1 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -54,7 +64,7 @@ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_addr, @@ -64,11 +74,18 @@ static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) }, { .addr = priv->cfg->i2c_addr, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c rd reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c index f4d0e797a6c..d74e9205681 100644 --- a/drivers/media/tuners/fc0012.c +++ b/drivers/media/tuners/fc0012.c @@ -139,7 +139,7 @@ static int fc0012_set_params(struct dvb_frontend *fe) unsigned char reg[7], am, pm, multi, tmp; unsigned long f_vco; unsigned short xtal_freq_khz_2, xin, xdiv; - int vco_select = false; + bool vco_select = false; if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c index bd8f0f1e8f3..b4162315773 100644 --- a/drivers/media/tuners/fc0013.c +++ b/drivers/media/tuners/fc0013.c @@ -233,7 +233,7 @@ static int fc0013_set_params(struct dvb_frontend *fe) unsigned char reg[7], am, pm, multi, tmp; unsigned long f_vco; unsigned short xtal_freq_khz_2, xin, xdiv; - int vco_select = false; + bool vco_select = false; if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c index 81f38aae9c6..3aecaf46509 100644 --- a/drivers/media/tuners/fc2580.c +++ b/drivers/media/tuners/fc2580.c @@ -20,6 +20,9 @@ #include "fc2580_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* * TODO: * I2C write and read works only for one single register. 
Multiple registers @@ -41,16 +44,23 @@ static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[1 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_addr, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -69,7 +79,7 @@ static int fc2580_wr_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_addr, @@ -79,11 +89,18 @@ static int fc2580_rd_regs(struct fc2580_priv *priv, u8 reg, u8 *val, int len) }, { .addr = priv->cfg->i2c_addr, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c rd reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index 1c23666468c..d9ee43fae62 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c @@ -612,10 +612,19 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type, vco_fine_tune = (data[4] & 0x30) >> 4; - if (vco_fine_tune > VCO_POWER_REF) - div_num = div_num - 1; - else if (vco_fine_tune < VCO_POWER_REF) - div_num = div_num + 1; + tuner_dbg("mix_div=%d div_num=%d vco_fine_tune=%d\n", + mix_div, div_num, vco_fine_tune); + + /* + * XXX: R828D/16MHz seems to have always vco_fine_tune=1. + * Due to that, this calculation goes wrong. 
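	 * The fine-tune adjustment is therefore skipped for the R828D and
	 * div_num is programmed unmodified.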
+ */ + if (priv->cfg->rafael_chip != CHIP_R828D) { + if (vco_fine_tune > VCO_POWER_REF) + div_num = div_num - 1; + else if (vco_fine_tune < VCO_POWER_REF) + div_num = div_num + 1; + } rc = r820t_write_reg_mask(priv, 0x10, div_num << 5, 0xe0); if (rc < 0) @@ -637,11 +646,6 @@ static int r820t_set_pll(struct r820t_priv *priv, enum v4l2_tuner_type type, vco_fra = pll_ref * 129 / 128; } - if (nint > 63) { - tuner_info("No valid PLL values for %u kHz!\n", freq); - return -EINVAL; - } - ni = (nint - 13) / 4; si = nint - 4 * ni - 13; diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c index e4a84ee231c..abe256e1f84 100644 --- a/drivers/media/tuners/tda18212.c +++ b/drivers/media/tuners/tda18212.c @@ -20,6 +20,9 @@ #include "tda18212.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct tda18212_priv { struct tda18212_config *cfg; struct i2c_adapter *i2c; @@ -32,16 +35,23 @@ static int tda18212_wr_regs(struct tda18212_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len+1]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_address, .flags = 0, - .len = sizeof(buf), + .len = 1 + len, .buf = buf, } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + buf[0] = reg; memcpy(&buf[1], val, len); @@ -61,7 +71,7 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val, int len) { int ret; - u8 buf[len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, @@ -71,11 +81,18 @@ static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val, }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, - .len = sizeof(buf), + .len = len, .buf = buf, } }; + if (len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c rd reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, buf, len); diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c index 2d31aeb6b08..9300e9361e3 100644 --- a/drivers/media/tuners/tda18218.c +++ b/drivers/media/tuners/tda18218.c @@ -20,11 +20,14 @@ #include "tda18218_priv.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* write multiple registers */ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) { int ret = 0, len2, remaining; - u8 buf[1 + len]; + u8 buf[MAX_XFER_SIZE]; struct i2c_msg msg[1] = { { .addr = priv->cfg->i2c_address, @@ -33,6 +36,13 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) } }; + if (1 + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + for (remaining = len; remaining > 0; remaining -= (priv->cfg->i2c_wr_max - 1)) { len2 = remaining; @@ -63,7 +73,7 @@ static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) { int ret; - u8 buf[reg+len]; /* we must start read always from reg 0x00 */ + u8 buf[MAX_XFER_SIZE]; /* we must start read always from reg 0x00 */ struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, @@ -73,11 +83,18 @@ static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len) }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, - .len = 
sizeof(buf), + .len = reg + len, .buf = buf, } }; + if (reg + len > sizeof(buf)) { + dev_warn(&priv->i2c->dev, + "%s: i2c wr reg=%04x: len=%d is too big!\n", + KBUILD_MODNAME, reg, len); + return -EINVAL; + } + ret = i2c_transfer(priv->i2c, msg, 2); if (ret == 2) { memcpy(val, &buf[reg], len); diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c index 300005c535b..9823248d743 100644 --- a/drivers/media/tuners/tda9887.c +++ b/drivers/media/tuners/tda9887.c @@ -536,8 +536,8 @@ static int tda9887_status(struct dvb_frontend *fe) unsigned char buf[1]; int rc; - memset(buf,0,sizeof(buf)); - if (1 != (rc = tuner_i2c_xfer_recv(&priv->i2c_props,buf,1))) + rc = tuner_i2c_xfer_recv(&priv->i2c_props, buf, 1); + if (rc != 1) tuner_info("i2c i/o error: rc == %d (should be 1)\n", rc); dump_read_message(fe, buf); return 0; diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index 878d2c4d9e8..4be5cf808a4 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -24,6 +24,9 @@ #include <linux/dvb/frontend.h> #include "dvb_frontend.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 80 + /* Registers (Write-only) */ #define XREG_INIT 0x00 #define XREG_RF_FREQ 0x02 @@ -547,7 +550,10 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type, { struct xc2028_data *priv = fe->tuner_priv; int pos, rc; - unsigned char *p, *endp, buf[priv->ctrl.max_len]; + unsigned char *p, *endp, buf[MAX_XFER_SIZE]; + + if (priv->ctrl.max_len > sizeof(buf)) + priv->ctrl.max_len = sizeof(buf); tuner_dbg("%s called\n", __func__); @@ -572,7 +578,7 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type, return -EINVAL; } - size = le16_to_cpu(*(__u16 *) p); + size = le16_to_cpu(*(__le16 *) p); p += sizeof(size); if (size == 0xffff) @@ -683,7 +689,7 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type, /* 16 SCODE entries per file; each SCODE entry is 12 bytes and * has a 2-byte size header in the firmware format. 
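	 * Hence the "14 * 16" size check and the "14 * scode" stride below.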
*/ if (priv->firm[pos].size != 14 * 16 || scode >= 16 || - le16_to_cpu(*(__u16 *)(p + 14 * scode)) != 12) + le16_to_cpu(*(__le16 *)(p + 14 * scode)) != 12) return -EINVAL; p += 14 * scode + 2; } diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 8b6275f8590..0bd96906339 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -390,7 +390,7 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb) } if (fc_usb->iso_buffer != NULL) - pci_free_consistent(NULL, + usb_free_coherent(fc_usb->udev, fc_usb->buffer_size, fc_usb->iso_buffer, fc_usb->dma_addr); } @@ -407,8 +407,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb) "each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB, B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize); - fc_usb->iso_buffer = pci_alloc_consistent(NULL, - bufsize, &fc_usb->dma_addr); + fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev, + bufsize, GFP_KERNEL, &fc_usb->dma_addr); if (fc_usb->iso_buffer == NULL) return -ENOMEM; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index be171928360..351a78a84c3 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -209,7 +209,7 @@ static void cpia2_usb_complete(struct urb *urb) { int i; unsigned char *cdata; - static int frame_ready = false; + static bool frame_ready = false; struct camera_data *cam = (struct camera_data *) urb->context; if (urb->status!=0) { diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index a384f80f595..e9d017bea37 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -978,7 +978,6 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, int minor) { int retval = -ENOMEM; - int errCode; unsigned int maxh, maxw; dev->udev = udev; @@ -1014,8 +1013,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, /* Cx231xx pre card setup */ cx231xx_pre_card_setup(dev); - errCode = cx231xx_config(dev); - if (errCode) { + retval = cx231xx_config(dev); + if (retval) { cx231xx_errdev("error configuring device\n"); return -ENOMEM; } @@ -1024,12 +1023,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, dev->norm = dev->board.norm; /* register i2c bus */ - errCode = cx231xx_dev_init(dev); - if (errCode < 0) { - cx231xx_dev_uninit(dev); + retval = cx231xx_dev_init(dev); + if (retval) { cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n", - __func__, errCode); - return errCode; + __func__, retval); + goto err_dev_init; } /* Do board specific init */ @@ -1047,11 +1045,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, dev->interlaced = 0; dev->video_input = 0; - errCode = cx231xx_config(dev); - if (errCode < 0) { + retval = cx231xx_config(dev); + if (retval) { cx231xx_errdev("%s: cx231xx_config - errCode [%d]!\n", - __func__, errCode); - return errCode; + __func__, retval); + goto err_dev_init; } /* init video dma queues */ @@ -1075,9 +1073,9 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, } retval = cx231xx_register_analog_devices(dev); - if (retval < 0) { - cx231xx_release_resources(dev); - return retval; + if (retval) { + cx231xx_release_analog_resources(dev); + goto err_analog; } cx231xx_ir_init(dev); @@ -1085,6 +1083,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, 
cx231xx_init_extension(dev); return 0; +err_analog: + cx231xx_remove_from_devlist(dev); +err_dev_init: + cx231xx_dev_uninit(dev); + return retval; } #if defined(CONFIG_MODULES) && defined(MODULE) @@ -1132,7 +1135,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface, char *speed; struct usb_interface_assoc_descriptor *assoc_desc; - udev = usb_get_dev(interface_to_usbdev(interface)); ifnum = interface->altsetting[0].desc.bInterfaceNumber; /* @@ -1161,6 +1163,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, return -ENOMEM; } + udev = usb_get_dev(interface_to_usbdev(interface)); + snprintf(dev->name, 29, "cx231xx #%d", nr); dev->devno = nr; dev->model = id->driver_info; @@ -1223,10 +1227,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (assoc_desc->bFirstInterface != ifnum) { cx231xx_err(DRIVER_NAME ": Not found " "matching IAD interface\n"); - clear_bit(dev->devno, &cx231xx_devused); - kfree(dev); - dev = NULL; - return -ENODEV; + retval = -ENODEV; + goto err_if; } cx231xx_info("registering interface %d\n", ifnum); @@ -1242,22 +1244,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface, retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev); if (retval) { cx231xx_errdev("v4l2_device_register failed\n"); - clear_bit(dev->devno, &cx231xx_devused); - kfree(dev); - dev = NULL; - return -EIO; + retval = -EIO; + goto err_v4l2; } /* allocate device struct */ retval = cx231xx_init_dev(dev, udev, nr); - if (retval) { - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - usb_set_intfdata(interface, NULL); - - return retval; - } + if (retval) + goto err_init; /* compute alternate max packet sizes for video */ uif = udev->actconfig->interface[dev->current_pcb_config. 
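The cx231xx rework replaces per-error copies of the cleanup sequence with a single goto ladder: each label undoes exactly the steps that had succeeded before the failure, in reverse order of acquisition. Schematically, with generic placeholder resources rather than the cx231xx helpers:

    struct my_dev;                           /* opaque illustrative device */
    int my_alloc_a(struct my_dev *dev);
    int my_alloc_b(struct my_dev *dev);
    int my_register_c(struct my_dev *dev);
    void my_free_a(struct my_dev *dev);
    void my_free_b(struct my_dev *dev);

    static int my_probe(struct my_dev *dev)
    {
            int ret;

            ret = my_alloc_a(dev);
            if (ret)
                    return ret;              /* nothing to undo yet */

            ret = my_alloc_b(dev);
            if (ret)
                    goto err_free_a;

            ret = my_register_c(dev);
            if (ret)
                    goto err_free_b;

            return 0;

    err_free_b:
            my_free_b(dev);
    err_free_a:
            my_free_a(dev);
            return ret;
    }

The probe hunk additionally postpones usb_get_dev() until after the device structure has been allocated, so the final error path can release the reference unconditionally with usb_put_dev().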
@@ -1275,11 +1268,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->video_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_video_alt; } for (i = 0; i < dev->video_mode.num_alt; i++) { @@ -1309,11 +1299,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->vbi_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_vbi_alt; } for (i = 0; i < dev->vbi_mode.num_alt; i++) { @@ -1344,11 +1331,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_sliced_cc_alt; } for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) { @@ -1380,11 +1364,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface, if (dev->ts1_mode.alt_max_pkt_size == NULL) { cx231xx_errdev("out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); - v4l2_device_unregister(&dev->v4l2_dev); - kfree(dev); - dev = NULL; - return -ENOMEM; + retval = -ENOMEM; + goto err_ts1_alt; } for (i = 0; i < dev->ts1_mode.num_alt; i++) { @@ -1411,6 +1392,29 @@ static int cx231xx_usb_probe(struct usb_interface *interface, request_modules(dev); return 0; +err_ts1_alt: + kfree(dev->sliced_cc_mode.alt_max_pkt_size); +err_sliced_cc_alt: + kfree(dev->vbi_mode.alt_max_pkt_size); +err_vbi_alt: + kfree(dev->video_mode.alt_max_pkt_size); +err_video_alt: + /* cx231xx_uninit_dev: */ + cx231xx_close_extension(dev); + cx231xx_ir_exit(dev); + cx231xx_release_analog_resources(dev); + cx231xx_417_unregister(dev); + cx231xx_remove_from_devlist(dev); + cx231xx_dev_uninit(dev); +err_init: + v4l2_device_unregister(&dev->v4l2_dev); +err_v4l2: + usb_set_intfdata(interface, NULL); +err_if: + usb_put_dev(udev); + kfree(dev); + clear_bit(dev->devno, &cx231xx_devused); + return retval; } /* diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c index d7308ab7a90..2a34ceee480 100644 --- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c +++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c @@ -28,7 +28,7 @@ MODULE_PARM_DESC(pcb_debug, "enable pcb config debug messages [video]"); /******************************************************************************/ -struct pcb_config cx231xx_Scenario[] = { +static struct pcb_config cx231xx_Scenario[] = { { INDEX_SELFPOWER_DIGITAL_ONLY, /* index */ USB_SELF_POWER, /* power_type */ @@ -672,7 +672,7 @@ u32 initialize_cx231xx(struct cx231xx *dev) pcb config it is related to */ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, data, 4); - config_info = le32_to_cpu(*((u32 *) data)); + config_info = le32_to_cpu(*((__le32 *)data)); usb_speed = (u8) (config_info & 0x1); /* Verify this device belongs to Bus power or Self power device */ diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c index d556042cf31..da47d2392f2 100644 --- a/drivers/media/usb/dvb-usb-v2/af9015.c +++ b/drivers/media/usb/dvb-usb-v2/af9015.c @@ -397,12 +397,13 @@ error: return ret; } +#define AF9015_EEPROM_SIZE 256 + /* 
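Some of the one-line changes in tuner-xc2028 and cx231xx-pcb-cfg only swap a __u16/u32 cast for __le16/__le32 in front of le16_to_cpu()/le32_to_cpu(). The data really is little-endian in the buffer, so the annotated pointer type is what makes sparse ("make C=1") endianness checking meaningful. A small illustration; the helper name and blob layout are hypothetical:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Parse a little-endian 16-bit length field at the start of a blob. */
    static u16 my_blob_len(const u8 *p)
    {
            /* Casting through __le16 tells sparse a byte swap is required on
             * big-endian hosts; casting through __u16 hides that information. */
            return le16_to_cpu(*(const __le16 *)p);
    }

Where the field may be unaligned, get_unaligned_le16() is the usual stricter variant, but the annotation question is the same.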
hash (and dump) eeprom */ static int af9015_eeprom_hash(struct dvb_usb_device *d) { struct af9015_state *state = d_to_priv(d); int ret, i; - static const unsigned int AF9015_EEPROM_SIZE = 256; u8 buf[AF9015_EEPROM_SIZE]; struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, NULL}; diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 1ea17dc2a76..c8fcd78425b 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -21,6 +21,9 @@ #include "af9035.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static u16 af9035_checksum(const u8 *buf, size_t len) @@ -126,10 +129,16 @@ exit: /* write multiple registers */ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len) { - u8 wbuf[6 + len]; + u8 wbuf[MAX_XFER_SIZE]; u8 mbox = (reg >> 16) & 0xff; struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; + if (6 + len > sizeof(wbuf)) { + dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", + KBUILD_MODNAME, len); + return -EOPNOTSUPP; + } + wbuf[0] = len; wbuf[1] = 2; wbuf[2] = 0; @@ -228,9 +237,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, msg[1].len); } else { /* I2C */ - u8 buf[5 + msg[0].len]; + u8 buf[MAX_XFER_SIZE]; struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), buf, msg[1].len, msg[1].buf }; + + if (5 + msg[0].len > sizeof(buf)) { + dev_warn(&d->udev->dev, + "%s: i2c xfer: len=%d is too big!\n", + KBUILD_MODNAME, msg[0].len); + return -EOPNOTSUPP; + } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[1].len; buf[1] = msg[0].addr << 1; @@ -257,9 +273,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, msg[0].len - 3); } else { /* I2C */ - u8 buf[5 + msg[0].len]; + u8 buf[MAX_XFER_SIZE]; struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, 0, NULL }; + + if (5 + msg[0].len > sizeof(buf)) { + dev_warn(&d->udev->dev, + "%s: i2c xfer: len=%d is too big!\n", + KBUILD_MODNAME, msg[0].len); + return -EOPNOTSUPP; + } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[0].len; buf[1] = msg[0].addr << 1; diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index e97964ef7f5..2627553f7de 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -23,6 +23,9 @@ #include "lgdt3305.h" #include "lg2160.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + int dvb_usb_mxl111sf_debug; module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level " @@ -57,7 +60,12 @@ int mxl111sf_ctrl_msg(struct dvb_usb_device *d, { int wo = (rbuf == NULL || rlen == 0); /* write-only */ int ret; - u8 sndbuf[1+wlen]; + u8 sndbuf[MAX_XFER_SIZE]; + + if (1 + wlen > sizeof(sndbuf)) { + pr_warn("%s: len=%d is too big!\n", __func__, wlen); + return -EOPNOTSUPP; + } pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen); diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index c0cd0848631..ecca03667f9 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -377,6 +377,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; + struct 
rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf}; dev_dbg(&d->udev->dev, "%s:\n", __func__); @@ -489,6 +490,15 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) goto found; } + /* check R828D ID register; reg=00 val=69 */ + ret = rtl28xxu_ctrl_msg(d, &req_r828d); + if (ret == 0 && buf[0] == 0x69) { + priv->tuner = TUNER_RTL2832_R828D; + priv->tuner_name = "R828D"; + goto found; + } + + found: dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name); @@ -745,6 +755,7 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap) rtl2832_config = &rtl28xxu_rtl2832_e4000_config; break; case TUNER_RTL2832_R820T: + case TUNER_RTL2832_R828D: rtl2832_config = &rtl28xxu_rtl2832_r820t_config; break; default: @@ -866,6 +877,13 @@ static const struct r820t_config rtl2832u_r820t_config = { .rafael_chip = CHIP_R820T, }; +static const struct r820t_config rtl2832u_r828d_config = { + .i2c_addr = 0x3a, + .xtal = 16000000, + .max_i2c_msg_len = 2, + .rafael_chip = CHIP_R828D, +}; + static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) { int ret; @@ -923,6 +941,27 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.read_signal_strength = adap->fe[0]->ops.tuner_ops.get_rf_strength; break; + case TUNER_RTL2832_R828D: + /* power off mn88472 demod on GPIO0 */ + ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x00, 0x01); + if (ret) + goto err; + + ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01); + if (ret) + goto err; + + ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01); + if (ret) + goto err; + + fe = dvb_attach(r820t_attach, adap->fe[0], &d->i2c_adap, + &rtl2832u_r828d_config); + + /* Use tuner to get the signal strength */ + adap->fe[0]->ops.read_signal_strength = + adap->fe[0]->ops.tuner_ops.get_rf_strength; + break; default: fe = NULL; dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME, @@ -1388,6 +1427,9 @@ static const struct usb_device_id rtl28xxu_id_table[] = { &rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) }, { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A, &rtl2832u_props, "Crypto ReDi PC 50 A", NULL) }, + + { DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131, + &rtl2832u_props, "Astrometa DVB-T2", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table); diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h index 729b3540c2f..2142bcb41b4 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h @@ -83,6 +83,7 @@ enum rtl28xxu_tuner { TUNER_RTL2832_TDA18272, TUNER_RTL2832_FC0013, TUNER_RTL2832_R820T, + TUNER_RTL2832_R828D, }; struct rtl28xxu_req { diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index ea2d5ee8657..c11138ebf6f 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -254,7 +254,7 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = { -struct stb0899_config az6027_stb0899_config = { +static struct stb0899_config az6027_stb0899_config = { .init_dev = az6027_stb0899_s1_init_1, .init_s2_demod = stb0899_s2_init_2, .init_s1_demod = az6027_stb0899_s1_init_3, @@ -291,7 +291,7 @@ struct stb0899_config az6027_stb0899_config = { .tuner_set_rfsiggain = NULL, }; -struct stb6100_config az6027_stb6100_config = { +static struct stb6100_config az6027_stb6100_config = { .tuner_address = 0xc0, .refclock = 27000000, }; diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 3940bb0f9ef..20e345d9fe8 
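The rtl28xxu changes wire up the R828D end to end: a new enum value, an ID-register probe in read_config, an r820t_config with the R828D I2C address and crystal, an extra tuner_attach case and a new USB ID. The probe step follows the existing detection idiom; a condensed sketch of it, where struct my_bridge, MY_TUNER_* and my_read_tuner_id() are stand-ins and only the 0x69 ID value is taken from the hunk:

    #include <linux/types.h>
    #include <linux/errno.h>

    enum my_tuner { MY_TUNER_NONE, MY_TUNER_R828D };

    struct my_bridge {                      /* illustrative bridge state */
            enum my_tuner tuner;
            const char *tuner_name;
    };

    int my_read_tuner_id(struct my_bridge *b, u8 *id); /* assumed USB ctrl read */

    static int my_detect_tuner(struct my_bridge *b)
    {
            u8 id = 0;
            int ret = my_read_tuner_id(b, &id);

            if (ret == 0 && id == 0x69) {   /* R828D ID register value */
                    b->tuner = MY_TUNER_R828D;
                    b->tuner_name = "R828D";
                    return 0;
            }
            return -ENODEV;                 /* let the next probe try */
    }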
100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -43,6 +43,9 @@ #include "lgs8gxx.h" #include "atbm8830.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + /* debug */ static int dvb_usb_cxusb_debug; module_param_named(debug, dvb_usb_cxusb_debug, int, 0644); @@ -57,7 +60,14 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d, u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { int wo = (rbuf == NULL || rlen == 0); /* write-only */ - u8 sndbuf[1+wlen]; + u8 sndbuf[MAX_XFER_SIZE]; + + if (1 + wlen > sizeof(sndbuf)) { + warn("i2c wr: len=%d is too big!\n", + wlen); + return -EOPNOTSUPP; + } + memset(sndbuf, 0, 1+wlen); sndbuf[0] = cmd; @@ -158,7 +168,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], if (msg[i].flags & I2C_M_RD) { /* read only */ - u8 obuf[3], ibuf[1+msg[i].len]; + u8 obuf[3], ibuf[MAX_XFER_SIZE]; + + if (1 + msg[i].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[i].len); + return -EOPNOTSUPP; + } obuf[0] = 0; obuf[1] = msg[i].len; obuf[2] = msg[i].addr; @@ -172,7 +188,18 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], } else if (i+1 < num && (msg[i+1].flags & I2C_M_RD) && msg[i].addr == msg[i+1].addr) { /* write to then read from same address */ - u8 obuf[3+msg[i].len], ibuf[1+msg[i+1].len]; + u8 obuf[MAX_XFER_SIZE], ibuf[MAX_XFER_SIZE]; + + if (3 + msg[i].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[i].len); + return -EOPNOTSUPP; + } + if (1 + msg[i + 1].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[i + 1].len); + return -EOPNOTSUPP; + } obuf[0] = msg[i].len; obuf[1] = msg[i+1].len; obuf[2] = msg[i].addr; @@ -191,7 +218,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], i++; } else { /* write only */ - u8 obuf[2+msg[i].len], ibuf; + u8 obuf[MAX_XFER_SIZE], ibuf; + + if (2 + msg[i].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[i].len); + return -EOPNOTSUPP; + } obuf[0] = msg[i].addr; obuf[1] = msg[i].len; memcpy(&obuf[2], msg[i].buf, msg[i].len); diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index c2dded92f1d..6d68af0c49c 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c @@ -12,6 +12,9 @@ #include <linux/kconfig.h> #include "dibusb.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS); @@ -105,11 +108,16 @@ EXPORT_SYMBOL(dibusb2_0_power_ctrl); static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { - u8 sndbuf[wlen+4]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */ + u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */ /* write only ? */ int wo = (rbuf == NULL || rlen == 0), len = 2 + wlen + (wo ? 0 : 2); + if (4 + wlen > sizeof(sndbuf)) { + warn("i2c wr: len=%d is too big!\n", wlen); + return -EOPNOTSUPP; + } + sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; sndbuf[1] = (addr << 1) | (wo ? 
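cxusb (and later dw2102) need the bound check twice in the combined write-then-read case, because both the outgoing and the incoming message are staged in their own stack buffer. A sketch of that shape; the 3-byte/1-byte headers mirror the cxusb layout, while struct my_dev and my_usb_cmd() are illustrative:

    #include <linux/i2c.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    #define MY_MAX_XFER_SIZE 64

    struct my_dev;                          /* opaque illustrative device */
    int my_usb_cmd(struct my_dev *d, u8 *wbuf, int wlen, u8 *rbuf, int rlen);

    /* wr is the register write, rd the read-back from the same address. */
    static int my_wr_then_rd(struct my_dev *d, const struct i2c_msg *wr,
                             const struct i2c_msg *rd)
    {
            u8 obuf[MY_MAX_XFER_SIZE], ibuf[MY_MAX_XFER_SIZE];
            int ret;

            if (3 + wr->len > sizeof(obuf) || 1 + rd->len > sizeof(ibuf))
                    return -EOPNOTSUPP;     /* either direction too large */

            obuf[0] = wr->len;
            obuf[1] = rd->len;
            obuf[2] = wr->addr;
            memcpy(&obuf[3], wr->buf, wr->len);

            /* one USB command carries the write and returns the read data */
            ret = my_usb_cmd(d, obuf, 3 + wr->len, ibuf, 1 + rd->len);
            if (ret == 0)
                    memcpy(rd->buf, &ibuf[1], rd->len); /* skip status byte */
            return ret;
    }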
0 : 1); diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 6e237b6dd0a..c1a63b2a6ba 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -30,6 +30,9 @@ #include "stb6100_proc.h" #include "m88rs2000.h" +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + #ifndef USB_PID_DW2102 #define USB_PID_DW2102 0x2102 #endif @@ -308,7 +311,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms case 2: { /* read */ /* first write first register number */ - u8 ibuf[msg[1].len + 2], obuf[3]; + u8 ibuf[MAX_XFER_SIZE], obuf[3]; + + if (2 + msg[1].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; @@ -325,7 +335,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms switch (msg[0].addr) { case 0x68: { /* write to register */ - u8 obuf[msg[0].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[0].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); @@ -335,7 +352,14 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms } case 0x61: { /* write to tuner */ - u8 obuf[msg[0].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[0].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); @@ -401,7 +425,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ - u8 ibuf[msg[j].len + 2]; + u8 ibuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, @@ -430,7 +461,14 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i } while (len > 0); } else { /* write registers */ - u8 obuf[msg[j].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); @@ -463,7 +501,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], case 2: { /* read */ /* first write first register number */ - u8 ibuf[msg[1].len + 2], obuf[3]; + u8 ibuf[MAX_XFER_SIZE], obuf[3]; + + if (2 + msg[1].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[1].len); + return -EOPNOTSUPP; + } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; @@ -481,7 +525,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], case 0x60: case 0x0c: { /* write to register */ - u8 obuf[msg[0].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[0].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[0].len); + return -EOPNOTSUPP; + } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); @@ -563,7 +613,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ - u8 
ibuf[msg[j].len]; + u8 ibuf[MAX_XFER_SIZE]; + + if (msg[j].len > sizeof(ibuf)) { + warn("i2c rd: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); @@ -590,7 +647,14 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], } while (len > 0); } else if (j < (num - 1)) { /* write register addr before read */ - u8 obuf[msg[j].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } + obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); @@ -602,7 +666,13 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], break; } else { /* write registers */ - u8 obuf[msg[j].len + 2]; + u8 obuf[MAX_XFER_SIZE]; + + if (2 + msg[j].len > sizeof(obuf)) { + warn("i2c wr: len=%d is too big!\n", + msg[j].len); + return -EOPNOTSUPP; + } obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); @@ -955,9 +1025,10 @@ static struct ds3000_config dw2104_ds3000_config = { .demod_address = 0x68, }; -static struct ts2020_config dw2104_ts2020_config = { +static struct ts2020_config dw2104_ts2020_config = { .tuner_address = 0x60, .clk_out_div = 1, + .frequency_div = 1060000, }; static struct ds3000_config s660_ds3000_config = { @@ -966,6 +1037,12 @@ static struct ds3000_config s660_ds3000_config = { .set_lock_led = dw210x_led_ctrl, }; +static struct ts2020_config s660_ts2020_config = { + .tuner_address = 0x60, + .clk_out_div = 1, + .frequency_div = 1146000, +}; + static struct stv0900_config dw2104a_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, @@ -1205,7 +1282,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d) if (d->fe_adap[0].fe == NULL) return -EIO; - dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config, + dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config, &d->dev->i2c_adap); st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage; @@ -1213,7 +1290,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d) dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); - info("Attached ds3000+ds2020!\n"); + info("Attached ds3000+ts2020!\n"); return 0; } diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c index 73cc50afa5e..d666741797d 100644 --- a/drivers/media/usb/em28xx/em28xx-camera.c +++ b/drivers/media/usb/em28xx/em28xx-camera.c @@ -22,6 +22,7 @@ #include <linux/i2c.h> #include <media/soc_camera.h> #include <media/mt9v011.h> +#include <media/v4l2-clk.h> #include <media/v4l2-common.h> #include "em28xx.h" @@ -47,6 +48,7 @@ static struct soc_camera_link camlink = { .bus_id = 0, .flags = 0, .module_name = "em28xx", + .unbalanced_power = true, }; @@ -325,13 +327,24 @@ int em28xx_detect_sensor(struct em28xx *dev) int em28xx_init_camera(struct em28xx *dev) { + char clk_name[V4L2_SUBDEV_NAME_SIZE]; + struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus]; + struct i2c_adapter *adap = &dev->i2c_adap[dev->def_i2c_bus]; + int ret = 0; + + v4l2_clk_name_i2c(clk_name, sizeof(clk_name), + i2c_adapter_id(adap), client->addr); + dev->clk = v4l2_clk_register_fixed(clk_name, "mclk", -EINVAL); + if (IS_ERR(dev->clk)) + return PTR_ERR(dev->clk); + switch (dev->em28xx_sensor) { case EM28XX_MT9V011: { struct mt9v011_platform_data pdata; struct i2c_board_info mt9v011_info = { .type = "mt9v011", - .addr = 
dev->i2c_client[dev->def_i2c_bus].addr, + .addr = client->addr, .platform_data = &pdata, }; @@ -352,10 +365,11 @@ int em28xx_init_camera(struct em28xx *dev) dev->sensor_xtal = 4300000; pdata.xtal = dev->sensor_xtal; if (NULL == - v4l2_i2c_new_subdev_board(&dev->v4l2_dev, - &dev->i2c_adap[dev->def_i2c_bus], - &mt9v011_info, NULL)) - return -ENODEV; + v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap, + &mt9v011_info, NULL)) { + ret = -ENODEV; + break; + } /* probably means GRGB 16 bit bayer */ dev->vinmode = 0x0d; dev->vinctl = 0x00; @@ -391,7 +405,7 @@ int em28xx_init_camera(struct em28xx *dev) struct i2c_board_info ov2640_info = { .type = "ov2640", .flags = I2C_CLIENT_SCCB, - .addr = dev->i2c_client[dev->def_i2c_bus].addr, + .addr = client->addr, .platform_data = &camlink, }; struct v4l2_mbus_framefmt fmt; @@ -408,9 +422,12 @@ int em28xx_init_camera(struct em28xx *dev) dev->sensor_yres = 480; subdev = - v4l2_i2c_new_subdev_board(&dev->v4l2_dev, - &dev->i2c_adap[dev->def_i2c_bus], + v4l2_i2c_new_subdev_board(&dev->v4l2_dev, adap, &ov2640_info, NULL); + if (NULL == subdev) { + ret = -ENODEV; + break; + } fmt.code = V4L2_MBUS_FMT_YUYV8_2X8; fmt.width = 640; @@ -427,8 +444,13 @@ int em28xx_init_camera(struct em28xx *dev) } case EM28XX_NOSENSOR: default: - return -EINVAL; + ret = -EINVAL; } - return 0; + if (ret < 0) { + v4l2_clk_unregister_fixed(dev->clk); + dev->clk = NULL; + } + + return ret; } diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index dc65742c4bb..a5196697627 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -36,6 +36,7 @@ #include <media/tvaudio.h> #include <media/i2c-addr.h> #include <media/tveeprom.h> +#include <media/v4l2-clk.h> #include <media/v4l2-common.h> #include "em28xx.h" @@ -95,8 +96,8 @@ static struct em28xx_reg_seq default_digital[] = { /* Board Hauppauge WinTV HVR 900 analog */ static struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = { {EM2820_R08_GPIO_CTRL, 0x2d, ~EM_GPIO_4, 10}, - {0x05, 0xff, 0x10, 10}, - { -1, -1, -1, -1}, + { 0x05, 0xff, 0x10, 10}, + { -1, -1, -1, -1}, }; /* Board Hauppauge WinTV HVR 900 digital */ @@ -104,20 +105,20 @@ static struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = { {EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x04, 0x0f, 10}, {EM2880_R04_GPO, 0x0c, 0x0f, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Board Hauppauge WinTV HVR 900 (R2) digital */ static struct em28xx_reg_seq hauppauge_wintv_hvr_900R2_digital[] = { {EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x0c, 0x0f, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */ static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = { - {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10}, + { -1, -1, -1, -1}, }; /* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */ @@ -132,7 +133,7 @@ static struct em28xx_reg_seq em2882_kworld_315u_digital[] = { {EM2880_R04_GPO, 0x04, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x7e, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = { @@ -140,19 +141,19 @@ static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = { {EM2880_R04_GPO, 0x0c, 0xff, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, 
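The em28xx-camera hunk registers a fixed v4l2 clock named after the sensor's I2C bus and address before probing the sensor subdevices, and unregisters it again when subdevice setup fails (and, further down, in em28xx_release_resources). A condensed sketch of that lifecycle, reusing the calls visible in the hunk; struct my_cam is an illustrative container:

    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <media/v4l2-clk.h>
    #include <media/v4l2-subdev.h>

    struct my_cam {                         /* illustrative container */
            struct v4l2_clk *clk;
    };

    static int my_register_sensor_clk(struct my_cam *cam,
                                      struct i2c_adapter *adap, u16 addr)
    {
            char clk_name[V4L2_SUBDEV_NAME_SIZE];

            v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
                              i2c_adapter_id(adap), addr);
            /* the rate is unknown here, hence the -EINVAL placeholder */
            cam->clk = v4l2_clk_register_fixed(clk_name, "mclk", -EINVAL);
            if (IS_ERR(cam->clk))
                    return PTR_ERR(cam->clk);
            return 0;
    }

    static void my_unregister_sensor_clk(struct my_cam *cam)
    {
            if (cam->clk) {
                    v4l2_clk_unregister_fixed(cam->clk);
                    cam->clk = NULL;
            }
    }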
-1}, }; static struct em28xx_reg_seq kworld_330u_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x00, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq kworld_330u_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Evga inDtube @@ -170,11 +171,11 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { {EM2820_R08_GPIO_CTRL, 0x7a, 0xff, 1}, {EM2880_R04_GPO, 0x04, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 1}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* - * KWorld PlusTV 340U and UB435-Q (ATSC) GPIOs map: + * KWorld PlusTV 340U, UB435-Q and UB435-Q V2 (ATSC) GPIOs map: * EM_GPIO_0 - currently unknown * EM_GPIO_1 - LED disable/enable (1 = off, 0 = on) * EM_GPIO_2 - currently unknown @@ -185,8 +186,8 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { * EM_GPIO_7 - currently unknown */ static struct em28xx_reg_seq kworld_a340_digital[] = { - {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, + { -1, -1, -1, -1}, }; /* Pinnacle Hybrid Pro eb1a:2881 */ @@ -205,13 +206,13 @@ static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = { static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x00, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* eb1a:2868 Reddo DVB-C USB TV Box @@ -225,7 +226,7 @@ static struct em28xx_reg_seq reddo_dvb_c_usb_box[] = { {EM2820_R08_GPIO_CTRL, 0x7f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x6f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, - {-1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Callback for the most boards */ @@ -233,23 +234,23 @@ static struct em28xx_reg_seq default_tuner_gpio[] = { {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, 0, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Mute/unmute */ static struct em28xx_reg_seq compro_unmute_tv_gpio[] = { - {EM2820_R08_GPIO_CTRL, 5, 7, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 5, 7, 10}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq compro_unmute_svid_gpio[] = { - {EM2820_R08_GPIO_CTRL, 4, 7, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 4, 7, 10}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq compro_mute_gpio[] = { - {EM2820_R08_GPIO_CTRL, 6, 7, 10}, - { -1, -1, -1, -1}, + {EM2820_R08_GPIO_CTRL, 6, 7, 10}, + { -1, -1, -1, -1}, }; /* Terratec AV350 */ @@ -279,21 +280,21 @@ static struct em28xx_reg_seq vc211a_enable[] = { static struct em28xx_reg_seq dikom_dk300_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* Reset for the most [digital] boards */ static struct em28xx_reg_seq leadership_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq leadership_reset[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* 2013:024f PCTV nanoStick T2 290e @@ -304,7 +305,7 @@ static struct em28xx_reg_seq pctv_290e[] 
= { {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 80}, {EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 80}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0xc0, 0xff, 80}, /* GPIO_7 = 1 */ - {-1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #if 0 @@ -313,14 +314,14 @@ static struct em28xx_reg_seq terratec_h5_gpio[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq terratec_h5_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #endif @@ -335,12 +336,12 @@ static struct em28xx_reg_seq terratec_h5_digital[] = { * GPIO_7 - LED (green LED) */ static struct em28xx_reg_seq pctv_460e[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50}, - {0x0d, 0xff, 0xff, 50}, - {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */ - {0x0d, 0x42, 0xff, 50}, - {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */ - { -1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50}, + { 0x0d, 0xff, 0xff, 50}, + {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */ + { 0x0d, 0x42, 0xff, 50}, + {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */ + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq c3tech_digital_duo_digital[] = { @@ -352,7 +353,7 @@ static struct em28xx_reg_seq c3tech_digital_duo_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 20}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #if 0 @@ -361,14 +362,14 @@ static struct em28xx_reg_seq hauppauge_930c_gpio[] = { {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, /* xc5000 reset */ {EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; static struct em28xx_reg_seq hauppauge_930c_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; #endif @@ -378,10 +379,10 @@ static struct em28xx_reg_seq hauppauge_930c_digital[] = { * GPIO_7 - LED, 0=active */ static struct em28xx_reg_seq maxmedia_ub425_tc[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100}, - {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */ - {-1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100}, + {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */ + { -1, -1, -1, -1}, }; /* 2304:0242 PCTV QuatroStick (510e) @@ -391,10 +392,10 @@ static struct em28xx_reg_seq maxmedia_ub425_tc[] = { * GPIO_7: LED, 1=active */ static struct em28xx_reg_seq pctv_510e[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, - {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ - { -1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, + {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ + { -1, -1, -1, -1}, }; /* 2013:0251 PCTV QuatroStick nano (520e) @@ -404,11 +405,11 @@ static struct em28xx_reg_seq pctv_510e[] = { * GPIO_7: LED, 1=active */ static struct em28xx_reg_seq pctv_520e[] = { - {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, - {EM2874_R80_GPIO_P0_CTRL, 0x14, 
0xff, 100}, /* GPIO_2 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ - {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */ - { -1, -1, -1, -1}, + {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, + {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ + {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */ + { -1, -1, -1, -1}, }; /* @@ -2030,6 +2031,18 @@ struct em28xx_board em28xx_boards[] = { .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, + /* + * 1b80:e346 KWorld USB ATSC TV Stick UB435-Q V2 + * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2 + */ + [EM2874_BOARD_KWORLD_UB435Q_V2] = { + .name = "KWorld USB ATSC TV Stick UB435-Q V2", + .tuner_type = TUNER_ABSENT, + .has_dvb = 1, + .dvb_gpio = kworld_a340_digital, + .tuner_gpio = default_tuner_gpio, + .def_i2c_bus = 1, + }, }; const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); @@ -2173,6 +2186,8 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM2860_BOARD_GADMEI_UTV330 }, { USB_DEVICE(0x1b80, 0xa340), .driver_info = EM2870_BOARD_KWORLD_A340 }, + { USB_DEVICE(0x1b80, 0xe346), + .driver_info = EM2874_BOARD_KWORLD_UB435Q_V2 }, { USB_DEVICE(0x2013, 0x024f), .driver_info = EM28174_BOARD_PCTV_290E }, { USB_DEVICE(0x2013, 0x024c), @@ -2857,6 +2872,8 @@ void em28xx_release_resources(struct em28xx *dev) if (dev->def_i2c_bus) em28xx_i2c_unregister(dev, 1); em28xx_i2c_unregister(dev, 0); + if (dev->clk) + v4l2_clk_unregister_fixed(dev->clk); v4l2_ctrl_handler_free(&dev->ctrl_handler); diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index bb1e8dca80c..344042bb845 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -298,6 +298,18 @@ static struct lgdt3305_config em2870_lgdt3304_dev = { .qam_if_khz = 4000, }; +static struct lgdt3305_config em2874_lgdt3305_dev = { + .i2c_addr = 0x0e, + .demod_chip = LGDT3305, + .spectral_inversion = 1, + .deny_i2c_rptr = 0, + .mpeg_mode = LGDT3305_MPEG_SERIAL, + .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE, + .tpvalid_polarity = LGDT3305_TP_VALID_HIGH, + .vsb_if_khz = 3250, + .qam_if_khz = 4000, +}; + static struct s921_config sharp_isdbt = { .demod_address = 0x30 >> 1 }; @@ -329,6 +341,11 @@ static struct tda18271_config kworld_a340_config = { .std_map = &kworld_a340_std_map, }; +static struct tda18271_config kworld_ub435q_v2_config = { + .std_map = &kworld_a340_std_map, + .gate = TDA18271_GATE_DIGITAL, +}; + static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = { .demod_address = (0x1e >> 1), .no_tuner = 1, @@ -384,7 +401,10 @@ static struct drxk_config maxmedia_ub425_tc_drxk = { .adr = 0x29, .single_master = 1, .no_i2c_bridge = 1, + .microcode_name = "dvb-demod-drxk-01.fw", + .chunk_size = 62, .load_firmware_sync = true, + .qam_demod_parameter_count = 2, }; static struct drxk_config pctv_520e_drxk = { @@ -424,7 +444,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65}, {EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32}, {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq hauppauge_hvr930c_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01}, @@ -439,7 +459,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xcf, 0xff, 0x0b}, {EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x65}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct { @@ -491,13 +511,13 
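Adding the KWorld UB435-Q V2 is the usual three-step: a new board index in em28xx.h, an em28xx_boards[] entry describing tuner, GPIO and DVB properties, and a USB ID table entry mapping 1b80:e346 to that index via driver_info. The ID-table half of that, shown generically (the vendor, product and board values are the ones from the hunks, the table name is illustrative):

    #include <linux/module.h>
    #include <linux/usb.h>

    #define MY_BOARD_KWORLD_UB435Q_V2 90     /* board index from the em28xx.h hunk */

    static const struct usb_device_id my_id_table[] = {
            { USB_DEVICE(0x1b80, 0xe346),    /* KWorld UB435-Q V2 */
              .driver_info = MY_BOARD_KWORLD_UB435Q_V2 },
            { }                              /* mandatory terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, my_id_table);

probe() then uses id->driver_info to pick the matching em28xx_boards[] profile.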
@@ static void terratec_h5_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq terratec_h5_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct { unsigned char r[4]; @@ -547,12 +567,12 @@ static void terratec_htc_stick_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq terratec_htc_stick_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* @@ -594,13 +614,13 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev) {EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; struct em28xx_reg_seq terratec_htc_usb_xs_end[] = { {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, - { -1, -1, -1, -1}, + { -1, -1, -1, -1}, }; /* @@ -1227,18 +1247,14 @@ static int em28xx_dvb_init(struct em28xx *dev) dvb->fe[0]->ops.i2c_gate_ctrl = NULL; /* attach tuner */ - if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0], - &dev->i2c_adap[dev->def_i2c_bus], 0x60)) { + if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60, + &dev->i2c_adap[dev->def_i2c_bus], + &em28xx_cxd2820r_tda18271_config)) { dvb_frontend_detach(dvb->fe[0]); result = -EINVAL; goto out_free; } } - - /* TODO: we need drx-3913k firmware in order to support DVB-T */ - em28xx_info("MaxMedia UB425-TC/Delock 61959: only DVB-C " \ - "supported by that driver version\n"); - break; case EM2884_BOARD_PCTV_510E: case EM2884_BOARD_PCTV_520E: @@ -1297,6 +1313,23 @@ static int em28xx_dvb_init(struct em28xx *dev) goto out_free; } break; + case EM2874_BOARD_KWORLD_UB435Q_V2: + dvb->fe[0] = dvb_attach(lgdt3305_attach, + &em2874_lgdt3305_dev, + &dev->i2c_adap[dev->def_i2c_bus]); + if (!dvb->fe[0]) { + result = -EINVAL; + goto out_free; + } + + /* Attach the demodulator. */ + if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60, + &dev->i2c_adap[dev->def_i2c_bus], + &kworld_ub435q_v2_config)) { + result = -EINVAL; + goto out_free; + } + break; default: em28xx_errdev("/2: The frontend of your DVB/ATSC card" " isn't supported yet\n"); diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 9d103344f34..fc5d60efd4a 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -638,7 +638,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) if (rc) return rc; - if (dev->streaming_users++ == 0) { + if (dev->streaming_users == 0) { /* First active streaming user, so allocate all the URBs */ /* Allocate the USB bandwidth */ @@ -657,7 +657,7 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) dev->packet_multiplier, em28xx_urb_data_copy); if (rc < 0) - goto fail; + return rc; /* * djh: it's not clear whether this code is still needed. 
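The new em28xx_dvb_init case binds the frontend in two dvb_attach() steps: the LG DT3305 demodulator first (whose attach returns the frontend), then the TDA18271 tuner hooked onto it, unwinding if either is missing. A sketch of the pairing as a stand-alone helper; it assumes the em2874_lgdt3305_dev and kworld_ub435q_v2_config structures from the hunks above are visible in the same file:

    #include <linux/i2c.h>
    #include "dvb_frontend.h"
    #include "lgdt3305.h"
    #include "tda18271.h"

    static struct dvb_frontend *my_attach_ub435q_v2(struct i2c_adapter *i2c)
    {
            struct dvb_frontend *fe;

            fe = dvb_attach(lgdt3305_attach, &em2874_lgdt3305_dev, i2c);
            if (!fe)
                    return NULL;             /* demodulator not found */

            if (!dvb_attach(tda18271_attach, fe, 0x60, i2c,
                            &kworld_ub435q_v2_config)) {
                    dvb_frontend_detach(fe); /* drop the demod again */
                    return NULL;
            }

            return fe;
    }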
I'm @@ -675,7 +675,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f); } -fail: + dev->streaming_users++; + return rc; } diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 205e9038b1c..f8726ad5d0a 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -131,6 +131,7 @@ #define EM2884_BOARD_TERRATEC_HTC_USB_XS 87 #define EM2884_BOARD_C3TECH_DIGITAL_DUO 88 #define EM2874_BOARD_DELOCK_61959 89 +#define EM2874_BOARD_KWORLD_UB435Q_V2 90 /* Limits minimum and default number of buffers */ #define EM28XX_MIN_BUF 4 @@ -492,6 +493,7 @@ struct em28xx { struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler ctrl_handler; + struct v4l2_clk *clk; struct em28xx_board board; /* Webcam specific fields */ diff --git a/drivers/media/usb/gspca/conex.c b/drivers/media/usb/gspca/conex.c index 38714df31ac..2e15c80d6e3 100644 --- a/drivers/media/usb/gspca/conex.c +++ b/drivers/media/usb/gspca/conex.c @@ -783,7 +783,8 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 064b53043b1..f23df4a9d8c 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -1553,9 +1553,9 @@ static int sd_start(struct gspca_dev *gspca_dev) sd->params.format.videoSize = VIDEOSIZE_CIF; sd->params.roi.colEnd = sd->params.roi.colStart + - (gspca_dev->width >> 3); + (gspca_dev->pixfmt.width >> 3); sd->params.roi.rowEnd = sd->params.roi.rowStart + - (gspca_dev->height >> 2); + (gspca_dev->pixfmt.height >> 2); /* And now set the camera to a known state */ ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode, diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 048507b27bb..f3a7ace0fac 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -504,8 +504,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file, unsigned int frsz; int i; - i = gspca_dev->curr_mode; - frsz = gspca_dev->cam.cam_mode[i].sizeimage; + frsz = gspca_dev->pixfmt.sizeimage; PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz); frsz = PAGE_ALIGN(frsz); if (count >= GSPCA_MAX_FRAMES) @@ -627,16 +626,14 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt, static u32 which_bandwidth(struct gspca_dev *gspca_dev) { u32 bandwidth; - int i; /* get the (max) image size */ - i = gspca_dev->curr_mode; - bandwidth = gspca_dev->cam.cam_mode[i].sizeimage; + bandwidth = gspca_dev->pixfmt.sizeimage; /* if the image is compressed, estimate its mean size */ if (!gspca_dev->cam.needs_full_bandwidth && - bandwidth < gspca_dev->cam.cam_mode[i].width * - gspca_dev->cam.cam_mode[i].height) + bandwidth < gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height) bandwidth = bandwidth * 3 / 8; /* 0.375 */ /* estimate the frame rate */ @@ -650,7 +647,7 @@ static u32 which_bandwidth(struct gspca_dev *gspca_dev) /* don't hope more than 15 fps with USB 1.1 and * image resolution >= 640x480 */ - if (gspca_dev->width >= 640 + if (gspca_dev->pixfmt.width >= 640 && gspca_dev->dev->speed == USB_SPEED_FULL) bandwidth *= 15; /* 15 fps */ else @@ -982,9 +979,7 @@ static void 
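The em28xx-video fix stops counting a streaming user before the start sequence has actually succeeded: the post-increment inside the if() becomes a plain test, and the increment moves past the last failure point, so an aborted start no longer leaves a phantom user behind. Reduced to its essentials; my_start_hw() stands in for the URB allocation and submission done in the real path:

    struct my_dev {                          /* illustrative */
            int streaming_users;
    };

    int my_start_hw(struct my_dev *dev);     /* assumed: allocates and submits URBs */

    static int my_start_streaming(struct my_dev *dev)
    {
            int rc = 0;

            if (dev->streaming_users == 0) { /* first active user? */
                    rc = my_start_hw(dev);
                    if (rc < 0)
                            return rc;       /* count stays at zero */
            }

            dev->streaming_users++;          /* counted only after success */
            return rc;
    }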
gspca_set_default_mode(struct gspca_dev *gspca_dev) i = gspca_dev->cam.nmodes - 1; /* take the highest mode */ gspca_dev->curr_mode = i; - gspca_dev->width = gspca_dev->cam.cam_mode[i].width; - gspca_dev->height = gspca_dev->cam.cam_mode[i].height; - gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat; + gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i]; /* does nothing if ctrl_handler == NULL */ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler); @@ -1105,10 +1100,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct gspca_dev *gspca_dev = video_drvdata(file); - int mode; - mode = gspca_dev->curr_mode; - fmt->fmt.pix = gspca_dev->cam.cam_mode[mode]; + fmt->fmt.pix = gspca_dev->pixfmt; /* some drivers use priv internally, zero it before giving it to userspace */ fmt->fmt.pix.priv = 0; @@ -1140,6 +1133,12 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev, mode = mode2; } fmt->fmt.pix = gspca_dev->cam.cam_mode[mode]; + if (gspca_dev->sd_desc->try_fmt) { + /* pass original resolution to subdriver try_fmt */ + fmt->fmt.pix.width = w; + fmt->fmt.pix.height = h; + gspca_dev->sd_desc->try_fmt(gspca_dev, fmt); + } /* some drivers use priv internally, zero it before giving it to userspace */ fmt->fmt.pix.priv = 0; @@ -1178,19 +1177,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, goto out; } - if (ret == gspca_dev->curr_mode) { - ret = 0; - goto out; /* same mode */ - } - if (gspca_dev->streaming) { ret = -EBUSY; goto out; } - gspca_dev->width = fmt->fmt.pix.width; - gspca_dev->height = fmt->fmt.pix.height; - gspca_dev->pixfmt = fmt->fmt.pix.pixelformat; gspca_dev->curr_mode = ret; + if (gspca_dev->sd_desc->try_fmt) + /* subdriver try_fmt can modify format parameters */ + gspca_dev->pixfmt = fmt->fmt.pix; + else + gspca_dev->pixfmt = gspca_dev->cam.cam_mode[ret]; ret = 0; out: @@ -1205,6 +1201,9 @@ static int vidioc_enum_framesizes(struct file *file, void *priv, int i; __u32 index = 0; + if (gspca_dev->sd_desc->enum_framesizes) + return gspca_dev->sd_desc->enum_framesizes(gspca_dev, fsize); + for (i = 0; i < gspca_dev->cam.nmodes; i++) { if (fsize->pixel_format != gspca_dev->cam.cam_mode[i].pixelformat) @@ -1471,8 +1470,9 @@ static int vidioc_streamon(struct file *file, void *priv, if (ret < 0) goto out; } - PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", gspca_dev->pixfmt, - gspca_dev->width, gspca_dev->height); + PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", + gspca_dev->pixfmt.pixelformat, + gspca_dev->pixfmt.width, gspca_dev->pixfmt.height); ret = 0; out: mutex_unlock(&gspca_dev->queue_lock); diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h index ac0b11f46f5..300642dc1a1 100644 --- a/drivers/media/usb/gspca/gspca.h +++ b/drivers/media/usb/gspca/gspca.h @@ -88,6 +88,10 @@ typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev, typedef int (*cam_int_pkt_op) (struct gspca_dev *gspca_dev, u8 *data, int len); +typedef void (*cam_format_op) (struct gspca_dev *gspca_dev, + struct v4l2_format *fmt); +typedef int (*cam_frmsize_op) (struct gspca_dev *gspca_dev, + struct v4l2_frmsizeenum *fsize); /* subdriver description */ struct sd_desc { @@ -109,6 +113,8 @@ struct sd_desc { cam_set_jpg_op set_jcomp; cam_streamparm_op get_streamparm; cam_streamparm_op set_streamparm; + cam_format_op try_fmt; + cam_frmsize_op enum_framesizes; #ifdef CONFIG_VIDEO_ADV_DEBUG cam_set_reg_op set_register; cam_get_reg_op get_register; @@ -183,9 +189,7 @@ struct gspca_dev { __u8 streaming; /* protected by both 
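Besides folding width/height/pixfmt into one struct v4l2_pix_format, the gspca core gains two optional subdriver hooks: try_fmt, which is handed the user's original resolution so the subdriver can adjust the negotiated format, and enum_framesizes, which lets a subdriver report sizes beyond the fixed cam_mode[] list. A hypothetical subdriver wiring them up might look like this; the stepwise limits are invented, only the callback signatures and sd_desc fields come from the gspca.h hunk:

    #include <linux/kernel.h>
    #include <linux/videodev2.h>
    #include "gspca.h"

    static void sd_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt)
    {
            /* clamp the requested size to what the sensor can scale to */
            fmt->fmt.pix.width  = clamp(fmt->fmt.pix.width,  160U, 2048U);
            fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 120U, 1536U);
    }

    static int sd_enum_framesizes(struct gspca_dev *gspca_dev,
                                  struct v4l2_frmsizeenum *fsize)
    {
            if (fsize->index != 0)
                    return -EINVAL;
            fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
            fsize->stepwise.min_width = 160;
            fsize->stepwise.max_width = 2048;
            fsize->stepwise.step_width = 8;
            fsize->stepwise.min_height = 120;
            fsize->stepwise.max_height = 1536;
            fsize->stepwise.step_height = 8;
            return 0;
    }

    static const struct sd_desc sd_desc = {
            /* ...name, config, init, start, pkt_scan as usual... */
            .try_fmt = sd_try_fmt,
            .enum_framesizes = sd_enum_framesizes,
    };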
mutexes (*) */ __u8 curr_mode; /* current camera mode */ - __u32 pixfmt; /* current mode parameters */ - __u16 width; - __u16 height; + struct v4l2_pix_format pixfmt; /* current mode parameters */ __u32 sequence; /* frame sequence number */ wait_queue_head_t wq; /* wait queue */ diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c index 8da3dde3838..19736e237b3 100644 --- a/drivers/media/usb/gspca/jeilinj.c +++ b/drivers/media/usb/gspca/jeilinj.c @@ -378,11 +378,12 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *dev = (struct sd *) gspca_dev; /* create the JPEG header */ - jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ jpeg_set_qual(dev->jpeg_hdr, dev->quality); PDEBUG(D_STREAM, "Start streaming at %dx%d", - gspca_dev->height, gspca_dev->width); + gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); jlj_start(gspca_dev); return gspca_dev->usb_err; } diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c index fdaeeb14453..5b481fa4309 100644 --- a/drivers/media/usb/gspca/jl2005bcd.c +++ b/drivers/media/usb/gspca/jl2005bcd.c @@ -455,7 +455,7 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; sd->cap_mode = gspca_dev->cam.cam_mode; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 640: PDEBUG(D_STREAM, "Start streaming at vga resolution"); jl2005c_stream_start_vga_lg(gspca_dev); diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c index cfa4663f893..27fcef11aef 100644 --- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c +++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c @@ -266,7 +266,7 @@ static int mt9m111_set_hvflip(struct gspca_dev *gspca_dev) return err; data[0] = MT9M111_RMB_OVER_SIZED; - if (gspca_dev->width == 640) { + if (gspca_dev->pixfmt.width == 640) { data[1] = MT9M111_RMB_ROW_SKIP_2X | MT9M111_RMB_COLUMN_SKIP_2X | (hflip << 1) | vflip; diff --git a/drivers/media/usb/gspca/mars.c b/drivers/media/usb/gspca/mars.c index ff2c5abf115..779a8785f42 100644 --- a/drivers/media/usb/gspca/mars.c +++ b/drivers/media/usb/gspca/mars.c @@ -254,7 +254,8 @@ static int sd_start(struct gspca_dev *gspca_dev) int i; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); @@ -270,8 +271,8 @@ static int sd_start(struct gspca_dev *gspca_dev) data[0] = 0x00; /* address */ data[1] = 0x0c | 0x01; /* reg 0 */ data[2] = 0x01; /* reg 1 */ - data[3] = gspca_dev->width / 8; /* h_size , reg 2 */ - data[4] = gspca_dev->height / 8; /* v_size , reg 3 */ + data[3] = gspca_dev->pixfmt.width / 8; /* h_size , reg 2 */ + data[4] = gspca_dev->pixfmt.height / 8; /* v_size , reg 3 */ data[5] = 0x30; /* reg 4, MI, PAS5101 : * 0x30 for 24mhz , 0x28 for 12mhz */ data[6] = 0x02; /* reg 5, H start - was 0x04 */ diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c index 68bb2f35966..f006e29ca01 100644 --- a/drivers/media/usb/gspca/mr97310a.c +++ b/drivers/media/usb/gspca/mr97310a.c @@ -521,7 +521,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev) if (sd->sensor_type) data[5] = 0xbb; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: data[9] |= 0x04; /* reg 8, 2:1 scale down from 
320 */ /* fall thru */ @@ -618,7 +618,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) data[10] = 0x18; } - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: data[9] |= 0x0c; /* reg 8, 4:1 scale down */ /* fall thru */ @@ -847,7 +847,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 min_clockdiv) u8 clockdiv = (60 * expo + 7999) / 8000; /* Limit framerate to not exceed usb bandwidth */ - if (clockdiv < min_clockdiv && gspca_dev->width >= 320) + if (clockdiv < min_clockdiv && gspca_dev->pixfmt.width >= 320) clockdiv = min_clockdiv; else if (clockdiv < 2) clockdiv = 2; diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c index 44c9964b1b3..599f755e75b 100644 --- a/drivers/media/usb/gspca/nw80x.c +++ b/drivers/media/usb/gspca/nw80x.c @@ -1708,7 +1708,7 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val) reg_r(gspca_dev, 0x1004, 1); if (gspca_dev->usb_buf[0] & 0x04) { /* if AE_FULL_FRM */ - sd->ae_res = gspca_dev->width * gspca_dev->height; + sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; } else { /* get the AE window size */ reg_r(gspca_dev, 0x1011, 8); w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0] @@ -1717,7 +1717,8 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val) - (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6]; sd->ae_res = h * w; if (sd->ae_res == 0) - sd->ae_res = gspca_dev->width * gspca_dev->height; + sd->ae_res = gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height; } } @@ -1856,21 +1857,21 @@ static int sd_start(struct gspca_dev *gspca_dev) reg_w_buf(gspca_dev, cmd); switch (sd->webcam) { case P35u: - if (gspca_dev->width == 320) + if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, nw801_start_qvga); else reg_w_buf(gspca_dev, nw801_start_vga); reg_w_buf(gspca_dev, nw801_start_2); break; case Kr651us: - if (gspca_dev->width == 320) + if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, kr651_start_qvga); else reg_w_buf(gspca_dev, kr651_start_vga); reg_w_buf(gspca_dev, kr651_start_2); break; case Proscope: - if (gspca_dev->width == 320) + if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, proscope_start_qvga); else reg_w_buf(gspca_dev, proscope_start_vga); diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index 8937d79fd17..c95f32a0c02 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -3468,7 +3468,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) switch (sd->bridge) { case BRIDGE_OVFX2: - if (gspca_dev->width != 800) + if (gspca_dev->pixfmt.width != 800) gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE; else gspca_dev->cam.bulk_size = 7 * 4096; @@ -3507,8 +3507,8 @@ static void ov511_mode_init_regs(struct sd *sd) /* Here I'm assuming that snapshot size == image size. * I hope that's always true. 
--claudio */ - hsegs = (sd->gspca_dev.width >> 3) - 1; - vsegs = (sd->gspca_dev.height >> 3) - 1; + hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1; + vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1; reg_w(sd, R511_CAM_PXCNT, hsegs); reg_w(sd, R511_CAM_LNCNT, vsegs); @@ -3541,7 +3541,7 @@ static void ov511_mode_init_regs(struct sd *sd) case SEN_OV7640: case SEN_OV7648: case SEN_OV76BE: - if (sd->gspca_dev.width == 320) + if (sd->gspca_dev.pixfmt.width == 320) interlaced = 1; /* Fall through */ case SEN_OV6630: @@ -3551,7 +3551,7 @@ static void ov511_mode_init_regs(struct sd *sd) case 30: case 25: /* Not enough bandwidth to do 640x480 @ 30 fps */ - if (sd->gspca_dev.width != 640) { + if (sd->gspca_dev.pixfmt.width != 640) { sd->clockdiv = 0; break; } @@ -3584,7 +3584,8 @@ static void ov511_mode_init_regs(struct sd *sd) /* Check if we have enough bandwidth to disable compression */ fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1; - needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2; + needed = fps * sd->gspca_dev.pixfmt.width * + sd->gspca_dev.pixfmt.height * 3 / 2; /* 1000 isoc packets/sec */ if (needed > 1000 * packet_size) { /* Enable Y and UV quantization and compression */ @@ -3646,8 +3647,8 @@ static void ov518_mode_init_regs(struct sd *sd) reg_w(sd, 0x38, 0x80); } - hsegs = sd->gspca_dev.width / 16; - vsegs = sd->gspca_dev.height / 4; + hsegs = sd->gspca_dev.pixfmt.width / 16; + vsegs = sd->gspca_dev.pixfmt.height / 4; reg_w(sd, 0x29, hsegs); reg_w(sd, 0x2a, vsegs); @@ -3686,7 +3687,8 @@ static void ov518_mode_init_regs(struct sd *sd) * happened to be with revision < 2 cams using an * OV7620 and revision 2 cams using an OV7620AE. */ - if (sd->revision > 0 && sd->gspca_dev.width == 640) { + if (sd->revision > 0 && + sd->gspca_dev.pixfmt.width == 640) { reg_w(sd, 0x20, 0x60); reg_w(sd, 0x21, 0x1f); } else { @@ -3812,8 +3814,8 @@ static void ov519_mode_init_regs(struct sd *sd) break; } - reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4); - reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3); + reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.pixfmt.width >> 4); + reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.pixfmt.height >> 3); if (sd->sensor == SEN_OV7670 && sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv) reg_w(sd, OV519_R12_X_OFFSETL, 0x04); @@ -3947,14 +3949,16 @@ static void mode_init_ov_sensor_regs(struct sd *sd) } case SEN_OV3610: if (qvga) { - xstart = (1040 - gspca_dev->width) / 2 + (0x1f << 4); - ystart = (776 - gspca_dev->height) / 2; + xstart = (1040 - gspca_dev->pixfmt.width) / 2 + + (0x1f << 4); + ystart = (776 - gspca_dev->pixfmt.height) / 2; } else { - xstart = (2076 - gspca_dev->width) / 2 + (0x10 << 4); - ystart = (1544 - gspca_dev->height) / 2; + xstart = (2076 - gspca_dev->pixfmt.width) / 2 + + (0x10 << 4); + ystart = (1544 - gspca_dev->pixfmt.height) / 2; } - xend = xstart + gspca_dev->width; - yend = ystart + gspca_dev->height; + xend = xstart + gspca_dev->pixfmt.width; + yend = ystart + gspca_dev->pixfmt.height; /* Writing to the COMH register resets the other windowing regs to their default values, so we must do this first. */ i2c_w_mask(sd, 0x12, qvga ? 
0x40 : 0x00, 0xf0); @@ -4229,8 +4233,8 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; /* Default for most bridges, allow bridge_mode_init_regs to override */ - sd->sensor_width = sd->gspca_dev.width; - sd->sensor_height = sd->gspca_dev.height; + sd->sensor_width = sd->gspca_dev.pixfmt.width; + sd->sensor_height = sd->gspca_dev.pixfmt.height; switch (sd->bridge) { case BRIDGE_OV511: @@ -4345,12 +4349,13 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev, ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1); if (in[8] & 0x80) { /* Frame end */ - if ((in[9] + 1) * 8 != gspca_dev->width || - (in[10] + 1) * 8 != gspca_dev->height) { + if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width || + (in[10] + 1) * 8 != gspca_dev->pixfmt.height) { PERR("Invalid frame size, got: %dx%d," " requested: %dx%d\n", (in[9] + 1) * 8, (in[10] + 1) * 8, - gspca_dev->width, gspca_dev->height); + gspca_dev->pixfmt.width, + gspca_dev->pixfmt.height); gspca_dev->last_packet_type = DISCARD_PACKET; return; } @@ -4470,7 +4475,8 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev, if (sd->first_frame) { sd->first_frame--; if (gspca_dev->image_len < - sd->gspca_dev.width * sd->gspca_dev.height) + sd->gspca_dev.pixfmt.width * + sd->gspca_dev.pixfmt.height) gspca_dev->last_packet_type = DISCARD_PACKET; } gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index 03a33c46ca2..90f0d637cd9 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -1440,9 +1440,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, /* If this packet is marked as EOF, end the frame */ } else if (data[1] & UVC_STREAM_EOF) { sd->last_pts = 0; - if (gspca_dev->pixfmt == V4L2_PIX_FMT_YUYV + if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV && gspca_dev->image_len + len - 12 != - gspca_dev->width * gspca_dev->height * 2) { + gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height * 2) { PDEBUG(D_PACK, "wrong sized frame"); goto discard; } diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c index c4cd028fe0b..47085cf2d72 100644 --- a/drivers/media/usb/gspca/ov534_9.c +++ b/drivers/media/usb/gspca/ov534_9.c @@ -59,6 +59,7 @@ enum sensors { SENSOR_OV965x, /* ov9657 */ SENSOR_OV971x, /* ov9712 */ SENSOR_OV562x, /* ov5621 */ + SENSOR_OV361x, /* ov3610 */ NSENSORS }; @@ -106,6 +107,274 @@ static const struct v4l2_pix_format ov562x_mode[] = { } }; +enum ov361x { + ov361x_2048 = 0, + ov361x_1600, + ov361x_1024, + ov361x_640, + ov361x_320, + ov361x_160, + ov361x_last +}; + +static const struct v4l2_pix_format ov361x_mode[] = { + {0x800, 0x600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 0x800, + .sizeimage = 0x800 * 0x600, + .colorspace = V4L2_COLORSPACE_SRGB}, + {1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 1600, + .sizeimage = 1600 * 1200, + .colorspace = V4L2_COLORSPACE_SRGB}, + {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 768, + .sizeimage = 1024 * 768, + .colorspace = V4L2_COLORSPACE_SRGB}, + {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480, + .colorspace = V4L2_COLORSPACE_SRGB}, + {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240, + .colorspace = V4L2_COLORSPACE_SRGB}, + {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 160, + .sizeimage = 160 * 120, + .colorspace = V4L2_COLORSPACE_SRGB} +}; + +static 
const u8 ov361x_start_2048[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0c}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x00}, + {0x17, 0x10}, + {0x18, 0x90}, + {0x19, 0x00}, + {0x1a, 0xc0}, +}; +static const u8 ov361x_bridge_start_2048[][2] = { + {0xf1, 0x60}, + {0x88, 0x00}, + {0x89, 0x08}, + {0x8a, 0x00}, + {0x8b, 0x06}, + {0x8c, 0x01}, + {0x8d, 0x10}, + {0x1c, 0x00}, + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, + {0x1d, 0x2e}, + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_1600[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x00}, + {0x17, 0x10}, + {0x18, 0x90}, + {0x19, 0x00}, + {0x1a, 0xc0}, +}; +static const u8 ov361x_bridge_start_1600[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x08}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x06}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_1024[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; +static const u8 ov361x_bridge_start_1024[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_640[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; + +static const u8 ov361x_bridge_start_640[][2] = { + {0xf1, 0x60}, /* Hsize[7:0]*/ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_320[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; + +static const 
u8 ov361x_bridge_start_320[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer; */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + +static const u8 ov361x_start_160[][2] = { + {0x12, 0x80}, + {0x13, 0xcf}, + {0x14, 0x40}, + {0x15, 0x00}, + {0x01, 0x80}, + {0x02, 0x80}, + {0x04, 0x70}, + {0x0d, 0x40}, + {0x0f, 0x47}, + {0x11, 0x81}, + {0x32, 0x36}, + {0x33, 0x0C}, + {0x34, 0x00}, + {0x35, 0x90}, + {0x12, 0x40}, + {0x17, 0x1f}, + {0x18, 0x5f}, + {0x19, 0x00}, + {0x1a, 0x68}, +}; + +static const u8 ov361x_bridge_start_160[][2] = { + {0xf1, 0x60}, /* Hsize[7:0] */ + {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ + {0x89, 0x04}, /* Vsize[7:0] */ + {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ + {0x8b, 0x03}, /* for Iso */ + {0x8c, 0x01}, /* RAW input */ + {0x8d, 0x10}, + {0x1c, 0x00}, /* RAW output, Iso transfer */ + {0x1d, 0x48}, + {0x1d, 0x00}, + {0x1d, 0xff}, + {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ + {0x1d, 0x2e}, /* for Iso */ + {0x1d, 0x1e}, +}; + static const u8 bridge_init[][2] = { {0x88, 0xf8}, {0x89, 0xff}, @@ -898,7 +1167,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev) int i; for (i = 0; i < 5; i++) { - msleep(10); + msleep(20); data = reg_r(gspca_dev, OV534_REG_STATUS); switch (data) { @@ -1221,6 +1490,13 @@ static int sd_init(struct gspca_dev *gspca_dev) sccb_w_array(gspca_dev, ov562x_init_2, ARRAY_SIZE(ov562x_init_2)); reg_w(gspca_dev, 0xe0, 0x00); + } else if ((sensor_id & 0xfff0) == 0x3610) { + sd->sensor = SENSOR_OV361x; + gspca_dev->cam.cam_mode = ov361x_mode; + gspca_dev->cam.nmodes = ARRAY_SIZE(ov361x_mode); + reg_w(gspca_dev, 0xe7, 0x3a); + reg_w(gspca_dev, 0xf1, 0x60); + sccb_write(gspca_dev, 0x12, 0x80); } else { pr_err("Unknown sensor %04x", sensor_id); return -EINVAL; @@ -1229,6 +1505,53 @@ static int sd_init(struct gspca_dev *gspca_dev) return gspca_dev->usb_err; } +static int sd_start_ov361x(struct gspca_dev *gspca_dev) +{ + sccb_write(gspca_dev, 0x12, 0x80); + msleep(20); + switch (gspca_dev->curr_mode % (ov361x_last)) { + case ov361x_2048: + reg_w_array(gspca_dev, ov361x_bridge_start_2048, + ARRAY_SIZE(ov361x_bridge_start_2048)); + sccb_w_array(gspca_dev, ov361x_start_2048, + ARRAY_SIZE(ov361x_start_2048)); + break; + case ov361x_1600: + reg_w_array(gspca_dev, ov361x_bridge_start_1600, + ARRAY_SIZE(ov361x_bridge_start_1600)); + sccb_w_array(gspca_dev, ov361x_start_1600, + ARRAY_SIZE(ov361x_start_1600)); + break; + case ov361x_1024: + reg_w_array(gspca_dev, ov361x_bridge_start_1024, + ARRAY_SIZE(ov361x_bridge_start_1024)); + sccb_w_array(gspca_dev, ov361x_start_1024, + ARRAY_SIZE(ov361x_start_1024)); + break; + case ov361x_640: + reg_w_array(gspca_dev, ov361x_bridge_start_640, + ARRAY_SIZE(ov361x_bridge_start_640)); + sccb_w_array(gspca_dev, ov361x_start_640, + ARRAY_SIZE(ov361x_start_640)); + break; + case ov361x_320: + reg_w_array(gspca_dev, ov361x_bridge_start_320, + ARRAY_SIZE(ov361x_bridge_start_320)); + sccb_w_array(gspca_dev, ov361x_start_320, + ARRAY_SIZE(ov361x_start_320)); + break; + case ov361x_160: + reg_w_array(gspca_dev, ov361x_bridge_start_160, + ARRAY_SIZE(ov361x_bridge_start_160)); + sccb_w_array(gspca_dev, ov361x_start_160, + ARRAY_SIZE(ov361x_start_160)); + 
break; + } + reg_w(gspca_dev, 0xe0, 0x00); /* start transfer */ + + return gspca_dev->usb_err; +} + static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; @@ -1237,6 +1560,8 @@ static int sd_start(struct gspca_dev *gspca_dev) return gspca_dev->usb_err; if (sd->sensor == SENSOR_OV562x) return gspca_dev->usb_err; + if (sd->sensor == SENSOR_OV361x) + return sd_start_ov361x(gspca_dev); switch (gspca_dev->curr_mode) { case QVGA_MODE: /* 320x240 */ @@ -1290,6 +1615,11 @@ static int sd_start(struct gspca_dev *gspca_dev) static void sd_stopN(struct gspca_dev *gspca_dev) { + if (((struct sd *)gspca_dev)->sensor == SENSOR_OV361x) { + reg_w(gspca_dev, 0xe0, 0x01); /* stop transfer */ + /* reg_w(gspca_dev, 0x31, 0x09); */ + return; + } reg_w(gspca_dev, 0xe0, 0x01); set_led(gspca_dev, 0); reg_w(gspca_dev, 0xe0, 0x00); @@ -1425,6 +1755,8 @@ static int sd_init_controls(struct gspca_dev *gspca_dev) if (sd->sensor == SENSOR_OV971x) return 0; + if (sd->sensor == SENSOR_OV361x) + return 0; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 7); if (sd->sensor == SENSOR_OV562x) { diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c index 83519be94e5..cd79c180f67 100644 --- a/drivers/media/usb/gspca/pac207.c +++ b/drivers/media/usb/gspca/pac207.c @@ -299,7 +299,7 @@ static int sd_start(struct gspca_dev *gspca_dev) pac207_write_regs(gspca_dev, 0x0042, pac207_sensor_init[3], 8); /* Compression Balance */ - if (gspca_dev->width == 176) + if (gspca_dev->pixfmt.width == 176) pac207_write_reg(gspca_dev, 0x4a, 0xff); else pac207_write_reg(gspca_dev, 0x4a, 0x30); @@ -317,7 +317,7 @@ static int sd_start(struct gspca_dev *gspca_dev) mode = 0x00; else mode = 0x02; - if (gspca_dev->width == 176) { /* 176x144 */ + if (gspca_dev->pixfmt.width == 176) { /* 176x144 */ mode |= 0x01; PDEBUG(D_STREAM, "pac207_start mode 176x144"); } else { /* 352x288 */ diff --git a/drivers/media/usb/gspca/pac7311.c b/drivers/media/usb/gspca/pac7311.c index 1a5bdc853a8..25f86b1e74a 100644 --- a/drivers/media/usb/gspca/pac7311.c +++ b/drivers/media/usb/gspca/pac7311.c @@ -326,7 +326,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val) * 640x480 mode and page 4 reg 2 <= 3 then it must be 9 */ reg_w(gspca_dev, 0xff, 0x01); - if (gspca_dev->width != 640 && val <= 3) + if (gspca_dev->pixfmt.width != 640 && val <= 3) reg_w(gspca_dev, 0x08, 0x09); else reg_w(gspca_dev, 0x08, 0x08); @@ -337,7 +337,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val) * camera to use higher compression or we may run out of * bandwidth. 
*/ - if (gspca_dev->width == 640 && val == 2) + if (gspca_dev->pixfmt.width == 640 && val == 2) reg_w(gspca_dev, 0x80, 0x01); else reg_w(gspca_dev, 0x80, 0x1c); @@ -615,7 +615,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, /* Start the new frame with the jpeg header */ pac_start_frame(gspca_dev, - gspca_dev->height, gspca_dev->width); + gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c index 5f729b8aa2b..5102cea5047 100644 --- a/drivers/media/usb/gspca/se401.c +++ b/drivers/media/usb/gspca/se401.c @@ -354,9 +354,9 @@ static int sd_start(struct gspca_dev *gspca_dev) /* set size + mode */ se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH, - gspca_dev->width * mult, 0); + gspca_dev->pixfmt.width * mult, 0); se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT, - gspca_dev->height * mult, 0); + gspca_dev->pixfmt.height * mult, 0); /* * HDG: disabled this as it does not seem to do anything * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE, @@ -480,7 +480,7 @@ static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len) static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *)gspca_dev; - int imagesize = gspca_dev->width * gspca_dev->height; + int imagesize = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; int i, plen, bits, pixels, info, count; if (sd->restart_stream) diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c index f4453d52801..2a38621cf71 100644 --- a/drivers/media/usb/gspca/sn9c20x.c +++ b/drivers/media/usb/gspca/sn9c20x.c @@ -1955,7 +1955,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) return 0; } - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ gspca_dev->alt = 2; break; @@ -1985,8 +1985,8 @@ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; - int width = gspca_dev->width; - int height = gspca_dev->height; + int width = gspca_dev->pixfmt.width; + int height = gspca_dev->pixfmt.height; u8 fmt, scale = 0; jpeg_define(sd->jpeg_hdr, height, width, diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index d7ff3b9687c..7277dbd2afc 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -513,10 +513,7 @@ static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf) if (gspca_dev->usb_buf[0] & 0x04) { if (gspca_dev->usb_buf[0] & 0x08) { dev_err(gspca_dev->v4l2_dev.dev, - "i2c error writing %02x %02x %02x %02x" - " %02x %02x %02x %02x\n", - buf[0], buf[1], buf[2], buf[3], - buf[4], buf[5], buf[6], buf[7]); + "i2c error writing %8ph\n", buf); gspca_dev->usb_err = -EIO; } return; @@ -753,7 +750,7 @@ static void setexposure(struct gspca_dev *gspca_dev) /* In 640x480, if the reg11 has less than 4, the image is unstable (the bridge goes into a higher compression mode which we have not reverse engineered yet). 
*/ - if (gspca_dev->width == 640 && reg11 < 4) + if (gspca_dev->pixfmt.width == 640 && reg11 < 4) reg11 = 4; /* frame exposure time in ms = 1000 * reg11 / 30 -> diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index 3b5ccb1c4cd..c69b45d7cfb 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c @@ -2204,7 +2204,8 @@ static int sd_start(struct gspca_dev *gspca_dev) { 0x14, 0xe7, 0x1e, 0xdd }; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ /* initialize the bridge */ diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c index 688592b289e..f38fd894960 100644 --- a/drivers/media/usb/gspca/spca1528.c +++ b/drivers/media/usb/gspca/spca1528.c @@ -255,7 +255,8 @@ static int sd_start(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; /* initialize the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ /* the JPEG quality shall be 85% */ diff --git a/drivers/media/usb/gspca/spca500.c b/drivers/media/usb/gspca/spca500.c index 9f8bf51fd64..f011a309dd6 100644 --- a/drivers/media/usb/gspca/spca500.c +++ b/drivers/media/usb/gspca/spca500.c @@ -608,7 +608,8 @@ static int sd_start(struct gspca_dev *gspca_dev) __u8 xmult, ymult; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c index acb19fb9a3d..aa21edc9502 100644 --- a/drivers/media/usb/gspca/sq905c.c +++ b/drivers/media/usb/gspca/sq905c.c @@ -272,7 +272,7 @@ static int sd_start(struct gspca_dev *gspca_dev) dev->cap_mode = gspca_dev->cam.cam_mode; /* "Open the shutter" and set size, to start capture */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 640: PDEBUG(D_STREAM, "Start streaming at high resolution"); dev->cap_mode++; diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c index b10d0821111..e274cf19a3e 100644 --- a/drivers/media/usb/gspca/sq930x.c +++ b/drivers/media/usb/gspca/sq930x.c @@ -906,7 +906,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) gspca_dev->cam.bulk_nurbs = 1; /* there must be one URB only */ sd->do_ctrl = 0; - gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8; + gspca_dev->cam.bulk_size = gspca_dev->pixfmt.width * + gspca_dev->pixfmt.height + 8; return 0; } diff --git a/drivers/media/usb/gspca/stk014.c b/drivers/media/usb/gspca/stk014.c index 8c0982607f2..b0c70fea760 100644 --- a/drivers/media/usb/gspca/stk014.c +++ b/drivers/media/usb/gspca/stk014.c @@ -250,7 +250,8 @@ static int sd_start(struct gspca_dev *gspca_dev) int ret, value; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); @@ -261,7 +262,7 @@ static int sd_start(struct gspca_dev *gspca_dev) set_par(gspca_dev, 0x00000000); set_par(gspca_dev, 0x8002e001); set_par(gspca_dev, 0x14000000); - if (gspca_dev->width > 320) + if (gspca_dev->pixfmt.width > 320) value = 
0x8002e001; /* 640x480 */ else value = 0x4001f000; /* 320x240 */ diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c index 585868835ac..1fc80af2a18 100644 --- a/drivers/media/usb/gspca/stk1135.c +++ b/drivers/media/usb/gspca/stk1135.c @@ -48,42 +48,11 @@ struct sd { }; static const struct v4l2_pix_format stk1135_modes[] = { - {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 160, - .sizeimage = 160 * 120, - .colorspace = V4L2_COLORSPACE_SRGB}, - {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 176, - .sizeimage = 176 * 144, - .colorspace = V4L2_COLORSPACE_SRGB}, - {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 320, - .sizeimage = 320 * 240, - .colorspace = V4L2_COLORSPACE_SRGB}, - {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 352, - .sizeimage = 352 * 288, - .colorspace = V4L2_COLORSPACE_SRGB}, + /* default mode (this driver supports variable resolution) */ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB}, - {720, 576, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 720, - .sizeimage = 720 * 576, - .colorspace = V4L2_COLORSPACE_SRGB}, - {800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 800, - .sizeimage = 800 * 600, - .colorspace = V4L2_COLORSPACE_SRGB}, - {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 1024, - .sizeimage = 1024 * 768, - .colorspace = V4L2_COLORSPACE_SRGB}, - {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, - .bytesperline = 1280, - .sizeimage = 1280 * 1024, - .colorspace = V4L2_COLORSPACE_SRGB}, }; /* -- read a register -- */ @@ -347,16 +316,16 @@ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev) sensor_write(gspca_dev, cfg[i].reg, cfg[i].val); /* set output size */ - width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width; - height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height; - if (width <= 640) { /* use context A (half readout speed by default) */ + width = gspca_dev->pixfmt.width; + height = gspca_dev->pixfmt.height; + if (width <= 640 && height <= 512) { /* context A (half readout speed)*/ sensor_write(gspca_dev, 0x1a7, width); sensor_write(gspca_dev, 0x1aa, height); /* set read mode context A */ sensor_write(gspca_dev, 0x0c8, 0x0000); /* set resize, read mode, vblank, hblank context A */ sensor_write(gspca_dev, 0x2c8, 0x0000); - } else { /* use context B (full readout speed by default) */ + } else { /* context B (full readout speed) */ sensor_write(gspca_dev, 0x1a1, width); sensor_write(gspca_dev, 0x1a4, height); /* set read mode context B */ @@ -484,8 +453,8 @@ static int sd_start(struct gspca_dev *gspca_dev) reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00); /* set capture end position */ - width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width; - height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height; + width = gspca_dev->pixfmt.width; + height = gspca_dev->pixfmt.height; reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff); reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8); reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff); @@ -643,6 +612,35 @@ static int sd_init_controls(struct gspca_dev *gspca_dev) return 0; } +static void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt) +{ + fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U); + fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U); + /* round up to even numbers */ + fmt->fmt.pix.width += 
(fmt->fmt.pix.width & 1); + fmt->fmt.pix.height += (fmt->fmt.pix.height & 1); + + fmt->fmt.pix.bytesperline = fmt->fmt.pix.width; + fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height; +} + +static int stk1135_enum_framesizes(struct gspca_dev *gspca_dev, + struct v4l2_frmsizeenum *fsize) +{ + if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = 32; + fsize->stepwise.min_height = 32; + fsize->stepwise.max_width = 1280; + fsize->stepwise.max_height = 1024; + fsize->stepwise.step_width = 2; + fsize->stepwise.step_height = 2; + + return 0; +} + /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, @@ -653,6 +651,8 @@ static const struct sd_desc sd_desc = { .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = stk1135_dq_callback, + .try_fmt = stk1135_try_fmt, + .enum_framesizes = stk1135_enum_framesizes, }; /* -- module initialisation -- */ diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c index 55ee7a61c67..49d209bbf9e 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c @@ -452,7 +452,7 @@ frame_data: NULL, 0); if (sd->bridge == BRIDGE_ST6422) - sd->to_skip = gspca_dev->width * 4; + sd->to_skip = gspca_dev->pixfmt.width * 4; if (chunk_len) PERR("Chunk length is " diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c index 8206b774330..8d785edcccf 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c @@ -421,7 +421,7 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val) /* Number of pixels counted by the sensor when subsampling the pixels. 
* Slightly larger than the real value to avoid oscillation */ - totalpixels = gspca_dev->width * gspca_dev->height; + totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; totalpixels = totalpixels/(8*8) + totalpixels/(64*64); brightpixels = (totalpixels * val) >> 8; diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index af8767a9bd4..a517d185feb 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -715,7 +715,8 @@ static int sd_start(struct gspca_dev *gspca_dev) int enable; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c index 4cb511ccc5f..640c2fe760b 100644 --- a/drivers/media/usb/gspca/topro.c +++ b/drivers/media/usb/gspca/topro.c @@ -3856,7 +3856,7 @@ static void setsharpness(struct gspca_dev *gspca_dev, s32 val) if (sd->bridge == BRIDGE_TP6800) { val |= 0x08; /* grid compensation enable */ - if (gspca_dev->width == 640) + if (gspca_dev->pixfmt.width == 640) reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ else val |= 0x04; /* scaling down enable */ @@ -3880,7 +3880,7 @@ static void set_resolution(struct gspca_dev *gspca_dev) struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); - if (gspca_dev->width == 320) { + if (gspca_dev->pixfmt.width == 320) { reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06); msleep(100); i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); @@ -3924,7 +3924,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev) /* 640x480 * 30 fps does not work */ if (i == 6 /* if 30 fps */ - && gspca_dev->width == 640) + && gspca_dev->pixfmt.width == 640) i = 0x05; /* 15 fps */ } else { for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) { @@ -3935,7 +3935,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev) /* 640x480 * 30 fps does not work */ if (i == 7 /* if 30 fps */ - && gspca_dev->width == 640) + && gspca_dev->pixfmt.width == 640) i = 6; /* 15 fps */ i |= 0x80; /* clock * 1 */ } @@ -4554,7 +4554,8 @@ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width); + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width); set_dqt(gspca_dev, sd->quality); if (sd->bridge == BRIDGE_TP6800) { if (sd->sensor == SENSOR_CX0342) @@ -4737,7 +4738,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev) (gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] + (gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28]) / 8; - if (gspca_dev->width == 640) + if (gspca_dev->pixfmt.width == 640) luma /= 4; reg_w(gspca_dev, 0x7d, 0x00); diff --git a/drivers/media/usb/gspca/tv8532.c b/drivers/media/usb/gspca/tv8532.c index 8591324a53e..d497ba38af0 100644 --- a/drivers/media/usb/gspca/tv8532.c +++ b/drivers/media/usb/gspca/tv8532.c @@ -268,7 +268,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, packet_type0 = packet_type1 = INTER_PACKET; if (gspca_dev->empty_packet) { gspca_dev->empty_packet = 0; - sd->packet = gspca_dev->height / 2; + sd->packet = gspca_dev->pixfmt.height / 2; packet_type0 = FIRST_PACKET; } else if (sd->packet == 0) return; /* 2 more lines in 352x288 ! 
*/ @@ -284,9 +284,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, * - 4 bytes */ gspca_frame_add(gspca_dev, packet_type0, - data + 2, gspca_dev->width); + data + 2, gspca_dev->pixfmt.width); gspca_frame_add(gspca_dev, packet_type1, - data + gspca_dev->width + 5, gspca_dev->width); + data + gspca_dev->pixfmt.width + 5, + gspca_dev->pixfmt.width); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c index a2275cfe0b8..103f6c4236b 100644 --- a/drivers/media/usb/gspca/vicam.c +++ b/drivers/media/usb/gspca/vicam.c @@ -121,13 +121,13 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size) memset(req_data, 0, 16); req_data[0] = gain; - if (gspca_dev->width == 256) + if (gspca_dev->pixfmt.width == 256) req_data[1] |= 0x01; /* low nibble x-scale */ - if (gspca_dev->height <= 122) { + if (gspca_dev->pixfmt.height <= 122) { req_data[1] |= 0x10; /* high nibble y-scale */ - unscaled_height = gspca_dev->height * 2; + unscaled_height = gspca_dev->pixfmt.height * 2; } else - unscaled_height = gspca_dev->height; + unscaled_height = gspca_dev->pixfmt.height; req_data[2] = 0x90; /* unknown, does not seem to do anything */ if (unscaled_height <= 200) req_data[3] = 0x06; /* vend? */ diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c index 2165da0c7ce..fb9fe2ef3a6 100644 --- a/drivers/media/usb/gspca/w996Xcf.c +++ b/drivers/media/usb/gspca/w996Xcf.c @@ -430,11 +430,11 @@ static void w9968cf_set_crop_window(struct sd *sd) #define SC(x) ((x) << 10) /* Scaling factors */ - fw = SC(sd->gspca_dev.width) / max_width; - fh = SC(sd->gspca_dev.height) / max_height; + fw = SC(sd->gspca_dev.pixfmt.width) / max_width; + fh = SC(sd->gspca_dev.pixfmt.height) / max_height; - cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.width) / fh; - ch = (fw >= fh) ? SC(sd->gspca_dev.height) / fw : max_height; + cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.pixfmt.width) / fh; + ch = (fw >= fh) ? SC(sd->gspca_dev.pixfmt.height) / fw : max_height; sd->sensor_width = max_width; sd->sensor_height = max_height; @@ -454,34 +454,34 @@ static void w9968cf_mode_init_regs(struct sd *sd) w9968cf_set_crop_window(sd); - reg_w(sd, 0x14, sd->gspca_dev.width); - reg_w(sd, 0x15, sd->gspca_dev.height); + reg_w(sd, 0x14, sd->gspca_dev.pixfmt.width); + reg_w(sd, 0x15, sd->gspca_dev.pixfmt.height); /* JPEG width & height */ - reg_w(sd, 0x30, sd->gspca_dev.width); - reg_w(sd, 0x31, sd->gspca_dev.height); + reg_w(sd, 0x30, sd->gspca_dev.pixfmt.width); + reg_w(sd, 0x31, sd->gspca_dev.pixfmt.height); /* Y & UV frame buffer strides (in WORD) */ if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat == V4L2_PIX_FMT_JPEG) { - reg_w(sd, 0x2c, sd->gspca_dev.width / 2); - reg_w(sd, 0x2d, sd->gspca_dev.width / 4); + reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width / 2); + reg_w(sd, 0x2d, sd->gspca_dev.pixfmt.width / 4); } else - reg_w(sd, 0x2c, sd->gspca_dev.width); + reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width); reg_w(sd, 0x00, 0xbf17); /* reset everything */ reg_w(sd, 0x00, 0xbf10); /* normal operation */ /* Transfer size in WORDS (for UYVY format only) */ - val = sd->gspca_dev.width * sd->gspca_dev.height; + val = sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height; reg_w(sd, 0x3d, val & 0xffff); /* low bits */ reg_w(sd, 0x3e, val >> 16); /* high bits */ if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat == V4L2_PIX_FMT_JPEG) { /* We may get called multiple times (usb isoc bw negotiat.) 
*/ - jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height, - sd->gspca_dev.width, 0x22); /* JPEG 420 */ + jpeg_define(sd->jpeg_hdr, sd->gspca_dev.pixfmt.height, + sd->gspca_dev.pixfmt.width, 0x22); /* JPEG 420 */ jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual)); w9968cf_upload_quantizationtables(sd); v4l2_ctrl_grab(sd->jpegqual, true); diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c index 7eaf64eb867..a41aa7817c5 100644 --- a/drivers/media/usb/gspca/xirlink_cit.c +++ b/drivers/media/usb/gspca/xirlink_cit.c @@ -1471,14 +1471,14 @@ static int cit_get_clock_div(struct gspca_dev *gspca_dev) while (clock_div > 3 && 1000 * packet_size > - gspca_dev->width * gspca_dev->height * + gspca_dev->pixfmt.width * gspca_dev->pixfmt.height * fps[clock_div - 1] * 3 / 2) clock_div--; PDEBUG(D_PROBE, "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)", - packet_size, gspca_dev->width, gspca_dev->height, clock_div, - fps[clock_div]); + packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height, + clock_div, fps[clock_div]); return clock_div; } @@ -1502,7 +1502,7 @@ static int cit_start_model0(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x0002, 0x0426); cit_write_reg(gspca_dev, 0x0014, 0x0427); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0004, 0x010b); cit_write_reg(gspca_dev, 0x0001, 0x010a); @@ -1643,7 +1643,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x00, 0x0101); cit_write_reg(gspca_dev, 0x00, 0x010a); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_write_reg(gspca_dev, 0x80, 0x0103); cit_write_reg(gspca_dev, 0x60, 0x0105); @@ -1700,7 +1700,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev) } /* Assorted init */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ @@ -1753,7 +1753,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x0000, 0x0108); cit_write_reg(gspca_dev, 0x0001, 0x0133); cit_write_reg(gspca_dev, 0x0001, 0x0102); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ @@ -1792,7 +1792,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x0050, 0x0111); cit_write_reg(gspca_dev, 0x00d0, 0x0111); @@ -1840,7 +1840,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) * Magic control of CMOS sensor. Only lower values like * 0-3 work, and picture shifts left or right. Don't change. */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_model2_Packet1(gspca_dev, 0x0014, 0x0002); cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */ @@ -1899,7 +1899,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev) * does not allow arbitrary values and apparently is a bit mask, to * be activated only at appropriate time. Don't change it randomly! 
*/ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2); break; @@ -2023,7 +2023,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev) cit_model3_Packet1(gspca_dev, 0x009e, 0x0096); cit_model3_Packet1(gspca_dev, 0x009f, 0x000a); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ @@ -2134,7 +2134,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev) like with the IBM netcam pro). */ cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ @@ -2211,7 +2211,7 @@ static int cit_start_model4(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_model4_Packet1(gspca_dev, 0x0034, 0x0000); - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); @@ -2531,7 +2531,7 @@ static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev) cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0024, 0x010b); cit_write_reg(gspca_dev, 0x0089, 0x0119); @@ -2635,7 +2635,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) struct usb_host_interface *alt; int max_packet_size; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: max_packet_size = 450; break; @@ -2659,7 +2659,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev) int ret, packet_size, min_packet_size; struct usb_host_interface *alt; - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: min_packet_size = 200; break; @@ -2780,7 +2780,7 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len) case CIT_MODEL1: case CIT_MODEL3: case CIT_IBM_NETCAM_PRO: - switch (gspca_dev->width) { + switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ byte3 = 0x02; byte4 = 0x0a; @@ -2864,20 +2864,16 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len) if (data[i] == 0xff) { if (i >= 4) PDEBUG(D_FRAM, - "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n", + "header found at offset: %d: %02x %02x 00 %3ph\n", i - 1, data[i - 4], data[i - 3], - data[i], - data[i + 1], - data[i + 2]); + &data[i]); else PDEBUG(D_FRAM, - "header found at offset: %d: 00 %02x %02x %02x\n", + "header found at offset: %d: 00 %3ph\n", i - 1, - data[i], - data[i + 1], - data[i + 2]); + &data[i]); return data + i + (sd->sof_len - 1); } break; diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c index cbfc2f92142..7b95d8e88a2 100644 --- a/drivers/media/usb/gspca/zc3xx.c +++ b/drivers/media/usb/gspca/zc3xx.c @@ -6700,7 +6700,8 @@ static int sd_start(struct gspca_dev *gspca_dev) }; /* create the JPEG header */ - jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, + jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, + gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c 
b/drivers/media/usb/hdpvr/hdpvr-core.c index 6e5070774dc..2f0c89cbac7 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -78,7 +78,8 @@ void hdpvr_delete(struct hdpvr_device *dev) static void challenge(u8 *bytes) { - u64 *i64P, tmp64; + __le64 *i64P; + u64 tmp64; uint i, idx; for (idx = 0; idx < 32; ++idx) { @@ -106,10 +107,10 @@ static void challenge(u8 *bytes) for (i = 0; i < 3; i++) bytes[1] *= bytes[6] + 1; for (i = 0; i < 3; i++) { - i64P = (u64 *)bytes; + i64P = (__le64 *)bytes; tmp64 = le64_to_cpup(i64P); - tmp64 <<= bytes[7] & 0x0f; - *i64P += cpu_to_le64(tmp64); + tmp64 = tmp64 + (tmp64 << (bytes[7] & 0x0f)); + *i64P = cpu_to_le64(tmp64); } break; } @@ -301,8 +302,6 @@ static int hdpvr_probe(struct usb_interface *interface, goto error; } - dev->workqueue = 0; - /* init video transfer queues first of all */ /* to prevent oops in hdpvr_delete() on error paths */ INIT_LIST_HEAD(&dev->free_buff_list); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index c4d51d78f83..ea05f678b55 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -2868,7 +2868,7 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id, pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \ } -v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw) +static v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw) { v4l2_std_id std; std = (v4l2_std_id)hdw->std_mask_avail; diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index 03761c6f472..05bd91a60c0 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -209,8 +209,10 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size) struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; int dummy; - if (dev->state != SMSUSB_ACTIVE) + if (dev->state != SMSUSB_ACTIVE) { + sms_debug("Device not active yet"); return -ENOENT; + } sms_debug("sending %s(%d) size: %d", smscore_translate_msg(phdr->msg_type), phdr->msg_type, @@ -243,6 +245,9 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id) int rc, dummy; char *fw_filename; + if (id < 0) + id = sms_get_board(board_id)->default_mode; + if (id < DEVICE_MODE_DVBT || id > DEVICE_MODE_DVBT_BDA) { sms_err("invalid firmware id specified %d", id); return -EINVAL; @@ -445,14 +450,15 @@ static int smsusb_probe(struct usb_interface *intf, char devpath[32]; int i, rc; - sms_info("interface number %d", + sms_info("board id=%lu, interface number %d", + id->driver_info, intf->cur_altsetting->desc.bInterfaceNumber); if (sms_get_board(id->driver_info)->intf_num != intf->cur_altsetting->desc.bInterfaceNumber) { - sms_err("interface number is %d expecting %d", - sms_get_board(id->driver_info)->intf_num, - intf->cur_altsetting->desc.bInterfaceNumber); + sms_debug("interface %d won't be used. 
Expecting interface %d to popup", + intf->cur_altsetting->desc.bInterfaceNumber, + sms_get_board(id->driver_info)->intf_num); return -ENODEV; } @@ -483,22 +489,32 @@ static int smsusb_probe(struct usb_interface *intf, } if ((udev->actconfig->desc.bNumInterfaces == 2) && (intf->cur_altsetting->desc.bInterfaceNumber == 0)) { - sms_err("rom interface 0 is not used"); + sms_debug("rom interface 0 is not used"); return -ENODEV; } if (id->driver_info == SMS1XXX_BOARD_SIANO_STELLAR_ROM) { - sms_info("stellar device was found."); + /* Detected a Siano Stellar uninitialized */ + snprintf(devpath, sizeof(devpath), "usb\\%d-%s", udev->bus->busnum, udev->devpath); - sms_info("stellar device was found."); - return smsusb1_load_firmware( + sms_info("stellar device in cold state was found at %s.", devpath); + rc = smsusb1_load_firmware( udev, smscore_registry_getmode(devpath), id->driver_info); + + /* This device will reset and gain another USB ID */ + if (!rc) + sms_info("stellar device now in warm state"); + else + sms_err("Failed to put stellar in warm state. Error: %d", rc); + + return rc; + } else { + rc = smsusb_init_device(intf, id->driver_info); } - rc = smsusb_init_device(intf, id->driver_info); - sms_info("rc %d", rc); + sms_info("Device initialized with return code %d", rc); sms_board_load_modules(id->driver_info); return rc; } @@ -550,10 +566,13 @@ static int smsusb_resume(struct usb_interface *intf) } static const struct usb_device_id smsusb_id_table[] = { + /* This device is only present before firmware load */ { USB_DEVICE(0x187f, 0x0010), - .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, + .driver_info = SMS1XXX_BOARD_SIANO_STELLAR_ROM }, + /* This device pops up after firmware load */ { USB_DEVICE(0x187f, 0x0100), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, + { USB_DEVICE(0x187f, 0x0200), .driver_info = SMS1XXX_BOARD_SIANO_NOVA_A }, { USB_DEVICE(0x187f, 0x0201), diff --git a/drivers/media/usb/tlg2300/pd-main.c b/drivers/media/usb/tlg2300/pd-main.c index 95f94e5aa66..3316caa4733 100644 --- a/drivers/media/usb/tlg2300/pd-main.c +++ b/drivers/media/usb/tlg2300/pd-main.c @@ -232,7 +232,7 @@ static int firmware_download(struct usb_device *udev) goto out; } - max_packet_size = udev->ep_out[0x1]->desc.wMaxPacketSize; + max_packet_size = le16_to_cpu(udev->ep_out[0x1]->desc.wMaxPacketSize); log("\t\t download size : %d", (int)max_packet_size); for (offset = 0; offset < fwlength; offset += max_packet_size) { diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index e52c3b97f30..29724af9b9a 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -366,7 +366,7 @@ static int ttusb_dec_get_stb_state (struct ttusb_dec *dec, unsigned int *mode, } return 0; } else { - return -1; + return -ENOENT; } } @@ -1241,6 +1241,8 @@ static void ttusb_dec_init_v_pes(struct ttusb_dec *dec) static int ttusb_dec_init_usb(struct ttusb_dec *dec) { + int result; + dprintk("%s\n", __func__); mutex_init(&dec->usb_mutex); @@ -1258,7 +1260,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec) return -ENOMEM; } dec->irq_buffer = usb_alloc_coherent(dec->udev,IRQ_PACKET_SIZE, - GFP_ATOMIC, &dec->irq_dma_handle); + GFP_KERNEL, &dec->irq_dma_handle); if(!dec->irq_buffer) { usb_free_urb(dec->irq_urb); return -ENOMEM; @@ -1270,7 +1272,13 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec) dec->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; } - return ttusb_dec_alloc_iso_urbs(dec); + result = ttusb_dec_alloc_iso_urbs(dec); + if 
(result) { + usb_free_urb(dec->irq_urb); + usb_free_coherent(dec->udev, IRQ_PACKET_SIZE, + dec->irq_buffer, dec->irq_dma_handle); + } + return result; } static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) @@ -1293,10 +1301,11 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) dprintk("%s\n", __func__); - if (request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev)) { + result = request_firmware(&fw_entry, dec->firmware_name, &dec->udev->dev); + if (result) { printk(KERN_ERR "%s: Firmware (%s) unavailable.\n", __func__, dec->firmware_name); - return 1; + return result; } firmware = fw_entry->data; @@ -1306,7 +1315,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) printk("%s: firmware size too small for DSP code (%zu < 60).\n", __func__, firmware_size); release_firmware(fw_entry); - return -1; + return -ENOENT; } /* a 32 bit checksum over the first 56 bytes of the DSP Code is stored @@ -1320,7 +1329,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec) "0x%08x != 0x%08x in file), file invalid.\n", __func__, crc32_csum, crc32_check); release_firmware(fw_entry); - return -1; + return -ENOENT; } memcpy(idstring, &firmware[36], 20); idstring[20] = '\0'; @@ -1389,55 +1398,48 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec) dprintk("%s\n", __func__); result = ttusb_dec_get_stb_state(dec, &mode, &model, &version); + if (result) + return result; - if (!result) { - if (!mode) { - if (version == 0xABCDEFAB) - printk(KERN_INFO "ttusb_dec: no version " - "info in Firmware\n"); - else - printk(KERN_INFO "ttusb_dec: Firmware " - "%x.%02x%c%c\n", - version >> 24, (version >> 16) & 0xff, - (version >> 8) & 0xff, version & 0xff); - - result = ttusb_dec_boot_dsp(dec); - if (result) - return result; - else - return 1; - } else { - /* We can't trust the USB IDs that some firmwares - give the box */ - switch (model) { - case 0x00070001: - case 0x00070008: - case 0x0007000c: - ttusb_dec_set_model(dec, TTUSB_DEC3000S); - break; - case 0x00070009: - case 0x00070013: - ttusb_dec_set_model(dec, TTUSB_DEC2000T); - break; - case 0x00070011: - ttusb_dec_set_model(dec, TTUSB_DEC2540T); - break; - default: - printk(KERN_ERR "%s: unknown model returned " - "by firmware (%08x) - please report\n", - __func__, model); - return -1; - break; - } + if (!mode) { + if (version == 0xABCDEFAB) + printk(KERN_INFO "ttusb_dec: no version " + "info in Firmware\n"); + else + printk(KERN_INFO "ttusb_dec: Firmware " + "%x.%02x%c%c\n", + version >> 24, (version >> 16) & 0xff, + (version >> 8) & 0xff, version & 0xff); + result = ttusb_dec_boot_dsp(dec); + if (result) + return result; + } else { + /* We can't trust the USB IDs that some firmwares + give the box */ + switch (model) { + case 0x00070001: + case 0x00070008: + case 0x0007000c: + ttusb_dec_set_model(dec, TTUSB_DEC3000S); + break; + case 0x00070009: + case 0x00070013: + ttusb_dec_set_model(dec, TTUSB_DEC2000T); + break; + case 0x00070011: + ttusb_dec_set_model(dec, TTUSB_DEC2540T); + break; + default: + printk(KERN_ERR "%s: unknown model returned " + "by firmware (%08x) - please report\n", + __func__, model); + return -ENOENT; + } if (version >= 0x01770000) dec->can_playback = 1; - - return 0; - } } - else - return result; + return 0; } static int ttusb_dec_init_dvb(struct ttusb_dec *dec) @@ -1539,19 +1541,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec) static void ttusb_dec_exit_rc(struct ttusb_dec *dec) { - dprintk("%s\n", __func__); - /* we have to check whether the irq URB is already submitted. 
- * As the irq is submitted after the interface is changed, - * this is the best method i figured out. - * Any others?*/ - if (dec->interface == TTUSB_DEC_INTERFACE_IN) - usb_kill_urb(dec->irq_urb); - - usb_free_urb(dec->irq_urb); - - usb_free_coherent(dec->udev,IRQ_PACKET_SIZE, - dec->irq_buffer, dec->irq_dma_handle); if (dec->rc_input_dev) { input_unregister_device(dec->rc_input_dev); @@ -1566,6 +1556,20 @@ static void ttusb_dec_exit_usb(struct ttusb_dec *dec) dprintk("%s\n", __func__); + if (enable_rc) { + /* we have to check whether the irq URB is already submitted. + * As the irq is submitted after the interface is changed, + * this is the best method i figured out. + * Any others?*/ + if (dec->interface == TTUSB_DEC_INTERFACE_IN) + usb_kill_urb(dec->irq_urb); + + usb_free_urb(dec->irq_urb); + + usb_free_coherent(dec->udev, IRQ_PACKET_SIZE, + dec->irq_buffer, dec->irq_dma_handle); + } + dec->iso_stream_count = 0; for (i = 0; i < ISO_BUF_COUNT; i++) @@ -1623,6 +1627,7 @@ static int ttusb_dec_probe(struct usb_interface *intf, { struct usb_device *udev; struct ttusb_dec *dec; + int result; dprintk("%s\n", __func__); @@ -1651,13 +1656,15 @@ static int ttusb_dec_probe(struct usb_interface *intf, dec->udev = udev; - if (ttusb_dec_init_usb(dec)) - return 0; - if (ttusb_dec_init_stb(dec)) { - ttusb_dec_exit_usb(dec); - return 0; - } - ttusb_dec_init_dvb(dec); + result = ttusb_dec_init_usb(dec); + if (result) + goto err_usb; + result = ttusb_dec_init_stb(dec); + if (result) + goto err_stb; + result = ttusb_dec_init_dvb(dec); + if (result) + goto err_stb; dec->adapter.priv = dec; switch (id->idProduct) { @@ -1696,6 +1703,11 @@ static int ttusb_dec_probe(struct usb_interface *intf, ttusb_init_rc(dec); return 0; +err_stb: + ttusb_dec_exit_usb(dec); +err_usb: + kfree(dec); + return result; } static void ttusb_dec_disconnect(struct usb_interface *intf) diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index a2f4501c23c..0eb82106d2f 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -664,7 +664,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = { .size = 32, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, - .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, + .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_TILT_ABSOLUTE, @@ -674,7 +674,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = { .size = 32, .offset = 32, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, - .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, + .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_PRIVACY, diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 3394c343201..899cb6d1c4a 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -680,7 +680,8 @@ void uvc_video_clock_update(struct uvc_streaming *stream, stream->dev->name, sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC, - v4l2_buf->timestamp.tv_sec, v4l2_buf->timestamp.tv_usec, + v4l2_buf->timestamp.tv_sec, + (unsigned long)v4l2_buf->timestamp.tv_usec, x1, first->host_sof, first->dev_sof, x2, last->host_sof, last->dev_sof, y1, y2); diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index ddc9379eb27..20c09229a08 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -43,7 +43,7 @@ #define UNSET (-1U) -#define PREFIX (t->i2c->driver->driver.name) +#define PREFIX (t->i2c->dev.driver->name) /* * 
Driver modprobe parameters @@ -247,7 +247,7 @@ static const struct analog_demod_ops tuner_analog_ops = { /** * set_type - Sets the tuner type for a given device * - * @c: i2c_client descriptoy + * @c: i2c_client descriptor * @type: type of the tuner (e. g. tuner number) * @new_mode_mask: Indicates if tuner supports TV and/or Radio * @new_config: an optional parameter used by a few tuners to adjust @@ -452,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type, } tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", - c->adapter->name, c->driver->driver.name, c->addr << 1, type, + c->adapter->name, c->dev.driver->name, c->addr << 1, type, t->mode_mask); return; @@ -556,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap, int mode_mask; if (pos->i2c->adapter != adap || - strcmp(pos->i2c->driver->driver.name, "tuner")) + strcmp(pos->i2c->dev.driver->name, "tuner")) continue; mode_mask = pos->mode_mask; diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index c85d69da35b..85a6a34128a 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -189,30 +189,53 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier) struct v4l2_subdev *sd, *tmp; unsigned int notif_n_subdev = notifier->num_subdevs; unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS); - struct device *dev[n_subdev]; + struct device **dev; int i = 0; if (!notifier->v4l2_dev) return; + dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL); + if (!dev) { + dev_err(notifier->v4l2_dev->dev, + "Failed to allocate device cache!\n"); + } + mutex_lock(&list_lock); list_del(¬ifier->list); list_for_each_entry_safe(sd, tmp, ¬ifier->done, async_list) { - dev[i] = get_device(sd->dev); + struct device *d; + + d = get_device(sd->dev); v4l2_async_cleanup(sd); /* If we handled USB devices, we'd have to lock the parent too */ - device_release_driver(dev[i++]); + device_release_driver(d); if (notifier->unbind) notifier->unbind(notifier, sd, sd->asd); + + /* + * Store device at the device cache, in order to call + * put_device() on the final step + */ + if (dev) + dev[i++] = d; + else + put_device(d); } mutex_unlock(&list_lock); + /* + * Call device_attach() to reprobe devices + * + * NOTE: If dev allocation fails, i is 0, and the whole loop won't be + * executed. 
+ */ while (i--) { struct device *d = dev[i]; @@ -228,6 +251,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier) } put_device(d); } + kfree(dev); notifier->v4l2_dev = NULL; diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c index b67de8642b5..e18cc0469cf 100644 --- a/drivers/media/v4l2-core/v4l2-clk.c +++ b/drivers/media/v4l2-core/v4l2-clk.c @@ -240,3 +240,42 @@ void v4l2_clk_unregister(struct v4l2_clk *clk) kfree(clk); } EXPORT_SYMBOL(v4l2_clk_unregister); + +struct v4l2_clk_fixed { + unsigned long rate; + struct v4l2_clk_ops ops; +}; + +static unsigned long fixed_get_rate(struct v4l2_clk *clk) +{ + struct v4l2_clk_fixed *priv = clk->priv; + return priv->rate; +} + +struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id, + const char *id, unsigned long rate, struct module *owner) +{ + struct v4l2_clk *clk; + struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL); + + if (!priv) + return ERR_PTR(-ENOMEM); + + priv->rate = rate; + priv->ops.get_rate = fixed_get_rate; + priv->ops.owner = owner; + + clk = v4l2_clk_register(&priv->ops, dev_id, id, priv); + if (IS_ERR(clk)) + kfree(priv); + + return clk; +} +EXPORT_SYMBOL(__v4l2_clk_register_fixed); + +void v4l2_clk_unregister_fixed(struct v4l2_clk *clk) +{ + kfree(clk->priv); + v4l2_clk_unregister(clk); +} +EXPORT_SYMBOL(v4l2_clk_unregister_fixed); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 037d7a55aa8..433d6d77942 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -236,14 +236,14 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, v4l2_subdev_init(sd, ops); sd->flags |= V4L2_SUBDEV_FL_IS_I2C; /* the owner is the same as the i2c_client's driver owner */ - sd->owner = client->driver->driver.owner; + sd->owner = client->dev.driver->owner; sd->dev = &client->dev; /* i2c_client and v4l2_subdev point to one another */ v4l2_set_subdevdata(sd, client); i2c_set_clientdata(client, sd); /* initialize name */ snprintf(sd->name, sizeof(sd->name), "%s %d-%04x", - client->driver->driver.name, i2c_adapter_id(client->adapter), + client->dev.driver->name, i2c_adapter_id(client->adapter), client->addr); } EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); @@ -274,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, loaded. This delay-load mechanism doesn't work if other drivers want to use the i2c device, so explicitly loading the module is the best alternative. */ - if (client == NULL || client->driver == NULL) + if (client == NULL || client->dev.driver == NULL) goto error; /* Lock the module so we can safely get the v4l2_subdev pointer */ - if (!try_module_get(client->driver->driver.owner)) + if (!try_module_get(client->dev.driver->owner)) goto error; sd = i2c_get_clientdata(client); @@ -287,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, if (v4l2_device_register_subdev(v4l2_dev, sd)) sd = NULL; /* Decrease the module use count to match the first try_module_get. 
*/ - module_put(client->driver->driver.owner); + module_put(client->dev.driver->owner); error: /* If we have a client but no subdev, then something went wrong and diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index c3f08038868..60dcc0f3b32 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -565,13 +565,13 @@ EXPORT_SYMBOL(v4l2_ctrl_get_menu); * Returns NULL or an s64 type array containing the menu for given * control ID. The total number of the menu items is returned in @len. */ -const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len) +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len) { - static const s64 const qmenu_int_vpx_num_partitions[] = { + static const s64 qmenu_int_vpx_num_partitions[] = { 1, 2, 4, 8, }; - static const s64 const qmenu_int_vpx_num_ref_frames[] = { + static const s64 qmenu_int_vpx_num_ref_frames[] = { 1, 2, 3, }; @@ -583,7 +583,7 @@ const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len) default: *len = 0; return NULL; - }; + } } EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 7c437128821..73035ee0f4d 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -41,6 +41,8 @@ module_param(debug, bool, 0644); #define TRANS_QUEUED (1 << 0) /* Instance is currently running in hardware */ #define TRANS_RUNNING (1 << 1) +/* Instance is currently aborting */ +#define TRANS_ABORT (1 << 2) /* Offset base for buffers on the destination queue - used to distinguish @@ -221,6 +223,14 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) } spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + + /* If the context is aborted then don't schedule it */ + if (m2m_ctx->job_flags & TRANS_ABORT) { + spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); + dprintk("Aborted context\n"); + return; + } + if (m2m_ctx->job_flags & TRANS_QUEUED) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("On job queue already\n"); @@ -280,6 +290,8 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) m2m_dev = m2m_ctx->m2m_dev; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + + m2m_ctx->job_flags |= TRANS_ABORT; if (m2m_ctx->job_flags & TRANS_RUNNING) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); @@ -480,13 +492,15 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, m2m_dev = m2m_ctx->m2m_dev; spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); /* We should not be scheduled anymore, since we're dropping a queue. */ - INIT_LIST_HEAD(&m2m_ctx->queue); + if (m2m_ctx->job_flags & TRANS_QUEUED) + list_del(&m2m_ctx->queue); m2m_ctx->job_flags = 0; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); /* Drop queue, since streamoff returns device to the same state as after * calling reqbufs. 
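The TRANS_ABORT flag introduced above closes a race in v4l2-mem2mem: once a context starts aborting, v4l2_m2m_try_schedule() must not put it back on the job queue, and streamoff only unlinks the context if it really was queued. A standalone sketch of the scheduling guard (illustrative names, not the v4l2-m2m API):

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define JOB_QUEUED      BIT(0)
#define JOB_RUNNING     BIT(1)
#define JOB_ABORT       BIT(2)

struct example_ctx {
        unsigned long job_flags;
};

static DEFINE_SPINLOCK(example_job_lock);

static bool example_try_schedule(struct example_ctx *ctx)
{
        unsigned long flags;
        bool queued = false;

        spin_lock_irqsave(&example_job_lock, flags);
        /* An aborting context must never re-enter the job queue. */
        if (!(ctx->job_flags & (JOB_ABORT | JOB_QUEUED | JOB_RUNNING))) {
                ctx->job_flags |= JOB_QUEUED;
                queued = true;
        }
        spin_unlock_irqrestore(&example_job_lock, flags);
        return queued;
}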
*/ INIT_LIST_HEAD(&q_ctx->rdy_queue); + q_ctx->num_rdy = 0; spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); if (m2m_dev->curr_ctx == m2m_ctx) { diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index de0e87f0b2c..b19b306c8f7 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -241,7 +241,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, q->bufs[q->num_buffers + buffer] = vb; } - __setup_offsets(q, buffer); + if (memory == V4L2_MEMORY_MMAP) + __setup_offsets(q, buffer); dprintk(1, "Allocated %d buffers, %d plane(s) each\n", buffer, num_planes); @@ -1015,6 +1016,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) /* Check if the provided plane buffer is large enough */ if (planes[plane].length < q->plane_sizes[plane]) { + dprintk(1, "qbuf: provided buffer size %u is less than " + "setup size %u for plane %d\n", + planes[plane].length, + q->plane_sizes[plane], plane); ret = -EINVAL; goto err; } @@ -1205,8 +1210,11 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) int ret; ret = __verify_length(vb, b); - if (ret < 0) + if (ret < 0) { + dprintk(1, "%s(): plane parameters verification failed: %d\n", + __func__, ret); return ret; + } switch (q->memory) { case V4L2_MEMORY_MMAP: @@ -2469,10 +2477,11 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, } EXPORT_SYMBOL_GPL(vb2_read); -size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, loff_t *ppos, int nonblocking) { - return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0); + return __vb2_perform_fileio(q, (char __user *) data, count, + ppos, nonblocking, 0); } EXPORT_SYMBOL_GPL(vb2_write); @@ -2633,7 +2642,7 @@ int vb2_fop_release(struct file *file) } EXPORT_SYMBOL_GPL(vb2_fop_release); -ssize_t vb2_fop_write(struct file *file, char __user *buf, +ssize_t vb2_fop_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct video_device *vdev = video_devdata(file); diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 16ae3dcc7e2..2f860543912 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -35,17 +35,61 @@ struct vb2_dma_sg_buf { struct page **pages; int write; int offset; - struct vb2_dma_sg_desc sg_desc; + struct sg_table sg_table; + size_t size; + unsigned int num_pages; atomic_t refcount; struct vb2_vmarea_handler handler; }; static void vb2_dma_sg_put(void *buf_priv); +static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, + gfp_t gfp_flags) +{ + unsigned int last_page = 0; + int size = buf->size; + + while (size > 0) { + struct page *pages; + int order; + int i; + + order = get_order(size); + /* Dont over allocate*/ + if ((PAGE_SIZE << order) > size) + order--; + + pages = NULL; + while (!pages) { + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | + __GFP_NOWARN | gfp_flags, order); + if (pages) + break; + + if (order == 0) { + while (last_page--) + __free_page(buf->pages[last_page]); + return -ENOMEM; + } + order--; + } + + split_page(pages, order); + for (i = 0; i < (1 << order); i++) + buf->pages[last_page++] = &pages[i]; + + size -= PAGE_SIZE << order; + } + + return 0; +} + static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) { struct 
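vb2_dma_sg_alloc_compacted() above asks the page allocator for the largest order that still fits the remaining size and steps the order down on failure, so an unfragmented system ends up with far fewer scatterlist entries than one-page-at-a-time allocation. A self-contained sketch of the same strategy (hypothetical helper; 'size' is assumed page-aligned, and pages[] must have room for size >> PAGE_SHIFT entries):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_fill_page_array(struct page **pages, size_t size, gfp_t gfp)
{
        unsigned int filled = 0;

        while (size > 0) {
                int order = get_order(size);
                struct page *chunk;
                int i;

                /* Do not overshoot what is still needed. */
                if (order && (PAGE_SIZE << order) > size)
                        order--;

                while (!(chunk = alloc_pages(gfp | __GFP_NOWARN, order))) {
                        if (!order) {
                                while (filled)          /* unwind on OOM */
                                        __free_page(pages[--filled]);
                                return -ENOMEM;
                        }
                        order--;                        /* retry smaller */
                }

                split_page(chunk, order);       /* make pages individually freeable */
                for (i = 0; i < (1 << order); i++)
                        pages[filled++] = &chunk[i];
                size -= PAGE_SIZE << order;
        }
        return 0;
}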
vb2_dma_sg_buf *buf; - int i; + int ret; + int num_pages; buf = kzalloc(sizeof *buf, GFP_KERNEL); if (!buf) @@ -54,29 +98,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla buf->vaddr = NULL; buf->write = 0; buf->offset = 0; - buf->sg_desc.size = size; + buf->size = size; /* size is already page aligned */ - buf->sg_desc.num_pages = size >> PAGE_SHIFT; + buf->num_pages = size >> PAGE_SHIFT; - buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages * - sizeof(*buf->sg_desc.sglist)); - if (!buf->sg_desc.sglist) - goto fail_sglist_alloc; - sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); - - buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), + buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), GFP_KERNEL); if (!buf->pages) goto fail_pages_array_alloc; - for (i = 0; i < buf->sg_desc.num_pages; ++i) { - buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | - __GFP_NOWARN | gfp_flags); - if (NULL == buf->pages[i]) - goto fail_pages_alloc; - sg_set_page(&buf->sg_desc.sglist[i], - buf->pages[i], PAGE_SIZE, 0); - } + ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags); + if (ret) + goto fail_pages_alloc; + + ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, + buf->num_pages, 0, size, gfp_flags); + if (ret) + goto fail_table_alloc; buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_dma_sg_put; @@ -85,18 +123,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla atomic_inc(&buf->refcount); dprintk(1, "%s: Allocated buffer of %d pages\n", - __func__, buf->sg_desc.num_pages); + __func__, buf->num_pages); return buf; +fail_table_alloc: + num_pages = buf->num_pages; + while (num_pages--) + __free_page(buf->pages[num_pages]); fail_pages_alloc: - while (--i >= 0) - __free_page(buf->pages[i]); kfree(buf->pages); - fail_pages_array_alloc: - vfree(buf->sg_desc.sglist); - -fail_sglist_alloc: kfree(buf); return NULL; } @@ -104,14 +140,14 @@ fail_sglist_alloc: static void vb2_dma_sg_put(void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - int i = buf->sg_desc.num_pages; + int i = buf->num_pages; if (atomic_dec_and_test(&buf->refcount)) { dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, - buf->sg_desc.num_pages); + buf->num_pages); if (buf->vaddr) - vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); - vfree(buf->sg_desc.sglist); + vm_unmap_ram(buf->vaddr, buf->num_pages); + sg_free_table(&buf->sg_table); while (--i >= 0) __free_page(buf->pages[i]); kfree(buf->pages); @@ -124,7 +160,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, { struct vb2_dma_sg_buf *buf; unsigned long first, last; - int num_pages_from_user, i; + int num_pages_from_user; buf = kzalloc(sizeof *buf, GFP_KERNEL); if (!buf) @@ -133,56 +169,41 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, buf->vaddr = NULL; buf->write = write; buf->offset = vaddr & ~PAGE_MASK; - buf->sg_desc.size = size; + buf->size = size; first = (vaddr & PAGE_MASK) >> PAGE_SHIFT; last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; - buf->sg_desc.num_pages = last - first + 1; - - buf->sg_desc.sglist = vzalloc( - buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist)); - if (!buf->sg_desc.sglist) - goto userptr_fail_sglist_alloc; - - sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); + buf->num_pages = last - first + 1; - buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), + buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), GFP_KERNEL); if 
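Switching to sg_alloc_table_from_pages() above lets the scatterlist core coalesce physically contiguous pages into single entries, which is exactly what the compacted allocator makes likely. A minimal usage sketch with illustrative names:

#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int example_build_sgt(struct sg_table *sgt, struct page **pages,
                             unsigned int n_pages, unsigned int offset,
                             size_t size)
{
        /* Contiguous pages collapse into single scatterlist entries. */
        int ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                                            offset, size, GFP_KERNEL);
        if (ret)
                return ret;

        /* Map with dma_map_sg(dev, sgt->sgl, sgt->nents, dir) and release
         * with sg_free_table(sgt) when the buffer goes away. */
        return 0;
}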
(!buf->pages) - goto userptr_fail_pages_array_alloc; + return NULL; num_pages_from_user = get_user_pages(current, current->mm, vaddr & PAGE_MASK, - buf->sg_desc.num_pages, + buf->num_pages, write, 1, /* force */ buf->pages, NULL); - if (num_pages_from_user != buf->sg_desc.num_pages) + if (num_pages_from_user != buf->num_pages) goto userptr_fail_get_user_pages; - sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0], - PAGE_SIZE - buf->offset, buf->offset); - size -= PAGE_SIZE - buf->offset; - for (i = 1; i < buf->sg_desc.num_pages; ++i) { - sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i], - min_t(size_t, PAGE_SIZE, size), 0); - size -= min_t(size_t, PAGE_SIZE, size); - } + if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages, + buf->num_pages, buf->offset, size, 0)) + goto userptr_fail_alloc_table_from_pages; + return buf; +userptr_fail_alloc_table_from_pages: userptr_fail_get_user_pages: dprintk(1, "get_user_pages requested/got: %d/%d]\n", - num_pages_from_user, buf->sg_desc.num_pages); + num_pages_from_user, buf->num_pages); while (--num_pages_from_user >= 0) put_page(buf->pages[num_pages_from_user]); kfree(buf->pages); - -userptr_fail_pages_array_alloc: - vfree(buf->sg_desc.sglist); - -userptr_fail_sglist_alloc: kfree(buf); return NULL; } @@ -194,18 +215,18 @@ userptr_fail_sglist_alloc: static void vb2_dma_sg_put_userptr(void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - int i = buf->sg_desc.num_pages; + int i = buf->num_pages; dprintk(1, "%s: Releasing userspace buffer of %d pages\n", - __func__, buf->sg_desc.num_pages); + __func__, buf->num_pages); if (buf->vaddr) - vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); + vm_unmap_ram(buf->vaddr, buf->num_pages); + sg_free_table(&buf->sg_table); while (--i >= 0) { if (buf->write) set_page_dirty_lock(buf->pages[i]); put_page(buf->pages[i]); } - vfree(buf->sg_desc.sglist); kfree(buf->pages); kfree(buf); } @@ -218,7 +239,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv) if (!buf->vaddr) buf->vaddr = vm_map_ram(buf->pages, - buf->sg_desc.num_pages, + buf->num_pages, -1, PAGE_KERNEL); @@ -274,7 +295,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - return &buf->sg_desc; + return &buf->sg_table; } const struct vb2_mem_ops vb2_dma_sg_memops = { diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 08b18f3f526..9e2b985293f 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c @@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; dma_addr_t dst, src; - unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | - DMA_COMPL_SKIP_SRC_UNMAP; + unsigned long dma_flags = 0; dst_sg = buf->vb.sglist; dst_nents = buf->vb.sglen; diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 94b8a332431..d87f77f790d 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -22,7 +22,7 @@ #include <linux/jiffies.h> #include <linux/of.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> /* * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable. 
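On the USERPTR path above the pages come from get_user_pages(), so the release order matters: free the sg_table first (it does not own the pages), mark the pages dirty if the device wrote into them, then drop each pin. A condensed sketch of that teardown with illustrative names:

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static void example_release_pinned(struct sg_table *sgt, struct page **pages,
                                   unsigned int n_pages, bool device_wrote)
{
        unsigned int i;

        sg_free_table(sgt);             /* frees sg entries, not the pages */

        for (i = 0; i < n_pages; i++) {
                if (device_wrote)
                        set_page_dirty_lock(pages[i]);
                put_page(pages[i]);     /* undo the get_user_pages() pin */
        }
        kfree(pages);
}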
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 1a3163f1407..29d5d988a51 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2448,7 +2448,6 @@ static int _mmc_blk_suspend(struct mmc_card *card) struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { - pm_runtime_get_sync(&card->dev); mmc_queue_suspend(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_suspend(&part_md->queue); @@ -2483,7 +2482,6 @@ static int mmc_blk_resume(struct mmc_card *card) list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } - pm_runtime_put(&card->dev); } return 0; } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 3e227bd91e8..64145a32b91 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -342,7 +342,7 @@ int mmc_add_card(struct mmc_card *card) break; } - if (mmc_sd_card_uhs(card) && + if (mmc_card_uhs(card) && (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds))) uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed]; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index bf18b6bfce4..57a2b403bf8 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -23,6 +23,7 @@ #include <linux/log2.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeup.h> #include <linux/suspend.h> #include <linux/fault-inject.h> #include <linux/random.h> @@ -301,7 +302,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) } err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal); + EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true); if (err) { pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); @@ -918,31 +919,6 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) EXPORT_SYMBOL(__mmc_claim_host); /** - * mmc_try_claim_host - try exclusively to claim a host - * @host: mmc host to claim - * - * Returns %1 if the host is claimed, %0 otherwise. - */ -int mmc_try_claim_host(struct mmc_host *host) -{ - int claimed_host = 0; - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - if (!host->claimed || host->claimer == current) { - host->claimed = 1; - host->claimer = current; - host->claim_cnt += 1; - claimed_host = 1; - } - spin_unlock_irqrestore(&host->lock, flags); - if (host->ops->enable && claimed_host && host->claim_cnt == 1) - host->ops->enable(host); - return claimed_host; -} -EXPORT_SYMBOL(mmc_try_claim_host); - -/** * mmc_release_host - release a host * @host: mmc host to release * @@ -1382,22 +1358,31 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) { int bit; - ocr &= host->ocr_avail; + /* + * Sanity check the voltages that the card claims to + * support. 
+ */ + if (ocr & 0x7F) { + dev_warn(mmc_dev(host), + "card claims to support voltages below defined range\n"); + ocr &= ~0x7F; + } - bit = ffs(ocr); - if (bit) { - bit -= 1; + ocr &= host->ocr_avail; + if (!ocr) { + dev_warn(mmc_dev(host), "no support for card's volts\n"); + return 0; + } + if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { + bit = ffs(ocr) - 1; ocr &= 3 << bit; - - mmc_host_clk_hold(host); - host->ios.vdd = bit; - mmc_set_ios(host); - mmc_host_clk_release(host); + mmc_power_cycle(host, ocr); } else { - pr_warning("%s: host doesn't support card's voltages\n", - mmc_hostname(host)); - ocr = 0; + bit = fls(ocr) - 1; + ocr &= 3 << bit; + if (bit != host->ios.vdd) + dev_warn(mmc_dev(host), "exceeding card's volts\n"); } return ocr; @@ -1422,7 +1407,7 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) } -int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) { struct mmc_command cmd = {0}; int err = 0; @@ -1504,7 +1489,7 @@ power_cycle: if (err) { pr_debug("%s: Signal voltage switch failed, " "power cycling card\n", mmc_hostname(host)); - mmc_power_cycle(host); + mmc_power_cycle(host, ocr); } mmc_host_clk_release(host); @@ -1545,22 +1530,14 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) * If a host does all the power sequencing itself, ignore the * initial MMC_POWER_UP stage. */ -void mmc_power_up(struct mmc_host *host) +void mmc_power_up(struct mmc_host *host, u32 ocr) { - int bit; - if (host->ios.power_mode == MMC_POWER_ON) return; mmc_host_clk_hold(host); - /* If ocr is set, we use it */ - if (host->ocr) - bit = ffs(host->ocr) - 1; - else - bit = fls(host->ocr_avail) - 1; - - host->ios.vdd = bit; + host->ios.vdd = fls(ocr) - 1; if (mmc_host_is_spi(host)) host->ios.chip_select = MMC_CS_HIGH; else @@ -1604,13 +1581,6 @@ void mmc_power_off(struct mmc_host *host) host->ios.clock = 0; host->ios.vdd = 0; - - /* - * Reset ocr mask to be the highest possible voltage supported for - * this mmc host. This value will be used at next power up. - */ - host->ocr = 1 << (fls(host->ocr_avail) - 1); - if (!mmc_host_is_spi(host)) { host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.chip_select = MMC_CS_DONTCARE; @@ -1630,12 +1600,12 @@ void mmc_power_off(struct mmc_host *host) mmc_host_clk_release(host); } -void mmc_power_cycle(struct mmc_host *host) +void mmc_power_cycle(struct mmc_host *host, u32 ocr) { mmc_power_off(host); /* Wait at least 1 ms according to SD spec */ mmc_delay(1); - mmc_power_up(host); + mmc_power_up(host, ocr); } /* @@ -1723,6 +1693,28 @@ void mmc_detach_bus(struct mmc_host *host) mmc_bus_put(host); } +static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, + bool cd_irq) +{ +#ifdef CONFIG_MMC_DEBUG + unsigned long flags; + spin_lock_irqsave(&host->lock, flags); + WARN_ON(host->removed); + spin_unlock_irqrestore(&host->lock, flags); +#endif + + /* + * If the device is configured as wakeup, we prevent a new sleep for + * 5 s to give provision for user space to consume the event. + */ + if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && + device_can_wakeup(mmc_dev(host))) + pm_wakeup_event(mmc_dev(host), 5000); + + host->detect_change = 1; + mmc_schedule_delayed_work(&host->detect, delay); +} + /** * mmc_detect_change - process change of state on a MMC socket * @host: host which changed state. 
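The reworked mmc_select_voltage() above no longer programs VDD directly; it masks the reserved low OCR bits, intersects the card's OCR with host->ocr_avail, and narrows the result to at most two adjacent voltage bits, starting from the lowest common bit when the host can do a full power cycle and from the highest otherwise. The bit arithmetic in isolation (a sketch, not the core function):

#include <linux/bitops.h>
#include <linux/types.h>

static u32 example_pick_voltage_window(u32 card_ocr, u32 host_avail,
                                       bool full_pwr_cycle)
{
        u32 ocr = card_ocr & ~0x7F;     /* OCR bits 0..6 are reserved */
        int bit;

        ocr &= host_avail;
        if (!ocr)
                return 0;               /* no common voltage at all */

        bit = full_pwr_cycle ? ffs(ocr) - 1 : fls(ocr) - 1;
        return ocr & (3 << bit);        /* keep a window of at most 2 bits */
}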
@@ -1735,16 +1727,8 @@ void mmc_detach_bus(struct mmc_host *host) */ void mmc_detect_change(struct mmc_host *host, unsigned long delay) { -#ifdef CONFIG_MMC_DEBUG - unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - WARN_ON(host->removed); - spin_unlock_irqrestore(&host->lock, flags); -#endif - host->detect_change = 1; - mmc_schedule_delayed_work(&host->detect, delay); + _mmc_detect_change(host, delay, true); } - EXPORT_SYMBOL(mmc_detect_change); void mmc_init_erase(struct mmc_card *card) @@ -2334,7 +2318,7 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) pr_info("%s: %s: trying to init card at %u Hz\n", mmc_hostname(host), __func__, host->f_init); #endif - mmc_power_up(host); + mmc_power_up(host, host->ocr_avail); /* * Some eMMCs (with VCCQ always on) may not be reset after power up, so @@ -2423,7 +2407,7 @@ int mmc_detect_card_removed(struct mmc_host *host) * rescan handle the card removal. */ cancel_delayed_work(&host->detect); - mmc_detect_change(host, 0); + _mmc_detect_change(host, 0, false); } } @@ -2504,8 +2488,8 @@ void mmc_start_host(struct mmc_host *host) if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) mmc_power_off(host); else - mmc_power_up(host); - mmc_detect_change(host, 0); + mmc_power_up(host, host->ocr_avail); + _mmc_detect_change(host, 0, false); } void mmc_stop_host(struct mmc_host *host) @@ -2583,7 +2567,7 @@ int mmc_power_restore_host(struct mmc_host *host) return -EINVAL; } - mmc_power_up(host); + mmc_power_up(host, host->card->ocr); ret = host->bus_ops->power_restore(host); mmc_bus_put(host); @@ -2657,28 +2641,6 @@ EXPORT_SYMBOL(mmc_cache_ctrl); #ifdef CONFIG_PM -/** - * mmc_suspend_host - suspend a host - * @host: mmc host - */ -int mmc_suspend_host(struct mmc_host *host) -{ - /* This function is deprecated */ - return 0; -} -EXPORT_SYMBOL(mmc_suspend_host); - -/** - * mmc_resume_host - resume a previously suspended host - * @host: mmc host - */ -int mmc_resume_host(struct mmc_host *host) -{ - /* This function is deprecated */ - return 0; -} -EXPORT_SYMBOL(mmc_resume_host); - /* Do the card removal on suspend if card is assumed removeable * Do that in pm notifier while userspace isn't yet frozen, so we will be able to sync the card. 
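_mmc_detect_change() above arms a 5-second wakeup source only for real card-detect interrupts, so a card inserted while the system is suspending keeps it awake long enough for user space to notice; polled hosts and internally triggered rescans skip it. The pattern in isolation, with an illustrative helper name:

#include <linux/pm_wakeup.h>

static void example_report_cd_irq(struct device *dev)
{
        /* Hold the system awake for 5 s after a wake-capable CD IRQ. */
        if (device_can_wakeup(dev))
                pm_wakeup_event(dev, 5000);
}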
@@ -2724,7 +2686,7 @@ int mmc_pm_notify(struct notifier_block *notify_block, spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 0; spin_unlock_irqrestore(&host->lock, flags); - mmc_detect_change(host, 0); + _mmc_detect_change(host, 0, false); } diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 5345d156493..443a584660f 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -42,13 +42,13 @@ void mmc_set_ungated(struct mmc_host *host); void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); void mmc_set_bus_width(struct mmc_host *host, unsigned int width); u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); -int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr); int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); void mmc_set_timing(struct mmc_host *host, unsigned int timing); void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); -void mmc_power_up(struct mmc_host *host); +void mmc_power_up(struct mmc_host *host, u32 ocr); void mmc_power_off(struct mmc_host *host); -void mmc_power_cycle(struct mmc_host *host); +void mmc_power_cycle(struct mmc_host *host, u32 ocr); static inline void mmc_delay(unsigned int ms) { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 6d02012a1d0..f631f5a9bf7 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/stat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -934,6 +935,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, goto err; } + card->ocr = ocr; card->type = MMC_TYPE_MMC; card->rca = 1; memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); @@ -1404,9 +1406,9 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type) if (notify_type == EXT_CSD_POWER_OFF_LONG) timeout = card->ext_csd.power_off_longtime; - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_POWER_OFF_NOTIFICATION, - notify_type, timeout); + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_POWER_OFF_NOTIFICATION, + notify_type, timeout, true, false); if (err) pr_err("%s: Power Off Notification timed out, %u\n", mmc_hostname(card->host), timeout); @@ -1477,6 +1479,9 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) mmc_claim_host(host); + if (mmc_card_suspended(host->card)) + goto out; + if (mmc_card_doing_bkops(host->card)) { err = mmc_stop_bkops(host->card); if (err) @@ -1496,51 +1501,93 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) err = mmc_deselect_cards(host); host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); - if (!err) + if (!err) { mmc_power_off(host); + mmc_card_set_suspended(host->card); + } out: mmc_release_host(host); return err; } /* - * Suspend callback from host. + * Suspend callback */ static int mmc_suspend(struct mmc_host *host) { - return _mmc_suspend(host, true); -} + int err; -/* - * Shutdown callback - */ -static int mmc_shutdown(struct mmc_host *host) -{ - return _mmc_suspend(host, false); + err = _mmc_suspend(host, true); + if (!err) { + pm_runtime_disable(&host->card->dev); + pm_runtime_set_suspended(&host->card->dev); + } + + return err; } /* - * Resume callback from host. - * * This function tries to determine if the same card is still present * and, if so, restore all state to it. 
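The suspend rework above makes _mmc_suspend() idempotent via an explicit "suspended" card state and has the system-sleep callback park runtime PM afterwards, so a later runtime callback cannot touch a powered-off card. A sketch of that ordering with a hypothetical flag:

#include <linux/pm_runtime.h>

static int example_card_suspend(struct device *card_dev, bool *suspended)
{
        if (*suspended)
                return 0;               /* already down, nothing to do */

        /* ... quiesce the card and power off the host here ... */
        *suspended = true;

        pm_runtime_disable(card_dev);   /* no runtime callbacks while asleep */
        pm_runtime_set_suspended(card_dev);
        return 0;
}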
*/ -static int mmc_resume(struct mmc_host *host) +static int _mmc_resume(struct mmc_host *host) { - int err; + int err = 0; BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - mmc_power_up(host); - mmc_select_voltage(host, host->ocr); - err = mmc_init_card(host, host->ocr, host->card); + + if (!mmc_card_suspended(host->card)) + goto out; + + mmc_power_up(host, host->card->ocr); + err = mmc_init_card(host, host->card->ocr, host->card); + mmc_card_clr_suspended(host->card); + +out: mmc_release_host(host); + return err; +} + +/* + * Shutdown callback + */ +static int mmc_shutdown(struct mmc_host *host) +{ + int err = 0; + + /* + * In a specific case for poweroff notify, we need to resume the card + * before we can shutdown it properly. + */ + if (mmc_can_poweroff_notify(host->card) && + !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)) + err = _mmc_resume(host); + + if (!err) + err = _mmc_suspend(host, false); return err; } +/* + * Callback for resume. + */ +static int mmc_resume(struct mmc_host *host) +{ + int err = 0; + + if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { + err = _mmc_resume(host); + pm_runtime_set_active(&host->card->dev); + pm_runtime_mark_last_busy(&host->card->dev); + } + pm_runtime_enable(&host->card->dev); + + return err; +} /* * Callback for runtime_suspend. @@ -1552,18 +1599,11 @@ static int mmc_runtime_suspend(struct mmc_host *host) if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) return 0; - mmc_claim_host(host); - - err = mmc_suspend(host); - if (err) { + err = _mmc_suspend(host, true); + if (err) pr_err("%s: error %d doing aggessive suspend\n", mmc_hostname(host), err); - goto out; - } - mmc_power_off(host); -out: - mmc_release_host(host); return err; } @@ -1574,18 +1614,14 @@ static int mmc_runtime_resume(struct mmc_host *host) { int err; - if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) + if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) return 0; - mmc_claim_host(host); - - mmc_power_up(host); - err = mmc_resume(host); + err = _mmc_resume(host); if (err) pr_err("%s: error %d doing aggessive resume\n", mmc_hostname(host), err); - mmc_release_host(host); return 0; } @@ -1595,7 +1631,7 @@ static int mmc_power_restore(struct mmc_host *host) host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); mmc_claim_host(host); - ret = mmc_init_card(host, host->ocr, host->card); + ret = mmc_init_card(host, host->card->ocr, host->card); mmc_release_host(host); return ret; @@ -1640,7 +1676,7 @@ static void mmc_attach_bus_ops(struct mmc_host *host) int mmc_attach_mmc(struct mmc_host *host) { int err; - u32 ocr; + u32 ocr, rocr; BUG_ON(!host); WARN_ON(!host->claimed); @@ -1666,23 +1702,12 @@ int mmc_attach_mmc(struct mmc_host *host) goto err; } - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage of the card? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1690,7 +1715,7 @@ int mmc_attach_mmc(struct mmc_host *host) /* * Detect and init the card. 
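mmc_resume() above only re-initialises the card immediately when the host lacks MMC_CAP_RUNTIME_RESUME; otherwise it just re-enables runtime PM and lets the first runtime resume do the expensive work. The same decision, reduced to a skeleton with illustrative names:

#include <linux/pm_runtime.h>

static int example_card_resume(struct device *card_dev, bool defer_to_runtime)
{
        int err = 0;

        if (!defer_to_runtime) {
                /* ... power up and re-initialise the card now ... */
                pm_runtime_set_active(card_dev);
                pm_runtime_mark_last_busy(card_dev);
        }
        pm_runtime_enable(card_dev);    /* deferred: runtime resume re-inits */
        return err;
}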
*/ - err = mmc_init_card(host, host->ocr, NULL); + err = mmc_init_card(host, rocr, NULL); if (err) goto err; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index ef183483d5b..e5b5eeb548d 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -23,6 +23,40 @@ #define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +static inline int __mmc_send_status(struct mmc_card *card, u32 *status, + bool ignore_crc) +{ + int err; + struct mmc_command cmd = {0}; + + BUG_ON(!card); + BUG_ON(!card->host); + + cmd.opcode = MMC_SEND_STATUS; + if (!mmc_host_is_spi(card->host)) + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; + if (ignore_crc) + cmd.flags &= ~MMC_RSP_CRC; + + err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (err) + return err; + + /* NOTE: callers are required to understand the difference + * between "native" and SPI format status words! + */ + if (status) + *status = cmd.resp[0]; + + return 0; +} + +int mmc_send_status(struct mmc_card *card, u32 *status) +{ + return __mmc_send_status(card, status, false); +} + static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { int err; @@ -370,16 +404,18 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) * @timeout_ms: timeout (ms) for operation performed by register write, * timeout of zero implies maximum possible timeout * @use_busy_signal: use the busy signal as response type + * @send_status: send status cmd to poll for busy * * Modifies the EXT_CSD register for selected card. */ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, - unsigned int timeout_ms, bool use_busy_signal) + unsigned int timeout_ms, bool use_busy_signal, bool send_status) { int err; struct mmc_command cmd = {0}; unsigned long timeout; - u32 status; + u32 status = 0; + bool ignore_crc = false; BUG_ON(!card); BUG_ON(!card->host); @@ -408,17 +444,37 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (!use_busy_signal) return 0; - /* Must check status to be sure of no errors */ + /* + * Must check status to be sure of no errors + * If CMD13 is to check the busy completion of the timing change, + * disable the check of CRC error. + */ + if (index == EXT_CSD_HS_TIMING && + !(card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)) + ignore_crc = true; + timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS); do { - err = mmc_send_status(card, &status); - if (err) - return err; + if (send_status) { + err = __mmc_send_status(card, &status, ignore_crc); + if (err) + return err; + } if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) break; if (mmc_host_is_spi(card->host)) break; + /* + * We are not allowed to issue a status command and the host + * does'nt support MMC_CAP_WAIT_WHILE_BUSY, then we can only + * rely on waiting for the stated timeout to be sufficient. + */ + if (!send_status) { + mmc_delay(timeout_ms); + return 0; + } + /* Timeout if the device never leaves the program state. */ if (time_after(jiffies, timeout)) { pr_err("%s: Card stuck in programming state! 
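__mmc_switch() above now distinguishes three busy-wait strategies: poll CMD13 (optionally ignoring CRC errors while the bus timing is in flux), trust MMC_CAP_WAIT_WHILE_BUSY, or simply sleep for the stated timeout when status commands are not allowed. The generic poll-with-deadline part looks like this (a sketch, not the core helper):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int example_poll_until_ready(bool (*busy)(void *ctx), void *ctx,
                                    unsigned int timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

        while (busy(ctx)) {
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;      /* stuck in programming state */
                usleep_range(1000, 2000);       /* back off between polls */
        }
        return 0;
}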
%s\n", @@ -445,36 +501,10 @@ EXPORT_SYMBOL_GPL(__mmc_switch); int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, unsigned int timeout_ms) { - return __mmc_switch(card, set, index, value, timeout_ms, true); + return __mmc_switch(card, set, index, value, timeout_ms, true, true); } EXPORT_SYMBOL_GPL(mmc_switch); -int mmc_send_status(struct mmc_card *card, u32 *status) -{ - int err; - struct mmc_command cmd = {0}; - - BUG_ON(!card); - BUG_ON(!card->host); - - cmd.opcode = MMC_SEND_STATUS; - if (!mmc_host_is_spi(card->host)) - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); - if (err) - return err; - - /* NOTE: callers are required to understand the difference - * between "native" and SPI format status words! - */ - if (status) - *status = cmd.resp[0]; - - return 0; -} - static int mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, u8 len) diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 5e8823dc3ef..6f42050b7cc 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/stat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -721,6 +722,7 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) int err; u32 max_current; int retries = 10; + u32 pocr = ocr; try_again: if (!retries) { @@ -773,7 +775,8 @@ try_again: */ if (!mmc_host_is_spi(host) && rocr && ((*rocr & 0x41000000) == 0x41000000)) { - err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, + pocr); if (err == -EAGAIN) { retries--; goto try_again; @@ -935,6 +938,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (IS_ERR(card)) return PTR_ERR(card); + card->ocr = ocr; card->type = MMC_TYPE_SD; memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); } @@ -1064,10 +1068,7 @@ static void mmc_sd_detect(struct mmc_host *host) } } -/* - * Suspend callback from host. - */ -static int mmc_sd_suspend(struct mmc_host *host) +static int _mmc_sd_suspend(struct mmc_host *host) { int err = 0; @@ -1075,34 +1076,77 @@ static int mmc_sd_suspend(struct mmc_host *host) BUG_ON(!host->card); mmc_claim_host(host); + + if (mmc_card_suspended(host->card)) + goto out; + if (!mmc_host_is_spi(host)) err = mmc_deselect_cards(host); host->card->state &= ~MMC_STATE_HIGHSPEED; - if (!err) + if (!err) { mmc_power_off(host); + mmc_card_set_suspended(host->card); + } + +out: mmc_release_host(host); + return err; +} + +/* + * Callback for suspend + */ +static int mmc_sd_suspend(struct mmc_host *host) +{ + int err; + + err = _mmc_sd_suspend(host); + if (!err) { + pm_runtime_disable(&host->card->dev); + pm_runtime_set_suspended(&host->card->dev); + } return err; } /* - * Resume callback from host. - * * This function tries to determine if the same card is still present * and, if so, restore all state to it. 
*/ -static int mmc_sd_resume(struct mmc_host *host) +static int _mmc_sd_resume(struct mmc_host *host) { - int err; + int err = 0; BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - mmc_power_up(host); - mmc_select_voltage(host, host->ocr); - err = mmc_sd_init_card(host, host->ocr, host->card); + + if (!mmc_card_suspended(host->card)) + goto out; + + mmc_power_up(host, host->card->ocr); + err = mmc_sd_init_card(host, host->card->ocr, host->card); + mmc_card_clr_suspended(host->card); + +out: mmc_release_host(host); + return err; +} + +/* + * Callback for resume + */ +static int mmc_sd_resume(struct mmc_host *host) +{ + int err = 0; + + if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { + err = _mmc_sd_resume(host); + pm_runtime_set_active(&host->card->dev); + pm_runtime_mark_last_busy(&host->card->dev); + } + pm_runtime_enable(&host->card->dev); return err; } @@ -1117,18 +1161,11 @@ static int mmc_sd_runtime_suspend(struct mmc_host *host) if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) return 0; - mmc_claim_host(host); - - err = mmc_sd_suspend(host); - if (err) { + err = _mmc_sd_suspend(host); + if (err) pr_err("%s: error %d doing aggessive suspend\n", mmc_hostname(host), err); - goto out; - } - mmc_power_off(host); -out: - mmc_release_host(host); return err; } @@ -1139,18 +1176,14 @@ static int mmc_sd_runtime_resume(struct mmc_host *host) { int err; - if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) + if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) return 0; - mmc_claim_host(host); - - mmc_power_up(host); - err = mmc_sd_resume(host); + err = _mmc_sd_resume(host); if (err) pr_err("%s: error %d doing aggessive resume\n", mmc_hostname(host), err); - mmc_release_host(host); return 0; } @@ -1160,7 +1193,7 @@ static int mmc_sd_power_restore(struct mmc_host *host) host->card->state &= ~MMC_STATE_HIGHSPEED; mmc_claim_host(host); - ret = mmc_sd_init_card(host, host->ocr, host->card); + ret = mmc_sd_init_card(host, host->card->ocr, host->card); mmc_release_host(host); return ret; @@ -1205,7 +1238,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host) int mmc_attach_sd(struct mmc_host *host) { int err; - u32 ocr; + u32 ocr, rocr; BUG_ON(!host); WARN_ON(!host->claimed); @@ -1229,31 +1262,12 @@ int mmc_attach_sd(struct mmc_host *host) goto err; } - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - - if ((ocr & MMC_VDD_165_195) && - !(host->ocr_avail_sd & MMC_VDD_165_195)) { - pr_warning("%s: SD card claims to support the " - "incompletely defined 'low voltage range'. This " - "will be ignored.\n", mmc_hostname(host)); - ocr &= ~MMC_VDD_165_195; - } - - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage(s) of the card(s)? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1261,7 +1275,7 @@ int mmc_attach_sd(struct mmc_host *host) /* * Detect and init the card. 
*/ - err = mmc_sd_init_card(host, host->ocr, NULL); + err = mmc_sd_init_card(host, rocr, NULL); if (err) goto err; diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 80d89cff730..4d721c6e2af 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -593,23 +593,28 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, struct mmc_card *card; int err; int retries = 10; + u32 rocr = 0; + u32 ocr_card = ocr; BUG_ON(!host); WARN_ON(!host->claimed); + /* to query card if 1.8V signalling is supported */ + if (mmc_host_uhs(host)) + ocr |= R4_18V_PRESENT; + try_again: if (!retries) { pr_warning("%s: Skipping voltage switch\n", mmc_hostname(host)); ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } /* * Inform the card of the voltage */ if (!powered_resume) { - err = mmc_send_io_op_cond(host, host->ocr, &ocr); + err = mmc_send_io_op_cond(host, ocr, &rocr); if (err) goto err; } @@ -632,8 +637,8 @@ try_again: goto err; } - if ((ocr & R4_MEMORY_PRESENT) && - mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) { + if ((rocr & R4_MEMORY_PRESENT) && + mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) { card->type = MMC_TYPE_SD_COMBO; if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || @@ -663,8 +668,9 @@ try_again: * systems that claim 1.8v signalling in fact do not support * it. */ - if (!powered_resume && (ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) { - err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); + if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, + ocr); if (err == -EAGAIN) { sdio_reset(host); mmc_go_idle(host); @@ -674,12 +680,10 @@ try_again: goto try_again; } else if (err) { ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } err = 0; } else { ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } /* @@ -759,6 +763,7 @@ try_again: card = oldcard; } + card->ocr = ocr_card; mmc_fixup_device(card, NULL); if (card->type == MMC_TYPE_SD_COMBO) { @@ -981,8 +986,7 @@ static int mmc_sdio_resume(struct mmc_host *host) /* Restore power if needed */ if (!mmc_card_keep_power(host)) { - mmc_power_up(host); - mmc_select_voltage(host, host->ocr); + mmc_power_up(host, host->card->ocr); /* * Tell runtime PM core we just powered up the card, * since it still believes the card is powered off. @@ -1000,7 +1004,7 @@ static int mmc_sdio_resume(struct mmc_host *host) if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { sdio_reset(host); mmc_go_idle(host); - err = mmc_sdio_init_card(host, host->ocr, host->card, + err = mmc_sdio_init_card(host, host->card->ocr, host->card, mmc_card_keep_power(host)); } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { /* We may have switched to 1-bit mode during suspend */ @@ -1040,7 +1044,6 @@ static int mmc_sdio_resume(struct mmc_host *host) static int mmc_sdio_power_restore(struct mmc_host *host) { int ret; - u32 ocr; BUG_ON(!host); BUG_ON(!host->card); @@ -1062,32 +1065,17 @@ static int mmc_sdio_power_restore(struct mmc_host *host) * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and * harmless in other situations. * - * With these steps taken, mmc_select_voltage() is also required to - * restore the correct voltage setting of the card. 
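The SDIO init path above ORs R4_18V_PRESENT into the requested OCR only when the host supports UHS, then trusts the card's R4 reply (rocr) both for the 1.8 V switch and for the combo-card memory probe. A small illustrative decoder for the relevant R4 bits:

#include <linux/mmc/sdio.h>
#include <linux/printk.h>
#include <linux/types.h>

static void example_decode_r4(u32 r4)
{
        bool combo   = r4 & R4_MEMORY_PRESENT;  /* SD memory behind the I/O card */
        bool can_1v8 = r4 & R4_18V_PRESENT;     /* card accepts 1.8 V signalling */
        unsigned int nr_funcs = (r4 >> 28) & 0x7;

        pr_debug("SDIO R4: %u function(s), combo=%d, 1.8V=%d\n",
                 nr_funcs, combo, can_1v8);
}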
*/ sdio_reset(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); - ret = mmc_send_io_op_cond(host, 0, &ocr); + ret = mmc_send_io_op_cond(host, 0, NULL); if (ret) goto out; - if (host->ocr_avail_sdio) - host->ocr_avail = host->ocr_avail_sdio; - - host->ocr = mmc_select_voltage(host, ocr & ~0x7F); - if (!host->ocr) { - ret = -EINVAL; - goto out; - } - - if (mmc_host_uhs(host)) - /* to query card if 1.8V signalling is supported */ - host->ocr |= R4_18V_PRESENT; - - ret = mmc_sdio_init_card(host, host->ocr, host->card, + ret = mmc_sdio_init_card(host, host->card->ocr, host->card, mmc_card_keep_power(host)); if (!ret && host->sdio_irqs) mmc_signal_sdio_irq(host); @@ -1108,7 +1096,7 @@ static int mmc_sdio_runtime_suspend(struct mmc_host *host) static int mmc_sdio_runtime_resume(struct mmc_host *host) { /* Restore power and re-initialize. */ - mmc_power_up(host); + mmc_power_up(host, host->card->ocr); return mmc_sdio_power_restore(host); } @@ -1131,7 +1119,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = { int mmc_attach_sdio(struct mmc_host *host) { int err, i, funcs; - u32 ocr; + u32 ocr, rocr; struct mmc_card *card; BUG_ON(!host); @@ -1145,23 +1133,13 @@ int mmc_attach_sdio(struct mmc_host *host) if (host->ocr_avail_sdio) host->ocr_avail = host->ocr_avail_sdio; - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage(s) of the card(s)? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1169,22 +1147,10 @@ int mmc_attach_sdio(struct mmc_host *host) /* * Detect and init the card. */ - if (mmc_host_uhs(host)) - /* to query card if 1.8V signalling is supported */ - host->ocr |= R4_18V_PRESENT; + err = mmc_sdio_init_card(host, rocr, NULL, 0); + if (err) + goto err; - err = mmc_sdio_init_card(host, host->ocr, NULL, 0); - if (err) { - if (err == -EAGAIN) { - /* - * Retry initialization with S18R set to 0. - */ - host->ocr &= ~R4_18V_PRESENT; - err = mmc_sdio_init_card(host, host->ocr, NULL, 0); - } - if (err) - goto err; - } card = host->card; /* diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index ef8956568c3..157b570ba34 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -308,8 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func) struct mmc_host *host = func->card->host; u64 addr = (host->slotno << 16) | func->num; - ACPI_HANDLE_SET(&func->dev, - acpi_get_child(ACPI_HANDLE(host->parent), addr)); + acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr); } #else static inline void sdio_acpi_set_handle(struct sdio_func *func) {} diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 69e438ee043..2cbb4516d35 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -255,7 +255,6 @@ struct atmel_mci_slot { #define ATMCI_CARD_PRESENT 0 #define ATMCI_CARD_NEED_INIT 1 #define ATMCI_SHUTDOWN 2 -#define ATMCI_SUSPENDED 3 int detect_pin; int wp_pin; @@ -589,6 +588,13 @@ static void atmci_timeout_timer(unsigned long data) if (host->mrq->cmd->data) { host->mrq->cmd->data->error = -ETIMEDOUT; host->data = NULL; + /* + * With some SDIO modules, sometimes DMA transfer hangs. 
If + * stop_transfer() is not called then the DMA request is not + * removed, following ones are queued and never computed. + */ + if (host->state == STATE_DATA_XFER) + host->stop_transfer(host); } else { host->mrq->cmd->error = -ETIMEDOUT; host->cmd = NULL; @@ -1803,12 +1809,14 @@ static void atmci_tasklet_func(unsigned long priv) if (unlikely(status)) { host->stop_transfer(host); host->data = NULL; - if (status & ATMCI_DTOE) { - data->error = -ETIMEDOUT; - } else if (status & ATMCI_DCRCE) { - data->error = -EILSEQ; - } else { - data->error = -EIO; + if (data) { + if (status & ATMCI_DTOE) { + data->error = -ETIMEDOUT; + } else if (status & ATMCI_DCRCE) { + data->error = -EILSEQ; + } else { + data->error = -EIO; + } } } @@ -2520,70 +2528,10 @@ static int __exit atmci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int atmci_suspend(struct device *dev) -{ - struct atmel_mci *host = dev_get_drvdata(dev); - int i; - - for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { - struct atmel_mci_slot *slot = host->slot[i]; - int ret; - - if (!slot) - continue; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slot[i]; - if (slot - && test_bit(ATMCI_SUSPENDED, &slot->flags)) { - mmc_resume_host(host->slot[i]->mmc); - clear_bit(ATMCI_SUSPENDED, &slot->flags); - } - } - return ret; - } else { - set_bit(ATMCI_SUSPENDED, &slot->flags); - } - } - - return 0; -} - -static int atmci_resume(struct device *dev) -{ - struct atmel_mci *host = dev_get_drvdata(dev); - int i; - int ret = 0; - - for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { - struct atmel_mci_slot *slot = host->slot[i]; - int err; - - slot = host->slot[i]; - if (!slot) - continue; - if (!test_bit(ATMCI_SUSPENDED, &slot->flags)) - continue; - err = mmc_resume_host(slot->mmc); - if (err < 0) - ret = err; - else - clear_bit(ATMCI_SUSPENDED, &slot->flags); - } - - return ret; -} -#endif - -static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume); - static struct platform_driver atmci_driver = { .remove = __exit_p(atmci_remove), .driver = { .name = "atmel_mci", - .pm = &atmci_pm, .of_match_table = of_match_ptr(atmci_dt_ids), }, }; diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index df9becdd2e9..f5443a6c491 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -1157,11 +1157,6 @@ static int au1xmmc_remove(struct platform_device *pdev) static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) { struct au1xmmc_host *host = platform_get_drvdata(pdev); - int ret; - - ret = mmc_suspend_host(host->mmc); - if (ret) - return ret; au_writel(0, HOST_CONFIG2(host)); au_writel(0, HOST_CONFIG(host)); @@ -1178,7 +1173,7 @@ static int au1xmmc_resume(struct platform_device *pdev) au1xmmc_reset_controller(host); - return mmc_resume_host(host->mmc); + return 0; } #else #define au1xmmc_suspend NULL diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index 94fae2f1baa..2b7f37e82ca 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c @@ -391,6 +391,7 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) /* Disable 4 bit SDIO */ cfg &= ~SD4E; } + bfin_write_SDH_CFG(cfg); host->power_mode = ios->power_mode; #ifndef RSI_BLKSZ @@ -415,7 +416,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) cfg &= ~SD_CMD_OD; # endif - if (ios->power_mode != MMC_POWER_OFF) cfg |= PWR_ON; else @@ -433,7 +433,6 @@ static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) clk_ctl |= 
CLK_E; host->clk_div = clk_div; bfin_write_SDH_CLK_CTL(clk_ctl); - } else sdh_stop_clock(host); @@ -640,21 +639,15 @@ static int sdh_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int sdh_suspend(struct platform_device *dev, pm_message_t state) { - struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); peripheral_free_list(drv_data->pin_req); - return ret; + return 0; } static int sdh_resume(struct platform_device *dev) { - struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); int ret = 0; @@ -665,10 +658,6 @@ static int sdh_resume(struct platform_device *dev) } sdh_reset(); - - if (mmc) - ret = mmc_resume_host(mmc); - return ret; } #else diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 9d6e2b84440..1087b4c79cd 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -667,12 +667,6 @@ static const struct mmc_host_ops cb710_mmc_host = { static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); - struct mmc_host *mmc = cb710_slot_to_mmc(slot); - int err; - - err = mmc_suspend_host(mmc); - if (err) - return err; cb710_mmc_enable_irq(slot, 0, ~0); return 0; @@ -681,11 +675,9 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) static int cb710_mmc_resume(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); - struct mmc_host *mmc = cb710_slot_to_mmc(slot); cb710_mmc_enable_irq(slot, 0, ~0); - - return mmc_resume_host(mmc); + return 0; } #endif /* CONFIG_PM */ diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index e9fa87df909..d6153740b77 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -193,7 +193,6 @@ struct mmc_davinci_host { #define DAVINCI_MMC_DATADIR_READ 1 #define DAVINCI_MMC_DATADIR_WRITE 2 unsigned char data_dir; - unsigned char suspended; /* buffer is used during PIO of one scatterlist segment, and * is updated along with buffer_bytes_left. 
bytes_left applies @@ -1435,38 +1434,23 @@ static int davinci_mmcsd_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); - int ret; - ret = mmc_suspend_host(host->mmc); - if (!ret) { - writel(0, host->base + DAVINCI_MMCIM); - mmc_davinci_reset_ctrl(host, 1); - clk_disable(host->clk); - host->suspended = 1; - } else { - host->suspended = 0; - } + writel(0, host->base + DAVINCI_MMCIM); + mmc_davinci_reset_ctrl(host, 1); + clk_disable(host->clk); - return ret; + return 0; } static int davinci_mmcsd_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); - int ret; - - if (!host->suspended) - return 0; clk_enable(host->clk); - mmc_davinci_reset_ctrl(host, 0); - ret = mmc_resume_host(host->mmc); - if (!ret) - host->suspended = 0; - return ret; + return 0; } static const struct dev_pm_ops davinci_mmcsd_pm = { diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 6a1fa2110a0..3423c5ed50c 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -14,8 +14,10 @@ #include <linux/clk.h> #include <linux/mmc/host.h> #include <linux/mmc/dw_mmc.h> +#include <linux/mmc/mmc.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/slab.h> #include "dw_mmc.h" #include "dw_mmc-pltfm.h" @@ -30,16 +32,39 @@ #define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \ SDMMC_CLKSEL_CCLK_DRIVE(y) | \ SDMMC_CLKSEL_CCLK_DIVIDER(z)) +#define SDMMC_CLKSEL_WAKEUP_INT BIT(11) #define EXYNOS4210_FIXED_CIU_CLK_DIV 2 #define EXYNOS4412_FIXED_CIU_CLK_DIV 4 +/* Block number in eMMC */ +#define DWMCI_BLOCK_NUM 0xFFFFFFFF + +#define SDMMC_EMMCP_BASE 0x1000 +#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010) +#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200) +#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204) +#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C) + +/* SMU control bits */ +#define DWMCI_MPSCTRL_SECURE_READ_BIT BIT(7) +#define DWMCI_MPSCTRL_SECURE_WRITE_BIT BIT(6) +#define DWMCI_MPSCTRL_NON_SECURE_READ_BIT BIT(5) +#define DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4) +#define DWMCI_MPSCTRL_USE_FUSE_KEY BIT(3) +#define DWMCI_MPSCTRL_ECB_MODE BIT(2) +#define DWMCI_MPSCTRL_ENCRYPTION BIT(1) +#define DWMCI_MPSCTRL_VALID BIT(0) + +#define EXYNOS_CCLKIN_MIN 50000000 /* unit: HZ */ + /* Variations in Exynos specific dw-mshc controller */ enum dw_mci_exynos_type { DW_MCI_TYPE_EXYNOS4210, DW_MCI_TYPE_EXYNOS4412, DW_MCI_TYPE_EXYNOS5250, DW_MCI_TYPE_EXYNOS5420, + DW_MCI_TYPE_EXYNOS5420_SMU, }; /* Exynos implementation specific driver private data */ @@ -48,6 +73,7 @@ struct dw_mci_exynos_priv_data { u8 ciu_div; u32 sdr_timing; u32 ddr_timing; + u32 cur_speed; }; static struct dw_mci_exynos_compatible { @@ -66,44 +92,80 @@ static struct dw_mci_exynos_compatible { }, { .compatible = "samsung,exynos5420-dw-mshc", .ctrl_type = DW_MCI_TYPE_EXYNOS5420, + }, { + .compatible = "samsung,exynos5420-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU, }, }; static int dw_mci_exynos_priv_init(struct dw_mci *host) { - struct dw_mci_exynos_priv_data *priv; - int idx; - - priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - dev_err(host->dev, "mem alloc failed for private data\n"); - return -ENOMEM; - } + struct dw_mci_exynos_priv_data *priv = host->priv; - for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { - if 
(of_device_is_compatible(host->dev->of_node, - exynos_compat[idx].compatible)) - priv->ctrl_type = exynos_compat[idx].ctrl_type; + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU) { + mci_writel(host, MPSBEGIN0, 0); + mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM); + mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT | + DWMCI_MPSCTRL_NON_SECURE_READ_BIT | + DWMCI_MPSCTRL_VALID | + DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT); } - host->priv = priv; return 0; } static int dw_mci_exynos_setup_clock(struct dw_mci *host) { struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned long rate = clk_get_rate(host->ciu_clk); - if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 || - priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420) - host->bus_hz /= (priv->ciu_div + 1); - else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) - host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV; - else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) - host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV; + host->bus_hz = rate / (priv->ciu_div + 1); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_exynos_suspend(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + return dw_mci_suspend(host); +} + +static int dw_mci_exynos_resume(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + dw_mci_exynos_priv_init(host); + return dw_mci_resume(host); +} + +/** + * dw_mci_exynos_resume_noirq - Exynos-specific resume code + * + * On exynos5420 there is a silicon errata that will sometimes leave the + * WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to indicate + * that it fired and we can clear it by writing a 1 back. Clear it to prevent + * interrupts from going off constantly. + * + * We run this code on all exynos variants because it doesn't hurt. 
+ */ + +static int dw_mci_exynos_resume_noirq(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + u32 clksel; + + clksel = mci_readl(host, CLKSEL); + if (clksel & SDMMC_CLKSEL_WAKEUP_INT) + mci_writel(host, CLKSEL, clksel); return 0; } +#else +#define dw_mci_exynos_suspend NULL +#define dw_mci_exynos_resume NULL +#define dw_mci_exynos_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) { @@ -121,23 +183,68 @@ static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) { struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned int wanted = ios->clock; + unsigned long actual; + u8 div = priv->ciu_div + 1; - if (ios->timing == MMC_TIMING_UHS_DDR50) + if (ios->timing == MMC_TIMING_UHS_DDR50) { mci_writel(host, CLKSEL, priv->ddr_timing); - else + /* Should be double rate for DDR mode */ + if (ios->bus_width == MMC_BUS_WIDTH_8) + wanted <<= 1; + } else { mci_writel(host, CLKSEL, priv->sdr_timing); + } + + /* Don't care if wanted clock is zero */ + if (!wanted) + return; + + /* Guaranteed minimum frequency for cclkin */ + if (wanted < EXYNOS_CCLKIN_MIN) + wanted = EXYNOS_CCLKIN_MIN; + + if (wanted != priv->cur_speed) { + int ret = clk_set_rate(host->ciu_clk, wanted * div); + if (ret) + dev_warn(host->dev, + "failed to set clk-rate %u error: %d\n", + wanted * div, ret); + actual = clk_get_rate(host->ciu_clk); + host->bus_hz = actual / div; + priv->cur_speed = wanted; + host->current_speed = 0; + } } static int dw_mci_exynos_parse_dt(struct dw_mci *host) { - struct dw_mci_exynos_priv_data *priv = host->priv; + struct dw_mci_exynos_priv_data *priv; struct device_node *np = host->dev->of_node; u32 timing[2]; u32 div = 0; + int idx; int ret; - of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); - priv->ciu_div = div; + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(host->dev, "mem alloc failed for private data\n"); + return -ENOMEM; + } + + for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { + if (of_device_is_compatible(np, exynos_compat[idx].compatible)) + priv->ctrl_type = exynos_compat[idx].ctrl_type; + } + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) + priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1; + else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) + priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1; + else { + of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); + priv->ciu_div = div; + } ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr-timing", timing, 2); @@ -152,9 +259,131 @@ static int dw_mci_exynos_parse_dt(struct dw_mci *host) return ret; priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); + host->priv = priv; return 0; } +static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) +{ + return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL)); +} + +static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) +{ + u32 clksel; + clksel = mci_readl(host, CLKSEL); + clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample); + mci_writel(host, CLKSEL, clksel); +} + +static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) +{ + u32 clksel; + u8 sample; + + clksel = mci_readl(host, CLKSEL); + sample = (clksel + 1) & 0x7; + clksel = (clksel & ~0x7) | sample; + mci_writel(host, CLKSEL, clksel); + return sample; +} + +static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates) +{ + const u8 iter = 8; + 
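dw_mci_exynos_set_ios() above now re-rates the CIU clock itself (doubling the request for 8-bit DDR), clamps it to the controller's 50 MHz cclkin floor, and derives bus_hz from whatever rate the clock framework actually delivered. That last step in isolation, with the constant taken from the driver and an illustrative helper name:

#include <linux/clk.h>
#include <linux/printk.h>

#define EXAMPLE_CCLKIN_MIN      50000000        /* Hz, from the driver above */

static unsigned int example_set_ciu_rate(struct clk *ciu,
                                         unsigned int wanted, unsigned int div)
{
        if (wanted < EXAMPLE_CCLKIN_MIN)
                wanted = EXAMPLE_CCLKIN_MIN;

        if (clk_set_rate(ciu, (unsigned long)wanted * div))
                pr_warn("failed to set CIU clock to %u Hz\n", wanted * div);

        /* bus_hz must follow the rate we actually got, not the request. */
        return clk_get_rate(ciu) / div;
}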
u8 __c; + s8 i, loc = -1; + + for (i = 0; i < iter; i++) { + __c = ror8(candiates, i); + if ((__c & 0xc7) == 0xc7) { + loc = i; + goto out; + } + } + + for (i = 0; i < iter; i++) { + __c = ror8(candiates, i); + if ((__c & 0x83) == 0x83) { + loc = i; + goto out; + } + } + +out: + return loc; +} + +static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode, + struct dw_mci_tuning_data *tuning_data) +{ + struct dw_mci *host = slot->host; + struct mmc_host *mmc = slot->mmc; + const u8 *blk_pattern = tuning_data->blk_pattern; + u8 *blk_test; + unsigned int blksz = tuning_data->blksz; + u8 start_smpl, smpl, candiates = 0; + s8 found = -1; + int ret = 0; + + blk_test = kmalloc(blksz, GFP_KERNEL); + if (!blk_test) + return -ENOMEM; + + start_smpl = dw_mci_exynos_get_clksmpl(host); + + do { + struct mmc_request mrq = {NULL}; + struct mmc_command cmd = {0}; + struct mmc_command stop = {0}; + struct mmc_data data = {0}; + struct scatterlist sg; + + cmd.opcode = opcode; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + stop.opcode = MMC_STOP_TRANSMISSION; + stop.arg = 0; + stop.flags = MMC_RSP_R1B | MMC_CMD_AC; + + data.blksz = blksz; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, blk_test, blksz); + mrq.cmd = &cmd; + mrq.stop = &stop; + mrq.data = &data; + host->mrq = &mrq; + + mci_writel(host, TMOUT, ~0); + smpl = dw_mci_exynos_move_next_clksmpl(host); + + mmc_wait_for_req(mmc, &mrq); + + if (!cmd.error && !data.error) { + if (!memcmp(blk_pattern, blk_test, blksz)) + candiates |= (1 << smpl); + } else { + dev_dbg(host->dev, + "Tuning error: cmd.error:%d, data.error:%d\n", + cmd.error, data.error); + } + } while (start_smpl != smpl); + + found = dw_mci_exynos_get_best_clksmpl(candiates); + if (found >= 0) + dw_mci_exynos_set_clksmpl(host, found); + else + ret = -EIO; + + kfree(blk_test); + return ret; +} + /* Common capabilities of Exynos4/Exynos5 SoC */ static unsigned long exynos_dwmmc_caps[4] = { MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR | @@ -171,6 +400,7 @@ static const struct dw_mci_drv_data exynos_drv_data = { .prepare_command = dw_mci_exynos_prepare_command, .set_ios = dw_mci_exynos_set_ios, .parse_dt = dw_mci_exynos_parse_dt, + .execute_tuning = dw_mci_exynos_execute_tuning, }; static const struct of_device_id dw_mci_exynos_match[] = { @@ -180,6 +410,8 @@ static const struct of_device_id dw_mci_exynos_match[] = { .data = &exynos_drv_data, }, { .compatible = "samsung,exynos5420-dw-mshc", .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos5420-dw-mshc-smu", + .data = &exynos_drv_data, }, {}, }; MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); @@ -194,13 +426,20 @@ static int dw_mci_exynos_probe(struct platform_device *pdev) return dw_mci_pltfm_register(pdev, drv_data); } +const struct dev_pm_ops dw_mci_exynos_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume) + .resume_noirq = dw_mci_exynos_resume_noirq, + .thaw_noirq = dw_mci_exynos_resume_noirq, + .restore_noirq = dw_mci_exynos_resume_noirq, +}; + static struct platform_driver dw_mci_exynos_pltfm_driver = { .probe = dw_mci_exynos_probe, .remove = __exit_p(dw_mci_pltfm_remove), .driver = { .name = "dwmmc_exynos", .of_match_table = dw_mci_exynos_match, - .pm = &dw_mci_pltfm_pmops, + .pm = &dw_mci_exynos_pmops, }, }; diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index 20897529ea5..5c496565529 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -39,7 +39,6 @@ 
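/*
 * A self-contained illustration of the sample-window selection in
 * dw_mci_exynos_get_best_clksmpl() above; it is not taken from the patch
 * itself.  The eight tuning results form a circular bitmap:
 * (ror8(c, i) & 0xc7) == 0xc7 means samples i-2..i+2 (mod 8) all passed,
 * i.e. sample i has two good neighbours on each side, and the 0x83 mask
 * relaxes that to one neighbour per side.  ror8() is open-coded so the
 * sketch builds in userspace; the bitmap value in main() is made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t ror8(uint8_t v, unsigned int n)
{
	n &= 7;
	return (uint8_t)((v >> n) | (v << (8 - n)));
}

/* Returns the sample point to program, or -1 if no usable window exists. */
static int best_clksmpl(uint8_t candidates)
{
	int i;

	for (i = 0; i < 8; i++)		/* prefer two passing neighbours per side */
		if ((ror8(candidates, i) & 0xc7) == 0xc7)
			return i;
	for (i = 0; i < 8; i++)		/* fall back to one neighbour per side */
		if ((ror8(candidates, i) & 0x83) == 0x83)
			return i;
	return -1;
}

int main(void)
{
	/* samples 2, 3, 4 and 7 passed tuning: 0b10011100 */
	printf("best sample point: %d\n", best_clksmpl(0x9c));
	return 0;
}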
int dw_mci_pltfm_register(struct platform_device *pdev, { struct dw_mci *host; struct resource *regs; - int ret; host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); if (!host) @@ -59,12 +58,6 @@ int dw_mci_pltfm_register(struct platform_device *pdev, if (IS_ERR(host->regs)) return PTR_ERR(host->regs); - if (drv_data && drv_data->init) { - ret = drv_data->init(host); - if (ret) - return ret; - } - platform_set_drvdata(pdev, host); return dw_mci_probe(host); } diff --git a/drivers/mmc/host/dw_mmc-socfpga.c b/drivers/mmc/host/dw_mmc-socfpga.c index 14b5961a851..3e8e53ae330 100644 --- a/drivers/mmc/host/dw_mmc-socfpga.c +++ b/drivers/mmc/host/dw_mmc-socfpga.c @@ -38,21 +38,6 @@ struct dw_mci_socfpga_priv_data { static int dw_mci_socfpga_priv_init(struct dw_mci *host) { - struct dw_mci_socfpga_priv_data *priv; - - priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - dev_err(host->dev, "mem alloc failed for private data\n"); - return -ENOMEM; - } - - priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr"); - if (IS_ERR(priv->sysreg)) { - dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n"); - return PTR_ERR(priv->sysreg); - } - host->priv = priv; - return 0; } @@ -79,12 +64,24 @@ static void dw_mci_socfpga_prepare_command(struct dw_mci *host, u32 *cmdr) static int dw_mci_socfpga_parse_dt(struct dw_mci *host) { - struct dw_mci_socfpga_priv_data *priv = host->priv; + struct dw_mci_socfpga_priv_data *priv; struct device_node *np = host->dev->of_node; u32 timing[2]; u32 div = 0; int ret; + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(host->dev, "mem alloc failed for private data\n"); + return -ENOMEM; + } + + priv->sysreg = syscon_regmap_lookup_by_compatible("altr,sys-mgr"); + if (IS_ERR(priv->sysreg)) { + dev_err(host->dev, "regmap for altr,sys-mgr lookup failed.\n"); + return PTR_ERR(priv->sysreg); + } + ret = of_property_read_u32(np, "altr,dw-mshc-ciu-div", &div); if (ret) dev_info(host->dev, "No dw-mshc-ciu-div specified, assuming 1"); @@ -96,6 +93,7 @@ static int dw_mci_socfpga_parse_dt(struct dw_mci *host) return ret; priv->hs_timing = SYSMGR_SDMMC_CTRL_SET(timing[0], timing[1]); + host->priv = priv; return 0; } @@ -113,7 +111,7 @@ static const struct of_device_id dw_mci_socfpga_match[] = { }; MODULE_DEVICE_TABLE(of, dw_mci_socfpga_match); -int dw_mci_socfpga_probe(struct platform_device *pdev) +static int dw_mci_socfpga_probe(struct platform_device *pdev) { const struct dw_mci_drv_data *drv_data; const struct of_device_id *match; @@ -128,7 +126,7 @@ static struct platform_driver dw_mci_socfpga_pltfm_driver = { .remove = __exit_p(dw_mci_pltfm_remove), .driver = { .name = "dwmmc_socfpga", - .of_match_table = of_match_ptr(dw_mci_socfpga_match), + .of_match_table = dw_mci_socfpga_match, .pm = &dw_mci_pltfm_pmops, }, }; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 018f365e5ae..4bce0deec36 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -29,6 +29,7 @@ #include <linux/irq.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> #include <linux/mmc/dw_mmc.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> @@ -50,6 +51,9 @@ #define DW_MCI_RECV_STATUS 2 #define DW_MCI_DMA_THRESHOLD 16 +#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ +#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */ + #ifdef CONFIG_MMC_DW_IDMAC #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ SDMMC_IDMAC_INT_CES | 
SDMMC_IDMAC_INT_DU | \ @@ -76,42 +80,39 @@ struct idmac_desc { }; #endif /* CONFIG_MMC_DW_IDMAC */ -/** - * struct dw_mci_slot - MMC slot state - * @mmc: The mmc_host representing this slot. - * @host: The MMC controller this slot is using. - * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) - * @wp_gpio: If gpio_is_valid() we'll use this to read write protect. - * @ctype: Card type for this slot. - * @mrq: mmc_request currently being processed or waiting to be - * processed, or NULL when the slot is idle. - * @queue_node: List node for placing this node in the @queue list of - * &struct dw_mci. - * @clock: Clock rate configured by set_ios(). Protected by host->lock. - * @flags: Random state bits associated with the slot. - * @id: Number of this slot. - * @last_detect_state: Most recently observed card detect state. - */ -struct dw_mci_slot { - struct mmc_host *mmc; - struct dw_mci *host; - - int quirks; - int wp_gpio; - - u32 ctype; - - struct mmc_request *mrq; - struct list_head queue_node; +static const u8 tuning_blk_pattern_4bit[] = { + 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, + 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, + 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, + 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, + 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, + 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, + 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, + 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, +}; - unsigned int clock; - unsigned long flags; -#define DW_MMC_CARD_PRESENT 0 -#define DW_MMC_CARD_NEED_INIT 1 - int id; - int last_detect_state; +static const u8 tuning_blk_pattern_8bit[] = { + 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, + 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, + 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, + 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, + 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, + 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, + 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, + 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, + 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, + 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, + 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, + 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, + 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, + 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, }; +static inline bool dw_mci_fifo_reset(struct dw_mci *host); +static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host); + #if defined(CONFIG_DEBUG_FS) static int dw_mci_req_show(struct seq_file *s, void *v) { @@ -249,10 +250,15 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) cmdr = cmd->opcode; - if (cmdr == MMC_STOP_TRANSMISSION) + if (cmd->opcode == MMC_STOP_TRANSMISSION || + cmd->opcode == MMC_GO_IDLE_STATE || + cmd->opcode == MMC_GO_INACTIVE_STATE || + (cmd->opcode == SD_IO_RW_DIRECT && + ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) cmdr |= SDMMC_CMD_STOP; else - cmdr |= SDMMC_CMD_PRV_DAT_WAIT; + if (cmd->opcode != MMC_SEND_STATUS && cmd->data) + cmdr |= SDMMC_CMD_PRV_DAT_WAIT; if (cmd->flags & MMC_RSP_PRESENT) { /* We expect a response, so set this bit */ @@ -279,6 +285,40 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) return cmdr; } +static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) +{ + struct mmc_command *stop; + u32 cmdr; + + if (!cmd->data) + return 0; + + stop = 
&host->stop_abort; + cmdr = cmd->opcode; + memset(stop, 0, sizeof(struct mmc_command)); + + if (cmdr == MMC_READ_SINGLE_BLOCK || + cmdr == MMC_READ_MULTIPLE_BLOCK || + cmdr == MMC_WRITE_BLOCK || + cmdr == MMC_WRITE_MULTIPLE_BLOCK) { + stop->opcode = MMC_STOP_TRANSMISSION; + stop->arg = 0; + stop->flags = MMC_RSP_R1B | MMC_CMD_AC; + } else if (cmdr == SD_IO_RW_EXTENDED) { + stop->opcode = SD_IO_RW_DIRECT; + stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | + ((cmd->arg >> 28) & 0x7); + stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; + } else { + return 0; + } + + cmdr = stop->opcode | SDMMC_CMD_STOP | + SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; + + return cmdr; +} + static void dw_mci_start_command(struct dw_mci *host, struct mmc_command *cmd, u32 cmd_flags) { @@ -293,9 +333,10 @@ static void dw_mci_start_command(struct dw_mci *host, mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); } -static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) +static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) { - dw_mci_start_command(host, data->stop, host->stop_cmdr); + struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort; + dw_mci_start_command(host, stop, host->stop_cmdr); } /* DMA interface functions */ @@ -304,10 +345,10 @@ static void dw_mci_stop_dma(struct dw_mci *host) if (host->using_dma) { host->dma_ops->stop(host); host->dma_ops->cleanup(host); - } else { - /* Data transfer was stopped by the interrupt handler */ - set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } + + /* Data transfer was stopped by the interrupt handler */ + set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } static int dw_mci_get_dma_dir(struct mmc_data *data) @@ -331,6 +372,14 @@ static void dw_mci_dma_cleanup(struct dw_mci *host) dw_mci_get_dma_dir(data)); } +static void dw_mci_idmac_reset(struct dw_mci *host) +{ + u32 bmod = mci_readl(host, BMOD); + /* Software reset of DMA */ + bmod |= SDMMC_IDMAC_SWRESET; + mci_writel(host, BMOD, bmod); +} + static void dw_mci_idmac_stop_dma(struct dw_mci *host) { u32 temp; @@ -344,6 +393,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) /* Stop the IDMAC running */ temp = mci_readl(host, BMOD); temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); + temp |= SDMMC_IDMAC_SWRESET; mci_writel(host, BMOD, temp); } @@ -435,7 +485,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) p->des3 = host->sg_dma; p->des0 = IDMAC_DES0_ER; - mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); + dw_mci_idmac_reset(host); /* Mask out interrupts - get Tx & Rx complete only */ mci_writel(host, IDSTS, IDMAC_INT_CLR); @@ -532,6 +582,78 @@ static void dw_mci_post_req(struct mmc_host *mmc, data->host_cookie = 0; } +static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) +{ +#ifdef CONFIG_MMC_DW_IDMAC + unsigned int blksz = data->blksz; + const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + u32 fifo_width = 1 << host->data_shift; + u32 blksz_depth = blksz / fifo_width, fifoth_val; + u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; + int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1; + + tx_wmark = (host->fifo_depth) / 2; + tx_wmark_invers = host->fifo_depth - tx_wmark; + + /* + * MSIZE is '1', + * if blksz is not a multiple of the FIFO width + */ + if (blksz % fifo_width) { + msize = 0; + rx_wmark = 1; + goto done; + } + + do { + if (!((blksz_depth % mszs[idx]) || + (tx_wmark_invers % mszs[idx]))) { + msize = idx; + rx_wmark = mszs[idx] - 1; + break; + } + } while (--idx > 0); + /* + * If idx is '0', 
it won't be tried + * Thus, initial values are uesed + */ +done: + fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); + mci_writel(host, FIFOTH, fifoth_val); +#endif +} + +static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) +{ + unsigned int blksz = data->blksz; + u32 blksz_depth, fifo_depth; + u16 thld_size; + + WARN_ON(!(data->flags & MMC_DATA_READ)); + + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104) + goto disable; + + blksz_depth = blksz / (1 << host->data_shift); + fifo_depth = host->fifo_depth; + + if (blksz_depth > fifo_depth) + goto disable; + + /* + * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' + * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz + * Currently just choose blksz. + */ + thld_size = blksz; + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1)); + return; + +disable: + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0)); +} + static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) { int sg_len; @@ -556,6 +678,14 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, sg_len); + /* + * Decide the MSIZE and RX/TX Watermark. + * If current block size is same with previous size, + * no need to update fifoth. + */ + if (host->prev_blksz != data->blksz) + dw_mci_adjust_fifoth(host, data); + /* Enable the DMA interface */ temp = mci_readl(host, CTRL); temp |= SDMMC_CTRL_DMA_ENABLE; @@ -581,10 +711,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) host->sg = NULL; host->data = data; - if (data->flags & MMC_DATA_READ) + if (data->flags & MMC_DATA_READ) { host->dir_status = DW_MCI_RECV_STATUS; - else + dw_mci_ctrl_rd_thld(host, data); + } else { host->dir_status = DW_MCI_SEND_STATUS; + } if (dw_mci_submit_data_dma(host, data)) { int flags = SG_MITER_ATOMIC; @@ -606,6 +738,21 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) temp = mci_readl(host, CTRL); temp &= ~SDMMC_CTRL_DMA_ENABLE; mci_writel(host, CTRL, temp); + + /* + * Use the initial fifoth_val for PIO mode. + * If next issued data may be transfered by DMA mode, + * prev_blksz should be invalidated. + */ + mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + } else { + /* + * Keep the current block size. + * It will be used to decide whether to update + * fifoth register next time. + */ + host->prev_blksz = data->blksz; } } @@ -632,24 +779,31 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) { struct dw_mci *host = slot->host; + unsigned int clock = slot->clock; u32 div; u32 clk_en_a; - if (slot->clock != host->current_speed || force_clkinit) { - div = host->bus_hz / slot->clock; - if (host->bus_hz % slot->clock && host->bus_hz > slot->clock) + if (!clock) { + mci_writel(host, CLKENA, 0); + mci_send_cmd(slot, + SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); + } else if (clock != host->current_speed || force_clkinit) { + div = host->bus_hz / clock; + if (host->bus_hz % clock && host->bus_hz > clock) /* * move the + 1 after the divide to prevent * over-clocking the card. */ div += 1; - div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0; + div = (host->bus_hz != clock) ? 
DIV_ROUND_UP(div, 2) : 0; - dev_info(&slot->mmc->class_dev, - "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" - " div = %d)\n", slot->id, host->bus_hz, slot->clock, - div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); + if ((clock << div) != slot->__clk_old || force_clkinit) + dev_info(&slot->mmc->class_dev, + "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", + slot->id, host->bus_hz, clock, + div ? ((host->bus_hz / div) >> 1) : + host->bus_hz, div); /* disable clock */ mci_writel(host, CLKENA, 0); @@ -676,9 +830,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - host->current_speed = slot->clock; + /* keep the clock with reflecting clock dividor */ + slot->__clk_old = clock << div; } + host->current_speed = clock; + /* Set the current slot bus width */ mci_writel(host, CTYPE, (slot->ctype << slot->id)); } @@ -700,7 +857,9 @@ static void __dw_mci_start_request(struct dw_mci *host, host->pending_events = 0; host->completed_events = 0; + host->cmd_status = 0; host->data_status = 0; + host->dir_status = 0; data = cmd->data; if (data) { @@ -724,6 +883,8 @@ static void __dw_mci_start_request(struct dw_mci *host, if (mrq->stop) host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); + else + host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); } static void dw_mci_start_request(struct dw_mci *host, @@ -806,14 +967,13 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) regs &= ~((0x1 << slot->id) << 16); mci_writel(slot->host, UHS_REG, regs); + slot->host->timing = ios->timing; - if (ios->clock) { - /* - * Use mirror of ios->clock to prevent race with mmc - * core ios update when finding the minimum. - */ - slot->clock = ios->clock; - } + /* + * Use mirror of ios->clock to prevent race with mmc + * core ios update when finding the minimum. 
+ */ + slot->clock = ios->clock; if (drv_data && drv_data->set_ios) drv_data->set_ios(slot->host, ios); @@ -939,6 +1099,38 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) } } +static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + struct dw_mci_tuning_data tuning_data; + int err = -ENOSYS; + + if (opcode == MMC_SEND_TUNING_BLOCK_HS200) { + if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { + tuning_data.blk_pattern = tuning_blk_pattern_8bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_8bit); + } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) { + tuning_data.blk_pattern = tuning_blk_pattern_4bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); + } else { + return -EINVAL; + } + } else if (opcode == MMC_SEND_TUNING_BLOCK) { + tuning_data.blk_pattern = tuning_blk_pattern_4bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); + } else { + dev_err(host->dev, + "Undefined command(%d) for tuning\n", opcode); + return -EINVAL; + } + + if (drv_data && drv_data->execute_tuning) + err = drv_data->execute_tuning(slot, opcode, &tuning_data); + return err; +} + static const struct mmc_host_ops dw_mci_ops = { .request = dw_mci_request, .pre_req = dw_mci_pre_req, @@ -947,6 +1139,7 @@ static const struct mmc_host_ops dw_mci_ops = { .get_ro = dw_mci_get_ro, .get_cd = dw_mci_get_cd, .enable_sdio_irq = dw_mci_enable_sdio_irq, + .execute_tuning = dw_mci_execute_tuning, }; static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) @@ -978,7 +1171,7 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) spin_lock(&host->lock); } -static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) +static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) { u32 status = host->cmd_status; @@ -1012,12 +1205,52 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd /* newer ip versions need a delay between retries */ if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) mdelay(20); + } - if (cmd->data) { - dw_mci_stop_dma(host); - host->data = NULL; + return cmd->error; +} + +static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) +{ + u32 status = host->data_status; + + if (status & DW_MCI_DATA_ERROR_FLAGS) { + if (status & SDMMC_INT_DRTO) { + data->error = -ETIMEDOUT; + } else if (status & SDMMC_INT_DCRC) { + data->error = -EILSEQ; + } else if (status & SDMMC_INT_EBE) { + if (host->dir_status == + DW_MCI_SEND_STATUS) { + /* + * No data CRC status was returned. + * The number of bytes transferred + * will be exaggerated in PIO mode. 
+ */ + data->bytes_xfered = 0; + data->error = -ETIMEDOUT; + } else if (host->dir_status == + DW_MCI_RECV_STATUS) { + data->error = -EIO; + } + } else { + /* SDMMC_INT_SBE is included */ + data->error = -EIO; } + + dev_err(host->dev, "data error, status 0x%08x\n", status); + + /* + * After an error, there may be data lingering + * in the FIFO + */ + dw_mci_fifo_reset(host); + } else { + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; } + + return data->error; } static void dw_mci_tasklet_func(unsigned long priv) @@ -1025,14 +1258,16 @@ static void dw_mci_tasklet_func(unsigned long priv) struct dw_mci *host = (struct dw_mci *)priv; struct mmc_data *data; struct mmc_command *cmd; + struct mmc_request *mrq; enum dw_mci_state state; enum dw_mci_state prev_state; - u32 status, ctrl; + unsigned int err; spin_lock(&host->lock); state = host->state; data = host->data; + mrq = host->mrq; do { prev_state = state; @@ -1049,16 +1284,23 @@ static void dw_mci_tasklet_func(unsigned long priv) cmd = host->cmd; host->cmd = NULL; set_bit(EVENT_CMD_COMPLETE, &host->completed_events); - dw_mci_command_complete(host, cmd); - if (cmd == host->mrq->sbc && !cmd->error) { + err = dw_mci_command_complete(host, cmd); + if (cmd == mrq->sbc && !err) { prev_state = state = STATE_SENDING_CMD; __dw_mci_start_request(host, host->cur_slot, - host->mrq->cmd); + mrq->cmd); goto unlock; } - if (!host->mrq->data || cmd->error) { - dw_mci_request_end(host, host->mrq); + if (cmd->data && err) { + dw_mci_stop_dma(host); + send_stop_abort(host, data); + state = STATE_SENDING_STOP; + break; + } + + if (!cmd->data || err) { + dw_mci_request_end(host, mrq); goto unlock; } @@ -1069,8 +1311,7 @@ static void dw_mci_tasklet_func(unsigned long priv) if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { dw_mci_stop_dma(host); - if (data->stop) - send_stop_cmd(host, data); + send_stop_abort(host, data); state = STATE_DATA_ERROR; break; } @@ -1090,60 +1331,27 @@ static void dw_mci_tasklet_func(unsigned long priv) host->data = NULL; set_bit(EVENT_DATA_COMPLETE, &host->completed_events); - status = host->data_status; - - if (status & DW_MCI_DATA_ERROR_FLAGS) { - if (status & SDMMC_INT_DRTO) { - data->error = -ETIMEDOUT; - } else if (status & SDMMC_INT_DCRC) { - data->error = -EILSEQ; - } else if (status & SDMMC_INT_EBE && - host->dir_status == - DW_MCI_SEND_STATUS) { - /* - * No data CRC status was returned. - * The number of bytes transferred will - * be exaggerated in PIO mode. - */ - data->bytes_xfered = 0; - data->error = -ETIMEDOUT; - } else { - dev_err(host->dev, - "data FIFO error " - "(status=%08x)\n", - status); - data->error = -EIO; - } - /* - * After an error, there may be data lingering - * in the FIFO, so reset it - doing so - * generates a block interrupt, hence setting - * the scatter-gather pointer to NULL. 
- */ - sg_miter_stop(&host->sg_miter); - host->sg = NULL; - ctrl = mci_readl(host, CTRL); - ctrl |= SDMMC_CTRL_FIFO_RESET; - mci_writel(host, CTRL, ctrl); - } else { - data->bytes_xfered = data->blocks * data->blksz; - data->error = 0; - } + err = dw_mci_data_complete(host, data); - if (!data->stop) { - dw_mci_request_end(host, host->mrq); - goto unlock; - } + if (!err) { + if (!data->stop || mrq->sbc) { + if (mrq->sbc) + data->stop->error = 0; + dw_mci_request_end(host, mrq); + goto unlock; + } - if (host->mrq->sbc && !data->error) { - data->stop->error = 0; - dw_mci_request_end(host, host->mrq); - goto unlock; + /* stop command for open-ended transfer*/ + if (data->stop) + send_stop_abort(host, data); } + /* + * If err has non-zero, + * stop-abort command has been already issued. + */ prev_state = state = STATE_SENDING_STOP; - if (!data->error) - send_stop_cmd(host, data); + /* fall through */ case STATE_SENDING_STOP: @@ -1151,9 +1359,19 @@ static void dw_mci_tasklet_func(unsigned long priv) &host->pending_events)) break; + /* CMD error in data command */ + if (mrq->cmd->error && mrq->data) + dw_mci_fifo_reset(host); + host->cmd = NULL; - dw_mci_command_complete(host, host->mrq->stop); - dw_mci_request_end(host, host->mrq); + host->data = NULL; + + if (mrq->stop) + dw_mci_command_complete(host, mrq->stop); + else + host->cmd_status = 0; + + dw_mci_request_end(host, mrq); goto unlock; case STATE_DATA_ERROR: @@ -1697,7 +1915,6 @@ static void dw_mci_work_routine_card(struct work_struct *work) struct mmc_host *mmc = slot->mmc; struct mmc_request *mrq; int present; - u32 ctrl; present = dw_mci_get_cd(mmc); while (present != slot->last_detect_state) { @@ -1736,11 +1953,10 @@ static void dw_mci_work_routine_card(struct work_struct *work) case STATE_DATA_ERROR: if (mrq->data->error == -EINPROGRESS) mrq->data->error = -ENOMEDIUM; - if (!mrq->stop) - break; /* fall through */ case STATE_SENDING_STOP: - mrq->stop->error = -ENOMEDIUM; + if (mrq->stop) + mrq->stop->error = -ENOMEDIUM; break; } @@ -1763,23 +1979,10 @@ static void dw_mci_work_routine_card(struct work_struct *work) if (present == 0) { clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); - /* - * Clear down the FIFO - doing so generates a - * block interrupt, hence setting the - * scatter-gather pointer to NULL. 
- */ - sg_miter_stop(&host->sg_miter); - host->sg = NULL; - - ctrl = mci_readl(host, CTRL); - ctrl |= SDMMC_CTRL_FIFO_RESET; - mci_writel(host, CTRL, ctrl); - + /* Clear down the FIFO */ + dw_mci_fifo_reset(host); #ifdef CONFIG_MMC_DW_IDMAC - ctrl = mci_readl(host, BMOD); - /* Software reset of DMA */ - ctrl |= SDMMC_IDMAC_SWRESET; - mci_writel(host, BMOD, ctrl); + dw_mci_idmac_reset(host); #endif } @@ -1901,6 +2104,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) struct dw_mci_slot *slot; const struct dw_mci_drv_data *drv_data = host->drv_data; int ctrl_id, ret; + u32 freq[2]; u8 bus_width; mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); @@ -1916,8 +2120,14 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); mmc->ops = &dw_mci_ops; - mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); - mmc->f_max = host->bus_hz; + if (of_property_read_u32_array(host->dev->of_node, + "clock-freq-min-max", freq, 2)) { + mmc->f_min = DW_MCI_FREQ_MIN; + mmc->f_max = DW_MCI_FREQ_MAX; + } else { + mmc->f_min = freq[0]; + mmc->f_max = freq[1]; + } if (host->pdata->get_ocr) mmc->ocr_avail = host->pdata->get_ocr(id); @@ -1964,9 +2174,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) mmc->caps |= MMC_CAP_4_BIT_DATA; } - if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) - mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; - if (host->pdata->blk_settings) { mmc->max_segs = host->pdata->blk_settings->max_segs; mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; @@ -2008,12 +2215,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) /* Card initially undetected */ slot->last_detect_state = 0; - /* - * Card may have been plugged in prior to boot so we - * need to run the detect tasklet - */ - queue_work(host->card_workqueue, &host->card_work); - return 0; err_setup_bus: @@ -2074,36 +2275,57 @@ no_dma: return; } -static bool mci_wait_reset(struct device *dev, struct dw_mci *host) +static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) { unsigned long timeout = jiffies + msecs_to_jiffies(500); - unsigned int ctrl; + u32 ctrl; - mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | - SDMMC_CTRL_DMA_RESET)); + ctrl = mci_readl(host, CTRL); + ctrl |= reset; + mci_writel(host, CTRL, ctrl); /* wait till resets clear */ do { ctrl = mci_readl(host, CTRL); - if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | - SDMMC_CTRL_DMA_RESET))) + if (!(ctrl & reset)) return true; } while (time_before(jiffies, timeout)); - dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); + dev_err(host->dev, + "Timeout resetting block (ctrl reset %#x)\n", + ctrl & reset); return false; } +static inline bool dw_mci_fifo_reset(struct dw_mci *host) +{ + /* + * Reseting generates a block interrupt, hence setting + * the scatter-gather pointer to NULL. 
+ */ + if (host->sg) { + sg_miter_stop(&host->sg_miter); + host->sg = NULL; + } + + return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET); +} + +static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host) +{ + return dw_mci_ctrl_reset(host, + SDMMC_CTRL_FIFO_RESET | + SDMMC_CTRL_RESET | + SDMMC_CTRL_DMA_RESET); +} + #ifdef CONFIG_OF static struct dw_mci_of_quirks { char *quirk; int id; } of_quirks[] = { { - .quirk = "supports-highspeed", - .id = DW_MCI_QUIRK_HIGHSPEED, - }, { .quirk = "broken-cd", .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, }, @@ -2158,6 +2380,15 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) if (of_find_property(np, "enable-sdio-wakeup", NULL)) pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + if (of_find_property(np, "supports-highspeed", NULL)) + pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; + + if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL)) + pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR; + + if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL)) + pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR; + return pdata; } @@ -2221,6 +2452,15 @@ int dw_mci_probe(struct dw_mci *host) host->bus_hz = clk_get_rate(host->ciu_clk); } + if (drv_data && drv_data->init) { + ret = drv_data->init(host); + if (ret) { + dev_err(host->dev, + "implementation specific init failed\n"); + goto err_clk_ciu; + } + } + if (drv_data && drv_data->setup_clock) { ret = drv_data->setup_clock(host); if (ret) { @@ -2287,7 +2527,7 @@ int dw_mci_probe(struct dw_mci *host) } /* Reset all blocks */ - if (!mci_wait_reset(host->dev, host)) + if (!dw_mci_ctrl_all_reset(host)) return -ENODEV; host->dma_ops = host->pdata->dma_ops; @@ -2317,8 +2557,8 @@ int dw_mci_probe(struct dw_mci *host) fifo_size = host->pdata->fifo_depth; } host->fifo_depth = fifo_size; - host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | - ((fifo_size/2) << 0)); + host->fifoth_val = + SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); mci_writel(host, FIFOTH, host->fifoth_val); /* disable clock to CIU */ @@ -2456,23 +2696,6 @@ EXPORT_SYMBOL(dw_mci_remove); */ int dw_mci_suspend(struct dw_mci *host) { - int i, ret = 0; - - for (i = 0; i < host->num_slots; i++) { - struct dw_mci_slot *slot = host->slot[i]; - if (!slot) - continue; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slot[i]; - if (slot) - mmc_resume_host(host->slot[i]->mmc); - } - return ret; - } - } - if (host->vmmc) regulator_disable(host->vmmc); @@ -2493,7 +2716,7 @@ int dw_mci_resume(struct dw_mci *host) } } - if (!mci_wait_reset(host->dev, host)) { + if (!dw_mci_ctrl_all_reset(host)) { ret = -ENODEV; return ret; } @@ -2501,8 +2724,15 @@ int dw_mci_resume(struct dw_mci *host) if (host->use_dma && host->dma_ops->init) host->dma_ops->init(host); - /* Restore the old value at FIFOTH register */ + /* + * Restore the initial value at FIFOTH register + * And Invalidate the prev_blksz with zero + */ mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + + /* Put in max timeout */ + mci_writel(host, TMOUT, 0xFFFFFFFF); mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | @@ -2518,10 +2748,6 @@ int dw_mci_resume(struct dw_mci *host) dw_mci_set_ios(slot->mmc, &slot->mmc->ios); dw_mci_setup_bus(slot, true); } - - ret = mmc_resume_host(host->slot[i]->mmc); - if (ret < 0) - return ret; } return 0; } diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 81b29941c5b..6bf24ab917e 100644 --- a/drivers/mmc/host/dw_mmc.h +++ 
b/drivers/mmc/host/dw_mmc.h @@ -53,6 +53,7 @@ #define SDMMC_IDINTEN 0x090 #define SDMMC_DSCADDR 0x094 #define SDMMC_BUFADDR 0x098 +#define SDMMC_CDTHRCTL 0x100 #define SDMMC_DATA(x) (x) /* @@ -128,6 +129,10 @@ #define SDMMC_CMD_INDX(n) ((n) & 0x1F) /* Status register defines */ #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF) +/* FIFOTH register defines */ +#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | \ + ((t) & 0xFFF)) /* Internal DMAC interrupt defines */ #define SDMMC_IDMAC_INT_AI BIT(9) #define SDMMC_IDMAC_INT_NI BIT(8) @@ -142,6 +147,8 @@ #define SDMMC_IDMAC_SWRESET BIT(0) /* Version ID register define */ #define SDMMC_GET_VERID(x) ((x) & 0xFFFF) +/* Card read threshold */ +#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x)) /* Register access macros */ #define mci_readl(dev, reg) \ @@ -184,6 +191,52 @@ extern int dw_mci_resume(struct dw_mci *host); #endif /** + * struct dw_mci_slot - MMC slot state + * @mmc: The mmc_host representing this slot. + * @host: The MMC controller this slot is using. + * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) + * @wp_gpio: If gpio_is_valid() we'll use this to read write protect. + * @ctype: Card type for this slot. + * @mrq: mmc_request currently being processed or waiting to be + * processed, or NULL when the slot is idle. + * @queue_node: List node for placing this node in the @queue list of + * &struct dw_mci. + * @clock: Clock rate configured by set_ios(). Protected by host->lock. + * @__clk_old: The last updated clock with reflecting clock divider. + * Keeping track of this helps us to avoid spamming the console + * with CONFIG_MMC_CLKGATE. + * @flags: Random state bits associated with the slot. + * @id: Number of this slot. + * @last_detect_state: Most recently observed card detect state. + */ +struct dw_mci_slot { + struct mmc_host *mmc; + struct dw_mci *host; + + int quirks; + int wp_gpio; + + u32 ctype; + + struct mmc_request *mrq; + struct list_head queue_node; + + unsigned int clock; + unsigned int __clk_old; + + unsigned long flags; +#define DW_MMC_CARD_PRESENT 0 +#define DW_MMC_CARD_NEED_INIT 1 + int id; + int last_detect_state; +}; + +struct dw_mci_tuning_data { + const u8 *blk_pattern; + unsigned int blksz; +}; + +/** * dw_mci driver data - dw-mshc implementation specific driver data. * @caps: mmc subsystem specified capabilities of the controller(s). * @init: early implementation specific initialization. 
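/*
 * Sketch of how the driver now derives the FIFOTH value: dw_mci_adjust_fifoth()
 * picks the largest DMA burst size (MSIZE) that divides both the block size and
 * the free TX FIFO space, sets the RX watermark just below one burst, and packs
 * the fields as in the SDMMC_SET_FIFOTH() macro added above.  The standalone
 * helper name and the example parameters in main() are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define SET_FIFOTH(m, r, t)	(((m) & 0x7) << 28 | ((r) & 0xFFF) << 16 | ((t) & 0xFFF))

static uint32_t compute_fifoth(uint32_t fifo_depth, uint32_t data_shift,
			       uint32_t blksz)
{
	static const uint32_t mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	uint32_t fifo_width = 1u << data_shift;		/* bytes per FIFO entry */
	uint32_t blksz_depth = blksz / fifo_width;
	uint32_t tx_wmark = fifo_depth / 2;
	uint32_t tx_wmark_invers = fifo_depth - tx_wmark;
	uint32_t msize = 0, rx_wmark = 1;		/* single-transfer default */
	int idx;

	/* a block size that is not a multiple of the FIFO width forces MSIZE 1 */
	if (blksz % fifo_width)
		goto done;

	/* largest burst that divides both the block and the free TX space */
	for (idx = 7; idx > 0; idx--) {
		if (!(blksz_depth % mszs[idx]) &&
		    !(tx_wmark_invers % mszs[idx])) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	}
done:
	return SET_FIFOTH(msize, rx_wmark, tx_wmark);
}

int main(void)
{
	/* e.g. a 128-entry FIFO, 32-bit FIFO width, 512-byte blocks */
	printf("FIFOTH = 0x%08x\n", (unsigned)compute_fifoth(128, 2, 512));
	return 0;
}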
@@ -203,5 +256,7 @@ struct dw_mci_drv_data { void (*prepare_command)(struct dw_mci *host, u32 *cmdr); void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); int (*parse_dt)(struct dw_mci *host); + int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode, + struct dw_mci_tuning_data *tuning_data); }; #endif /* _DW_MMC_H_ */ diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 66516339e3a..de2139cf344 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -880,8 +880,6 @@ static int jz4740_mmc_suspend(struct device *dev) { struct jz4740_mmc_host *host = dev_get_drvdata(dev); - mmc_suspend_host(host->mmc); - jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); return 0; @@ -893,8 +891,6 @@ static int jz4740_mmc_resume(struct device *dev) jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); - mmc_resume_host(host->mmc); - return 0; } diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index d135c76c485..f32057972dd 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1730,37 +1730,28 @@ static int mmci_suspend(struct device *dev) { struct amba_device *adev = to_amba_device(dev); struct mmc_host *mmc = amba_get_drvdata(adev); - int ret = 0; if (mmc) { struct mmci_host *host = mmc_priv(mmc); - - ret = mmc_suspend_host(mmc); - if (ret == 0) { - pm_runtime_get_sync(dev); - writel(0, host->base + MMCIMASK0); - } + pm_runtime_get_sync(dev); + writel(0, host->base + MMCIMASK0); } - return ret; + return 0; } static int mmci_resume(struct device *dev) { struct amba_device *adev = to_amba_device(dev); struct mmc_host *mmc = amba_get_drvdata(adev); - int ret = 0; if (mmc) { struct mmci_host *host = mmc_priv(mmc); - writel(MCI_IRQENABLE, host->base + MMCIMASK0); pm_runtime_put(dev); - - ret = mmc_resume_host(mmc); } - return ret; + return 0; } #endif diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index b900de4e7e9..9405ecdaf6c 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -1416,28 +1416,10 @@ ioremap_free: } #ifdef CONFIG_PM -#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ -static void -do_resume_work(struct work_struct *work) -{ - struct msmsdcc_host *host = - container_of(work, struct msmsdcc_host, resume_task); - struct mmc_host *mmc = host->mmc; - - if (mmc) { - mmc_resume_host(mmc); - if (host->stat_irq) - enable_irq(host->stat_irq); - } -} -#endif - - static int msmsdcc_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = mmc_get_drvdata(dev); - int rc = 0; if (mmc) { struct msmsdcc_host *host = mmc_priv(mmc); @@ -1445,14 +1427,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state) if (host->stat_irq) disable_irq(host->stat_irq); - if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - rc = mmc_suspend_host(mmc); - if (!rc) - msmsdcc_writel(host, 0, MMCIMASK0); + msmsdcc_writel(host, 0, MMCIMASK0); if (host->clks_on) msmsdcc_disable_clocks(host, 0); } - return rc; + return 0; } static int @@ -1467,8 +1446,6 @@ msmsdcc_resume(struct platform_device *dev) msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0); - if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - mmc_resume_host(mmc); if (host->stat_irq) enable_irq(host->stat_irq); #if BUSCLK_PWRSAVE diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index deecee08c28..45aa2206741 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -775,9 +775,9 @@ static int mvsd_probe(struct platform_device *pdev) 
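/*
 * The suspend/resume hunks in this series (jz4740, mmci, msm_sdcc above and
 * the drivers that follow) all drop mmc_suspend_host()/mmc_resume_host():
 * the MMC core now takes care of suspending and resuming the card, so a
 * host driver's system-sleep hooks only quiesce controller resources.  A
 * minimal sketch of the resulting pattern; the "foo" driver, its private
 * struct and its single clock are hypothetical, the shape follows the
 * converted handlers in this series (e.g. mxcmmc, mxs-mmc).
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
#include <linux/pm.h>

struct foo_mmc_host {
	struct clk *clk;
};

static int foo_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct foo_mmc_host *host = mmc_priv(mmc);

	/* no mmc_suspend_host() call any more; just gate the controller */
	clk_disable_unprepare(host->clk);
	return 0;
}

static int foo_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct foo_mmc_host *host = mmc_priv(mmc);

	/* the core re-initialises the card once the host is powered back up */
	return clk_prepare_enable(host->clk);
}

static SIMPLE_DEV_PM_OPS(foo_mmc_pm_ops, foo_mmc_suspend, foo_mmc_resume);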
spin_lock_init(&host->lock); - host->base = devm_request_and_ioremap(&pdev->dev, r); - if (!host->base) { - ret = -ENOMEM; + host->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); goto out; } @@ -838,33 +838,6 @@ static int mvsd_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int mvsd_suspend(struct platform_device *dev, pm_message_t state) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - - return ret; -} - -static int mvsd_resume(struct platform_device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; -} -#else -#define mvsd_suspend NULL -#define mvsd_resume NULL -#endif - static const struct of_device_id mvsdio_dt_ids[] = { { .compatible = "marvell,orion-sdio" }, { /* sentinel */ } @@ -874,8 +847,6 @@ MODULE_DEVICE_TABLE(of, mvsdio_dt_ids); static struct platform_driver mvsd_driver = { .probe = mvsd_probe, .remove = mvsd_remove, - .suspend = mvsd_suspend, - .resume = mvsd_resume, .driver = { .name = DRIVER_NAME, .of_match_table = mvsdio_dt_ids, diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index c174c6a0d22..f7199c83f5c 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1250,28 +1250,20 @@ static int mxcmci_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); - int ret = 0; - if (mmc) - ret = mmc_suspend_host(mmc); clk_disable_unprepare(host->clk_per); clk_disable_unprepare(host->clk_ipg); - - return ret; + return 0; } static int mxcmci_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); - int ret = 0; clk_prepare_enable(host->clk_per); clk_prepare_enable(host->clk_ipg); - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; + return 0; } static const struct dev_pm_ops mxcmci_pm_ops = { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index e1fa3ef735e..50fc9df791b 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -724,13 +724,9 @@ static int mxs_mmc_suspend(struct device *dev) struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); struct mxs_ssp *ssp = &host->ssp; - int ret = 0; - - ret = mmc_suspend_host(mmc); clk_disable_unprepare(ssp->clk); - - return ret; + return 0; } static int mxs_mmc_resume(struct device *dev) @@ -738,13 +734,9 @@ static int mxs_mmc_resume(struct device *dev) struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); struct mxs_ssp *ssp = &host->ssp; - int ret = 0; clk_prepare_enable(ssp->clk); - - ret = mmc_resume_host(mmc); - - return ret; + return 0; } static const struct dev_pm_ops mxs_mmc_pm_ops = { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index b94f38ec2a8..0b10a9030f4 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -128,7 +128,6 @@ struct mmc_omap_slot { struct mmc_omap_host { int initialized; - int suspended; struct mmc_request * mrq; struct mmc_command * cmd; struct mmc_data * data; @@ -1513,61 +1512,9 @@ static int mmc_omap_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - int i, ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); - - if (host == NULL || host->suspended) - return 0; - - for 
(i = 0; i < host->nr_slots; i++) { - struct mmc_omap_slot *slot; - - slot = host->slots[i]; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slots[i]; - mmc_resume_host(slot->mmc); - } - return ret; - } - } - host->suspended = 1; - return 0; -} - -static int mmc_omap_resume(struct platform_device *pdev) -{ - int i, ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); - - if (host == NULL || !host->suspended) - return 0; - - for (i = 0; i < host->nr_slots; i++) { - struct mmc_omap_slot *slot; - slot = host->slots[i]; - ret = mmc_resume_host(slot->mmc); - if (ret < 0) - return ret; - - host->suspended = 0; - } - return 0; -} -#else -#define mmc_omap_suspend NULL -#define mmc_omap_resume NULL -#endif - static struct platform_driver mmc_omap_driver = { .probe = mmc_omap_probe, .remove = mmc_omap_remove, - .suspend = mmc_omap_suspend, - .resume = mmc_omap_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 6ac63df645c..dbd32ad3b74 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -75,6 +75,7 @@ #define ICE 0x1 #define ICS 0x2 #define CEN (1 << 2) +#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */ #define CLKD_MASK 0x0000FFC0 #define CLKD_SHIFT 6 #define DTO_MASK 0x000F0000 @@ -119,7 +120,8 @@ BRR_EN | BWR_EN | TC_EN | CC_EN) #define MMC_AUTOSUSPEND_DELAY 100 -#define MMC_TIMEOUT_MS 20 +#define MMC_TIMEOUT_MS 20 /* 20 mSec */ +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ #define OMAP_MMC_MIN_CLOCK 400000 #define OMAP_MMC_MAX_CLOCK 52000000 #define DRIVER_NAME "omap_hsmmc" @@ -171,6 +173,10 @@ struct omap_hsmmc_host { unsigned char bus_mode; unsigned char power_mode; int suspended; + u32 con; + u32 hctl; + u32 sysctl; + u32 capa; int irq; int use_dma, dma_ch; struct dma_chan *tx_chan; @@ -183,7 +189,6 @@ struct omap_hsmmc_host { int use_reg; int req_in_progress; struct omap_hsmmc_next next_data; - struct omap_mmc_platform_data *pdata; }; @@ -493,8 +498,8 @@ static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) if (ios->clock) { dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); - if (dsor > 250) - dsor = 250; + if (dsor > CLKD_MAX) + dsor = CLKD_MAX; } return dsor; @@ -597,25 +602,20 @@ static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host) static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) { struct mmc_ios *ios = &host->mmc->ios; - struct omap_mmc_platform_data *pdata = host->pdata; - int context_loss = 0; u32 hctl, capa; unsigned long timeout; - if (pdata->get_context_loss_count) { - context_loss = pdata->get_context_loss_count(host->dev); - if (context_loss < 0) - return 1; - } - - dev_dbg(mmc_dev(host->mmc), "context was %slost\n", - context_loss == host->context_loss ? 
"not " : ""); - if (host->context_loss == context_loss) - return 1; - if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) return 1; + if (host->con == OMAP_HSMMC_READ(host->base, CON) && + host->hctl == OMAP_HSMMC_READ(host->base, HCTL) && + host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) && + host->capa == OMAP_HSMMC_READ(host->base, CAPA)) + return 0; + + host->context_loss++; + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { if (host->power_mode != MMC_POWER_OFF && (1 << ios->vdd) <= MMC_VDD_23_24) @@ -655,9 +655,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) omap_hsmmc_set_bus_mode(host); out: - host->context_loss = context_loss; - - dev_dbg(mmc_dev(host->mmc), "context is restored\n"); + dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n", + host->context_loss); return 0; } @@ -666,15 +665,10 @@ out: */ static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) { - struct omap_mmc_platform_data *pdata = host->pdata; - int context_loss; - - if (pdata->get_context_loss_count) { - context_loss = pdata->get_context_loss_count(host->dev); - if (context_loss < 0) - return; - host->context_loss = context_loss; - } + host->con = OMAP_HSMMC_READ(host->base, CON); + host->hctl = OMAP_HSMMC_READ(host->base, HCTL); + host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL); + host->capa = OMAP_HSMMC_READ(host->base, CAPA); } #else @@ -975,8 +969,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, unsigned long bit) { unsigned long i = 0; - unsigned long limit = (loops_per_jiffy * - msecs_to_jiffies(MMC_TIMEOUT_MS)); + unsigned long limit = MMC_TIMEOUT_US; OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) | bit); @@ -988,13 +981,13 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) && (i++ < limit)) - cpu_relax(); + udelay(1); } i = 0; while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) && (i++ < limit)) - cpu_relax(); + udelay(1); if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit) dev_err(mmc_dev(host->mmc), @@ -1178,9 +1171,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) struct omap_mmc_slot_data *slot = &mmc_slot(host); int carddetect; - if (host->suspended) - return IRQ_HANDLED; - sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); if (slot->card_detect) @@ -1635,18 +1625,9 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) { struct mmc_host *mmc = s->private; struct omap_hsmmc_host *host = mmc_priv(mmc); - int context_loss = 0; - - if (host->pdata->get_context_loss_count) - context_loss = host->pdata->get_context_loss_count(host->dev); - seq_printf(s, "mmc%d:\n ctx_loss:\t%d:%d\n\nregs:\n", - mmc->index, host->context_loss, context_loss); - - if (host->suspended) { - seq_printf(s, "host suspended, can't read registers\n"); - return 0; - } + seq_printf(s, "mmc%d:\n ctx_loss:\t%d\n\nregs:\n", + mmc->index, host->context_loss); pm_runtime_get_sync(host->dev); @@ -1838,13 +1819,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) mmc->ops = &omap_hsmmc_ops; - /* - * If regulator_disable can only put vcc_aux to sleep then there is - * no off state. 
- */ - if (mmc_slot(host).vcc_aux_disable_is_sleep) - mmc_slot(host).no_off = 1; - mmc->f_min = OMAP_MMC_MIN_CLOCK; if (pdata->max_freq > 0) @@ -1874,7 +1848,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_context_save(host); /* This can be removed once we support PBIAS with DT */ - if (host->dev->of_node && host->mapbase == 0x4809c000) + if (host->dev->of_node && res->start == 0x4809c000) host->pbias_disable = 1; host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); @@ -2119,23 +2093,12 @@ static void omap_hsmmc_complete(struct device *dev) static int omap_hsmmc_suspend(struct device *dev) { - int ret = 0; struct omap_hsmmc_host *host = dev_get_drvdata(dev); if (!host) return 0; - if (host && host->suspended) - return 0; - pm_runtime_get_sync(host->dev); - host->suspended = 1; - ret = mmc_suspend_host(host->mmc); - - if (ret) { - host->suspended = 0; - goto err; - } if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { omap_hsmmc_disable_irq(host); @@ -2145,23 +2108,19 @@ static int omap_hsmmc_suspend(struct device *dev) if (host->dbclk) clk_disable_unprepare(host->dbclk); -err: + pm_runtime_put_sync(host->dev); - return ret; + return 0; } /* Routine to resume the MMC device */ static int omap_hsmmc_resume(struct device *dev) { - int ret = 0; struct omap_hsmmc_host *host = dev_get_drvdata(dev); if (!host) return 0; - if (host && !host->suspended) - return 0; - pm_runtime_get_sync(host->dev); if (host->dbclk) @@ -2172,16 +2131,9 @@ static int omap_hsmmc_resume(struct device *dev) omap_hsmmc_protect_card(host); - /* Notify the core to resume the host */ - ret = mmc_resume_host(host->mmc); - if (ret == 0) - host->suspended = 0; - pm_runtime_mark_last_busy(host->dev); pm_runtime_put_autosuspend(host->dev); - - return ret; - + return 0; } #else diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1956a3df7cf..32fe11323f3 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -880,35 +880,6 @@ static int pxamci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int pxamci_suspend(struct device *dev) -{ - struct mmc_host *mmc = dev_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - - return ret; -} - -static int pxamci_resume(struct device *dev) -{ - struct mmc_host *mmc = dev_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; -} - -static const struct dev_pm_ops pxamci_pm_ops = { - .suspend = pxamci_suspend, - .resume = pxamci_resume, -}; -#endif - static struct platform_driver pxamci_driver = { .probe = pxamci_probe, .remove = pxamci_remove, @@ -916,9 +887,6 @@ static struct platform_driver pxamci_driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(pxa_mmc_dt_ids), -#ifdef CONFIG_PM - .pm = &pxamci_pm_ops, -#endif }, }; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 375a880e0c5..c46feda07d5 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -364,7 +364,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) struct mmc_host *mmc = host->mmc; struct mmc_card *card = mmc->card; struct mmc_data *data = mrq->data; - int uhs = mmc_sd_card_uhs(card); + int uhs = mmc_card_uhs(card); int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; u8 cfg2, trans_mode; int err; @@ -1197,37 +1197,6 @@ static const struct mmc_host_ops realtek_pci_sdmmc_ops = { .execute_tuning = sdmmc_execute_tuning, }; -#ifdef CONFIG_PM -static int rtsx_pci_sdmmc_suspend(struct platform_device *pdev, - pm_message_t state) -{ - struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); - struct mmc_host *mmc = host->mmc; - int err; - - dev_dbg(sdmmc_dev(host), "--> %s\n", __func__); - - err = mmc_suspend_host(mmc); - if (err) - return err; - - return 0; -} - -static int rtsx_pci_sdmmc_resume(struct platform_device *pdev) -{ - struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); - struct mmc_host *mmc = host->mmc; - - dev_dbg(sdmmc_dev(host), "--> %s\n", __func__); - - return mmc_resume_host(mmc); -} -#else /* CONFIG_PM */ -#define rtsx_pci_sdmmc_suspend NULL -#define rtsx_pci_sdmmc_resume NULL -#endif /* CONFIG_PM */ - static void init_extra_caps(struct realtek_pci_sdmmc *host) { struct mmc_host *mmc = host->mmc; @@ -1367,8 +1336,6 @@ static struct platform_driver rtsx_pci_sdmmc_driver = { .probe = rtsx_pci_sdmmc_drv_probe, .remove = rtsx_pci_sdmmc_drv_remove, .id_table = rtsx_pci_sdmmc_ids, - .suspend = rtsx_pci_sdmmc_suspend, - .resume = rtsx_pci_sdmmc_resume, .driver = { .owner = THIS_MODULE, .name = DRV_NAME_RTSX_PCI_SDMMC, diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 8d6794cdf89..2fce5ea5eb3 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1949,39 +1949,10 @@ static struct platform_device_id s3cmci_driver_ids[] = { MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); - -#ifdef CONFIG_PM - -static int s3cmci_suspend(struct device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - - return mmc_suspend_host(mmc); -} - -static int s3cmci_resume(struct device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - - return mmc_resume_host(mmc); -} - -static const struct dev_pm_ops s3cmci_pm = { - .suspend = s3cmci_suspend, - .resume = s3cmci_resume, -}; - -#define s3cmci_pm_ops &s3cmci_pm -#else /* CONFIG_PM */ -#define s3cmci_pm_ops NULL -#endif /* CONFIG_PM */ - - static struct platform_driver s3cmci_driver = { .driver = { .name = "s3c-sdi", .owner = THIS_MODULE, - .pm = s3cmci_pm_ops, }, .id_table = s3cmci_driver_ids, .probe = s3cmci_probe, diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index 85472d3fd37..7a190fe4dff 100644 --- a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -316,19 +316,7 @@ err_pltfm_free: static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev) { - struct sdhci_host *host = platform_get_drvdata(pdev); - int dead; - u32 scratch; - - dead = 0; - scratch = readl(host->ioaddr + SDHCI_INT_STATUS); - if (scratch == (u32)-1) - dead = 1; - sdhci_remove_host(host, dead); - - sdhci_free_host(host); - - return 0; + return sdhci_pltfm_unregister(pdev); } static struct platform_driver sdhci_bcm_kona_driver = { diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c index 36fa2df0466..f6d8d67c545 100644 --- a/drivers/mmc/host/sdhci-bcm2835.c +++ b/drivers/mmc/host/sdhci-bcm2835.c @@ -178,13 +178,7 @@ err: static int bcm2835_sdhci_remove(struct platform_device *pdev) { - struct sdhci_host *host = platform_get_drvdata(pdev); - int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); - - sdhci_remove_host(host, dead); - sdhci_pltfm_free(pdev); - - return 0; + return sdhci_pltfm_unregister(pdev); } static const 
struct of_device_id bcm2835_sdhci_of_match[] = { diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index abc8cf01e6e..461a4c3f4ef 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -34,12 +34,40 @@ /* VENDOR SPEC register */ #define ESDHC_VENDOR_SPEC 0xc0 #define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1) +#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1) +#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8) #define ESDHC_WTMK_LVL 0x44 #define ESDHC_MIX_CTRL 0x48 +#define ESDHC_MIX_CTRL_DDREN (1 << 3) #define ESDHC_MIX_CTRL_AC23EN (1 << 7) +#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22) +#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23) +#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) /* Bits 3 and 6 are not SDHCI standard definitions */ #define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 +/* dll control register */ +#define ESDHC_DLL_CTRL 0x60 +#define ESDHC_DLL_OVERRIDE_VAL_SHIFT 9 +#define ESDHC_DLL_OVERRIDE_EN_SHIFT 8 + +/* tune control register */ +#define ESDHC_TUNE_CTRL_STATUS 0x68 +#define ESDHC_TUNE_CTRL_STEP 1 +#define ESDHC_TUNE_CTRL_MIN 0 +#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1) + +#define ESDHC_TUNING_CTRL 0xcc +#define ESDHC_STD_TUNING_EN (1 << 24) +/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ +#define ESDHC_TUNING_START_TAP 0x1 + +#define ESDHC_TUNING_BLOCK_PATTERN_LEN 64 + +/* pinctrl state */ +#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz" +#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz" + /* * Our interpretation of the SDHCI_HOST_CONTROL register */ @@ -66,21 +94,60 @@ * As a result, the TC flag is not asserted and SW received timeout * exeception. Bit1 of Vendor Spec registor is used to fix it. */ -#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) - -enum imx_esdhc_type { - IMX25_ESDHC, - IMX35_ESDHC, - IMX51_ESDHC, - IMX53_ESDHC, - IMX6Q_USDHC, +#define ESDHC_FLAG_MULTIBLK_NO_INT BIT(1) +/* + * The flag enables the workaround for ESDHC errata ENGcm07207 which + * affects i.MX25 and i.MX35. + */ +#define ESDHC_FLAG_ENGCM07207 BIT(2) +/* + * The flag tells that the ESDHC controller is an USDHC block that is + * integrated on the i.MX6 series. 
+ */ +#define ESDHC_FLAG_USDHC BIT(3) +/* The IP supports manual tuning process */ +#define ESDHC_FLAG_MAN_TUNING BIT(4) +/* The IP supports standard tuning process */ +#define ESDHC_FLAG_STD_TUNING BIT(5) +/* The IP has SDHCI_CAPABILITIES_1 register */ +#define ESDHC_FLAG_HAVE_CAP1 BIT(6) + +struct esdhc_soc_data { + u32 flags; +}; + +static struct esdhc_soc_data esdhc_imx25_data = { + .flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx35_data = { + .flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx51_data = { + .flags = 0, +}; + +static struct esdhc_soc_data esdhc_imx53_data = { + .flags = ESDHC_FLAG_MULTIBLK_NO_INT, +}; + +static struct esdhc_soc_data usdhc_imx6q_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING, +}; + +static struct esdhc_soc_data usdhc_imx6sl_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1, }; struct pltfm_imx_data { - int flags; u32 scratchpad; - enum imx_esdhc_type devtype; struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_100mhz; + struct pinctrl_state *pins_200mhz; + const struct esdhc_soc_data *socdata; struct esdhc_platform_data boarddata; struct clk *clk_ipg; struct clk *clk_ahb; @@ -90,25 +157,20 @@ struct pltfm_imx_data { MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ } multiblock_status; - + u32 uhs_mode; + u32 is_ddr; }; static struct platform_device_id imx_esdhc_devtype[] = { { .name = "sdhci-esdhc-imx25", - .driver_data = IMX25_ESDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx25_data, }, { .name = "sdhci-esdhc-imx35", - .driver_data = IMX35_ESDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx35_data, }, { .name = "sdhci-esdhc-imx51", - .driver_data = IMX51_ESDHC, - }, { - .name = "sdhci-esdhc-imx53", - .driver_data = IMX53_ESDHC, - }, { - .name = "sdhci-usdhc-imx6q", - .driver_data = IMX6Q_USDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx51_data, }, { /* sentinel */ } @@ -116,38 +178,34 @@ static struct platform_device_id imx_esdhc_devtype[] = { MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype); static const struct of_device_id imx_esdhc_dt_ids[] = { - { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], }, - { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], }, - { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], }, - { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], }, - { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], }, + { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, }, + { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, + { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, }, + { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, }, + { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, + { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); static inline int is_imx25_esdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX25_ESDHC; -} - -static inline int is_imx35_esdhc(struct pltfm_imx_data *data) -{ - return data->devtype == IMX35_ESDHC; + return data->socdata == &esdhc_imx25_data; } -static inline int is_imx51_esdhc(struct pltfm_imx_data *data) +static inline int is_imx53_esdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX51_ESDHC; + return 
data->socdata == &esdhc_imx53_data; } -static inline int is_imx53_esdhc(struct pltfm_imx_data *data) +static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX53_ESDHC; + return data->socdata == &usdhc_imx6q_data; } -static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) +static inline int esdhc_is_usdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX6Q_USDHC; + return !!(data->socdata->flags & ESDHC_FLAG_USDHC); } static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) @@ -164,7 +222,21 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) struct pltfm_imx_data *imx_data = pltfm_host->priv; u32 val = readl(host->ioaddr + reg); + if (unlikely(reg == SDHCI_PRESENT_STATE)) { + u32 fsl_prss = val; + /* save the least 20 bits */ + val = fsl_prss & 0x000FFFFF; + /* move dat[0-3] bits */ + val |= (fsl_prss & 0x0F000000) >> 4; + /* move cmd line bit */ + val |= (fsl_prss & 0x00800000) << 1; + } + if (unlikely(reg == SDHCI_CAPABILITIES)) { + /* ignore bit[0-15] as it stores cap_1 register val for mx6sl */ + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val &= 0xffff0000; + /* In FSL esdhc IC module, only bit20 is used to indicate the * ADMA2 capability of esdhc, but this bit is messed up on * some SOCs (e.g. on MX25, MX35 this bit is set, but they @@ -178,6 +250,25 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) } } + if (unlikely(reg == SDHCI_CAPABILITIES_1)) { + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF; + else + /* imx6q/dl does not have cap_1 register, fake one */ + val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104 + | SDHCI_SUPPORT_SDR50 + | SDHCI_USE_SDR50_TUNING; + } + } + + if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) { + val = 0; + val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT; + val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT; + val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT; + } + if (unlikely(reg == SDHCI_INT_STATUS)) { if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) { val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; @@ -224,7 +315,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) } } - if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (reg == SDHCI_INT_STATUS) && (val & SDHCI_INT_DATA_END))) { u32 v; @@ -256,10 +347,12 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; + u16 ret = 0; + u32 val; if (unlikely(reg == SDHCI_HOST_VERSION)) { reg ^= 2; - if (is_imx6q_usdhc(imx_data)) { + if (esdhc_is_usdhc(imx_data)) { /* * The usdhc register returns a wrong host version. * Correct it here. 
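The PRESENT_STATE handling in esdhc_readl_le() above shuffles the Freescale bit layout into the positions the generic SDHCI core reads: the low 20 bits are kept, the DAT[0-3] level bits are moved down by 4, and the CMD line level bit is moved up by 1. A minimal standalone model of that remapping, with illustrative names and an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

/* mirrors the SDHCI_PRESENT_STATE branch of esdhc_readl_le() above */
static uint32_t esdhc_remap_present_state(uint32_t fsl_prss)
{
	uint32_t val;

	val  = fsl_prss & 0x000FFFFF;        /* keep the least significant 20 bits */
	val |= (fsl_prss & 0x0F000000) >> 4; /* move the DAT[0-3] line levels down */
	val |= (fsl_prss & 0x00800000) << 1; /* move the CMD line level up */
	return val;
}

int main(void)
{
	uint32_t raw = 0x0F810000;	/* arbitrary example: DAT and CMD lines high */

	printf("raw 0x%08x -> sdhci 0x%08x\n", raw, esdhc_remap_present_state(raw));
	return 0;
}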
@@ -268,6 +361,30 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) } } + if (unlikely(reg == SDHCI_HOST_CONTROL2)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & ESDHC_VENDOR_SPEC_VSELECT) + ret |= SDHCI_CTRL_VDD_180; + + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + val = readl(host->ioaddr + ESDHC_MIX_CTRL); + else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) + /* the std tuning bits is in ACMD12_ERR for imx6sl */ + val = readl(host->ioaddr + SDHCI_ACMD12_ERR); + } + + if (val & ESDHC_MIX_CTRL_EXE_TUNE) + ret |= SDHCI_CTRL_EXEC_TUNING; + if (val & ESDHC_MIX_CTRL_SMPCLK_SEL) + ret |= SDHCI_CTRL_TUNED_CLK; + + ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK); + ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + + return ret; + } + return readw(host->ioaddr + reg); } @@ -275,10 +392,59 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; + u32 new_val = 0; switch (reg) { + case SDHCI_CLOCK_CONTROL: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CLOCK_CARD_EN) + new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + else + new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + return; + case SDHCI_HOST_CONTROL2: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CTRL_VDD_180) + new_val |= ESDHC_VENDOR_SPEC_VSELECT; + else + new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK; + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); + if (val & SDHCI_CTRL_TUNED_CLK) + new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL; + else + new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); + } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { + u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); + u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); + new_val = readl(host->ioaddr + ESDHC_TUNING_CTRL); + if (val & SDHCI_CTRL_EXEC_TUNING) { + new_val |= ESDHC_STD_TUNING_EN | + ESDHC_TUNING_START_TAP; + v |= ESDHC_MIX_CTRL_EXE_TUNE; + m |= ESDHC_MIX_CTRL_FBCLK_SEL; + } else { + new_val &= ~ESDHC_STD_TUNING_EN; + v &= ~ESDHC_MIX_CTRL_EXE_TUNE; + m &= ~ESDHC_MIX_CTRL_FBCLK_SEL; + } + + if (val & SDHCI_CTRL_TUNED_CLK) + v |= ESDHC_MIX_CTRL_SMPCLK_SEL; + else + v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + + writel(new_val, host->ioaddr + ESDHC_TUNING_CTRL); + writel(v, host->ioaddr + SDHCI_ACMD12_ERR); + writel(m, host->ioaddr + ESDHC_MIX_CTRL); + } + return; case SDHCI_TRANSFER_MODE: - if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (host->cmd->opcode == SD_IO_RW_EXTENDED) && (host->cmd->data->blocks > 1) && (host->cmd->data->flags & MMC_DATA_READ)) { @@ -288,7 +454,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); } - if (is_imx6q_usdhc(imx_data)) { + if (esdhc_is_usdhc(imx_data)) { u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); /* Swap AC23 bit */ if (val & SDHCI_TRNS_AUTO_CMD23) { @@ -310,10 +476,10 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) val |= SDHCI_CMD_ABORTCMD; if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && - (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + (imx_data->socdata->flags & 
ESDHC_FLAG_MULTIBLK_NO_INT)) imx_data->multiblock_status = MULTIBLK_IN_PROCESS; - if (is_imx6q_usdhc(imx_data)) + if (esdhc_is_usdhc(imx_data)) writel(val << 16, host->ioaddr + SDHCI_TRANSFER_MODE); else @@ -379,8 +545,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) * The reset on usdhc fails to clear MIX_CTRL register. * Do it manually here. */ - if (is_imx6q_usdhc(imx_data)) + if (esdhc_is_usdhc(imx_data)) { writel(0, host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 0; + } } } @@ -409,8 +577,60 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + unsigned int host_clock = clk_get_rate(pltfm_host->clk); + int pre_div = 2; + int div = 1; + u32 temp, val; + + if (clock == 0) { + if (esdhc_is_usdhc(imx_data)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + } + goto out; + } + + if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr) + pre_div = 1; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host_clock / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; + + while (host_clock / pre_div / div > clock && div < 16) + div++; - esdhc_set_clock(host, clock, clk_get_rate(pltfm_host->clk)); + host->mmc->actual_clock = host_clock / pre_div / div; + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->mmc->actual_clock); + + if (imx_data->is_ddr) + pre_div >>= 2; + else + pre_div >>= 1; + div--; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + if (esdhc_is_usdhc(imx_data)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + } + + mdelay(1); +out: + host->clock = clock; } static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) @@ -454,7 +674,192 @@ static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width) return 0; } -static const struct sdhci_ops sdhci_esdhc_ops = { +static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) +{ + u32 reg; + + /* FIXME: delay a bit for card to be ready for next tuning due to errors */ + mdelay(1); + + reg = readl(host->ioaddr + ESDHC_MIX_CTRL); + reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL | + ESDHC_MIX_CTRL_FBCLK_SEL; + writel(reg, host->ioaddr + ESDHC_MIX_CTRL); + writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); + dev_dbg(mmc_dev(host->mmc), + "tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n", + val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS)); +} + +static void esdhc_request_done(struct mmc_request *mrq) +{ + complete(&mrq->completion); +} + +static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode) +{ + struct mmc_command cmd = {0}; + struct mmc_request mrq = {0}; + struct mmc_data data = {0}; + struct scatterlist sg; + char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN]; + + cmd.opcode = opcode; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + 
data.sg_len = 1; + + sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern)); + + mrq.cmd = &cmd; + mrq.cmd->mrq = &mrq; + mrq.data = &data; + mrq.data->mrq = &mrq; + mrq.cmd->data = mrq.data; + + mrq.done = esdhc_request_done; + init_completion(&(mrq.completion)); + + disable_irq(host->irq); + spin_lock(&host->lock); + host->mrq = &mrq; + + sdhci_send_command(host, mrq.cmd); + + spin_unlock(&host->lock); + enable_irq(host->irq); + + wait_for_completion(&mrq.completion); + + if (cmd.error) + return cmd.error; + if (data.error) + return data.error; + + return 0; +} + +static void esdhc_post_tuning(struct sdhci_host *host) +{ + u32 reg; + + reg = readl(host->ioaddr + ESDHC_MIX_CTRL); + reg &= ~ESDHC_MIX_CTRL_EXE_TUNE; + writel(reg, host->ioaddr + ESDHC_MIX_CTRL); +} + +static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) +{ + int min, max, avg, ret; + + /* find the mininum delay first which can pass tuning */ + min = ESDHC_TUNE_CTRL_MIN; + while (min < ESDHC_TUNE_CTRL_MAX) { + esdhc_prepare_tuning(host, min); + if (!esdhc_send_tuning_cmd(host, opcode)) + break; + min += ESDHC_TUNE_CTRL_STEP; + } + + /* find the maxinum delay which can not pass tuning */ + max = min + ESDHC_TUNE_CTRL_STEP; + while (max < ESDHC_TUNE_CTRL_MAX) { + esdhc_prepare_tuning(host, max); + if (esdhc_send_tuning_cmd(host, opcode)) { + max -= ESDHC_TUNE_CTRL_STEP; + break; + } + max += ESDHC_TUNE_CTRL_STEP; + } + + /* use average delay to get the best timing */ + avg = (min + max) / 2; + esdhc_prepare_tuning(host, avg); + ret = esdhc_send_tuning_cmd(host, opcode); + esdhc_post_tuning(host); + + dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", + ret ? "failed" : "passed", avg, ret); + + return ret; +} + +static int esdhc_change_pinstate(struct sdhci_host *host, + unsigned int uhs) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + struct pinctrl_state *pinctrl; + + dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); + + if (IS_ERR(imx_data->pinctrl) || + IS_ERR(imx_data->pins_default) || + IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) + return -EINVAL; + + switch (uhs) { + case MMC_TIMING_UHS_SDR50: + pinctrl = imx_data->pins_100mhz; + break; + case MMC_TIMING_UHS_SDR104: + pinctrl = imx_data->pins_200mhz; + break; + default: + /* back to default state for other legacy timing */ + pinctrl = imx_data->pins_default; + } + + return pinctrl_select_state(imx_data->pinctrl, pinctrl); +} + +static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + + switch (uhs) { + case MMC_TIMING_UHS_SDR12: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_UHS_SDR25: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50; + break; + case MMC_TIMING_UHS_SDR104: + imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104; + break; + case MMC_TIMING_UHS_DDR50: + imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50; + writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | + ESDHC_MIX_CTRL_DDREN, + host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 1; + if (boarddata->delay_line) { + u32 v; + v = boarddata->delay_line << + ESDHC_DLL_OVERRIDE_VAL_SHIFT | + (1 << ESDHC_DLL_OVERRIDE_EN_SHIFT); + if (is_imx53_esdhc(imx_data)) + v <<= 1; + writel(v, host->ioaddr + 
ESDHC_DLL_CTRL); + } + break; + } + + return esdhc_change_pinstate(host, uhs); +} + +static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, .write_l = esdhc_writel_le, @@ -465,6 +870,7 @@ static const struct sdhci_ops sdhci_esdhc_ops = { .get_min_clock = esdhc_pltfm_get_min_clock, .get_ro = esdhc_pltfm_get_ro, .platform_bus_width = esdhc_pltfm_bus_width, + .set_uhs_signaling = esdhc_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { @@ -506,6 +912,14 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, of_property_read_u32(np, "max-frequency", &boarddata->f_max); + if (of_find_property(np, "no-1-8-v", NULL)) + boarddata->support_vsel = false; + else + boarddata->support_vsel = true; + + if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line)) + boarddata->delay_line = 0; + return 0; } #else @@ -539,9 +953,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto free_sdhci; } - if (of_id) - pdev->id_entry = of_id->data; - imx_data->devtype = pdev->id_entry->driver_data; + imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *) + pdev->id_entry->driver_data; pltfm_host->priv = imx_data; imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); @@ -568,29 +981,39 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) clk_prepare_enable(imx_data->clk_ipg); clk_prepare_enable(imx_data->clk_ahb); - imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + imx_data->pinctrl = devm_pinctrl_get(&pdev->dev); if (IS_ERR(imx_data->pinctrl)) { err = PTR_ERR(imx_data->pinctrl); goto disable_clk; } + imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(imx_data->pins_default)) { + err = PTR_ERR(imx_data->pins_default); + dev_err(mmc_dev(host->mmc), "could not get default state\n"); + goto disable_clk; + } + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) + if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207) /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK | SDHCI_QUIRK_BROKEN_ADMA; - if (is_imx53_esdhc(imx_data)) - imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; - /* * The imx6q ROM code will change the default watermark level setting * to something insane. Change it back here. 
*/ - if (is_imx6q_usdhc(imx_data)) + if (esdhc_is_usdhc(imx_data)) { writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + } + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + sdhci_esdhc_ops.platform_execute_tuning = + esdhc_executing_tuning; boarddata = &imx_data->boarddata; if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { if (!host->mmc->parent->platform_data) { @@ -650,6 +1073,23 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) break; } + /* sdr50 and sdr104 needs work on 1.8v signal voltage */ + if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data)) { + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_100MHZ); + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_200MHZ); + if (IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) { + dev_warn(mmc_dev(host->mmc), + "could not get ultra high speed state, work on normal mode\n"); + /* fall back to not support uhs by specify no 1.8v quirk */ + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + } else { + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + err = sdhci_add_host(host); if (err) goto disable_clk; diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index a2a06420e46..a7d9f95a7b0 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -49,41 +49,4 @@ #define ESDHC_HOST_CONTROL_RES 0x05 -static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock, - unsigned int host_clock) -{ - int pre_div = 2; - int div = 1; - u32 temp; - - if (clock == 0) - goto out; - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - - while (host_clock / pre_div / 16 > clock && pre_div < 256) - pre_div *= 2; - - while (host_clock / pre_div / div > clock && div < 16) - div++; - - dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host_clock / pre_div / div); - - pre_div >>= 1; - div--; - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - mdelay(1); -out: - host->clock = clock; -} - #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index e328252ebf2..0b249970b11 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -199,6 +199,14 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { + + int pre_div = 2; + int div = 1; + u32 temp; + + if (clock == 0) + goto out; + /* Workaround to reduce the clock frequency for p1010 esdhc */ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { if (clock > 20000000) @@ -207,8 +215,31 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) clock -= 5000000; } - /* Set the clock */ - esdhc_set_clock(host, clock, host->max_clk); + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; + + while 
(host->max_clk / pre_div / div > clock && div < 16) + div++; + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->max_clk / pre_div / div); + + pre_div >>= 1; + div--; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + mdelay(1); +out: + host->clock = clock; } #ifdef CONFIG_PM diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index d7d6bc8968d..8f753811fc7 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -37,6 +37,12 @@ #define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 #define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 #define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50 +#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa +#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb +#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 +#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 /* * PCI registers @@ -356,6 +362,28 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { .allow_runtime_pm = true, }; +/* Define Host controllers for Intel Merrifield platform */ +#define INTEL_MRFL_EMMC_0 0 +#define INTEL_MRFL_EMMC_1 1 + +static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot) +{ + if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) && + (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1)) + /* SD support is not ready yet */ + return -ENODEV; + + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | + MMC_CAP_1_8V_DDR; + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .probe_slot = intel_mrfl_mmc_probe_slot, +}; + /* O2Micro extra registers */ #define O2_SD_LOCK_WP 0xD3 #define O2_SD_MULTI_VCC3V 0xEE @@ -939,6 +967,54 @@ static const struct pci_device_id pci_ids[] = { .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_MRFL_MMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, + }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 7a7fb4f0d5a..bd8a0982aec 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -49,7 +49,6 @@ static unsigned int debug_quirks2; 
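Both esdhc set_clock hunks above pick the divider pair the same way: pre_div is doubled from 2 up to 256 and div counts from 1 up to 16 until base_clock / pre_div / div no longer exceeds the requested rate; the register encoding then stores pre_div >> 1 (pre_div >> 2 in DDR mode) and div - 1. A standalone sketch of that search, with illustrative names:

#include <stdio.h>

/* same divider search as the esdhc set_clock hunks above */
static unsigned int esdhc_pick_dividers(unsigned int base_clock,
					unsigned int clock,
					int *pre_div_out, int *div_out)
{
	int pre_div = 2;
	int div = 1;

	while (base_clock / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;

	while (base_clock / pre_div / div > clock && div < 16)
		div++;

	*pre_div_out = pre_div;
	*div_out = div;
	return base_clock / pre_div / div;
}

int main(void)
{
	int pre_div, div;
	unsigned int actual;

	/* e.g. a 200 MHz base clock divided down for a 25 MHz card clock */
	actual = esdhc_pick_dividers(200000000, 25000000, &pre_div, &div);
	printf("pre_div=%d div=%d actual=%u Hz\n", pre_div, div, actual);
	return 0;
}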
static void sdhci_finish_data(struct sdhci_host *); -static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); static void sdhci_finish_command(struct sdhci_host *); static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); static void sdhci_tuning_timer(unsigned long data); @@ -981,7 +980,7 @@ static void sdhci_finish_data(struct sdhci_host *host) tasklet_schedule(&host->finish_tasklet); } -static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) +void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) { int flags; u32 mask; @@ -1053,6 +1052,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); } +EXPORT_SYMBOL_GPL(sdhci_send_command); static void sdhci_finish_command(struct sdhci_host *host) { @@ -1435,7 +1435,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) } if (host->version >= SDHCI_SPEC_300 && - (ios->power_mode == MMC_POWER_UP)) + (ios->power_mode == MMC_POWER_UP) && + !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) sdhci_enable_preset_value(host, false); sdhci_set_clock(host, ios->clock); @@ -1875,6 +1876,14 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) return 0; } + if (host->ops->platform_execute_tuning) { + spin_unlock(&host->lock); + enable_irq(host->irq); + err = host->ops->platform_execute_tuning(host, opcode); + sdhci_runtime_pm_put(host); + return err; + } + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); /* @@ -1981,6 +1990,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) if (!tuning_loop_counter || !timeout) { ctrl &= ~SDHCI_CTRL_TUNED_CLK; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + err = -EIO; } else { if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { pr_info(DRIVER_NAME ": Tuning procedure" @@ -2491,6 +2501,14 @@ again: result = IRQ_HANDLED; intmask = sdhci_readl(host, SDHCI_INT_STATUS); + + /* + * If we know we'll call the driver to signal SDIO IRQ, disregard + * further indications of Card Interrupt in the status to avoid a + * needless loop. 
+ */ + if (cardint) + intmask &= ~SDHCI_INT_CARD_INT; if (intmask && --max_loops) goto again; out: @@ -2546,8 +2564,6 @@ EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups); int sdhci_suspend_host(struct sdhci_host *host) { - int ret; - if (host->ops->platform_suspend) host->ops->platform_suspend(host); @@ -2559,19 +2575,6 @@ int sdhci_suspend_host(struct sdhci_host *host) host->flags &= ~SDHCI_NEEDS_RETUNING; } - ret = mmc_suspend_host(host->mmc); - if (ret) { - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - host->flags |= SDHCI_NEEDS_RETUNING; - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); - } - - sdhci_enable_card_detection(host); - - return ret; - } - if (!device_may_wakeup(mmc_dev(host->mmc))) { sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); free_irq(host->irq, host); @@ -2579,14 +2582,14 @@ int sdhci_suspend_host(struct sdhci_host *host) sdhci_enable_irq_wakeups(host); enable_irq_wake(host->irq); } - return ret; + return 0; } EXPORT_SYMBOL_GPL(sdhci_suspend_host); int sdhci_resume_host(struct sdhci_host *host) { - int ret; + int ret = 0; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->enable_dma) @@ -2615,7 +2618,6 @@ int sdhci_resume_host(struct sdhci_host *host) mmiowb(); } - ret = mmc_resume_host(host->mmc); sdhci_enable_card_detection(host); if (host->ops->platform_resume) diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index b037f188fe4..0a3ed01887d 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -288,6 +288,7 @@ struct sdhci_ops { unsigned int (*get_ro)(struct sdhci_host *host); void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); + int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); void (*hw_reset)(struct sdhci_host *host); void (*platform_suspend)(struct sdhci_host *host); @@ -393,6 +394,8 @@ static inline void *sdhci_priv(struct sdhci_host *host) extern void sdhci_card_detect(struct sdhci_host *host); extern int sdhci_add_host(struct sdhci_host *host); extern void sdhci_remove_host(struct sdhci_host *host, int dead); +extern void sdhci_send_command(struct sdhci_host *host, + struct mmc_command *cmd); #ifdef CONFIG_PM extern int sdhci_suspend_host(struct sdhci_host *host); diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index 50adbd155f3..b7e30577531 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -516,9 +516,7 @@ static void sdricoh_pcmcia_detach(struct pcmcia_device *link) #ifdef CONFIG_PM static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) { - struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "suspend\n"); - mmc_suspend_host(mmc); return 0; } @@ -527,7 +525,6 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link) struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "resume\n"); sdricoh_reset(mmc_priv(mmc)); - mmc_resume_host(mmc); return 0; } #else diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 36629a024aa..d032b080ac4 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -964,7 +964,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) static int sh_mmcif_clk_update(struct sh_mmcif_host *host) { - int ret = clk_enable(host->hclk); + int ret = clk_prepare_enable(host->hclk); if (!ret) { host->clk = clk_get_rate(host->hclk); @@ -1018,7 +1018,7 @@ static void 
sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } if (host->power) { pm_runtime_put_sync(&host->pd->dev); - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); host->power = false; if (ios->power_mode == MMC_POWER_OFF) sh_mmcif_set_power(host, ios); @@ -1466,7 +1466,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) mutex_init(&host->thread_lock); - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); ret = mmc_add_host(mmc); if (ret < 0) goto emmcaddh; @@ -1487,7 +1487,7 @@ ereqirq1: ereqirq0: pm_runtime_suspend(&pdev->dev); eresume: - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); eclkupdate: clk_put(host->hclk); eclkget: @@ -1505,7 +1505,7 @@ static int sh_mmcif_remove(struct platform_device *pdev) int irq[2]; host->dying = true; - clk_enable(host->hclk); + clk_prepare_enable(host->hclk); pm_runtime_get_sync(&pdev->dev); dev_pm_qos_hide_latency_limit(&pdev->dev); @@ -1530,7 +1530,7 @@ static int sh_mmcif_remove(struct platform_device *pdev) if (irq[1] >= 0) free_irq(irq[1], host); - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); mmc_free_host(host->mmc); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -1538,28 +1538,21 @@ static int sh_mmcif_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sh_mmcif_suspend(struct device *dev) { struct sh_mmcif_host *host = dev_get_drvdata(dev); - int ret = mmc_suspend_host(host->mmc); - if (!ret) - sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - return ret; + return 0; } static int sh_mmcif_resume(struct device *dev) { - struct sh_mmcif_host *host = dev_get_drvdata(dev); - - return mmc_resume_host(host->mmc); + return 0; } -#else -#define sh_mmcif_suspend NULL -#define sh_mmcif_resume NULL -#endif /* CONFIG_PM */ +#endif static const struct of_device_id mmcif_of_match[] = { { .compatible = "renesas,sh-mmcif" }, @@ -1568,8 +1561,7 @@ static const struct of_device_id mmcif_of_match[] = { MODULE_DEVICE_TABLE(of, mmcif_of_match); static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { - .suspend = sh_mmcif_suspend, - .resume = sh_mmcif_resume, + SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume) }; static struct platform_driver sh_mmcif_driver = { diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 43d962829f8..d1760ebcac0 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -1030,7 +1030,7 @@ static void tifm_sd_remove(struct tifm_dev *sock) static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) { - return mmc_suspend_host(tifm_get_drvdata(sock)); + return 0; } static int tifm_sd_resume(struct tifm_dev *sock) @@ -1044,8 +1044,6 @@ static int tifm_sd_resume(struct tifm_dev *sock) if (rc) host->eject = 1; - else - rc = mmc_resume_host(mmc); return rc; } diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index b3802256f95..f3b2d8ca1ec 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -1145,12 +1145,9 @@ int tmio_mmc_host_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct tmio_mmc_host *host = mmc_priv(mmc); - int ret = mmc_suspend_host(mmc); - if (!ret) - tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); - - return ret; + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + return 0; } EXPORT_SYMBOL(tmio_mmc_host_suspend); @@ -1163,7 +1160,7 @@ int tmio_mmc_host_resume(struct device *dev) /* 
The MMC core will perform the complete set up */ host->resuming = true; - return mmc_resume_host(mmc); + return 0; } EXPORT_SYMBOL(tmio_mmc_host_resume); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 4f84586c6e9..63fac78b3d4 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1269,21 +1269,18 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host) static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) { struct via_crdr_mmc_host *host; - int ret = 0; host = pci_get_drvdata(pcidev); via_save_pcictrlreg(host); via_save_sdcreg(host); - ret = mmc_suspend_host(host->mmc); - pci_save_state(pcidev); pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); pci_disable_device(pcidev); pci_set_power_state(pcidev, pci_choose_state(pcidev, state)); - return ret; + return 0; } static int via_sd_resume(struct pci_dev *pcidev) @@ -1316,8 +1313,6 @@ static int via_sd_resume(struct pci_dev *pcidev) via_restore_pcictrlreg(sdhost); via_init_sdc_pm(sdhost); - ret = mmc_resume_host(sdhost->mmc); - return ret; } diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index e9028ad05ff..4262296c12f 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -2392,26 +2392,12 @@ static void vub300_disconnect(struct usb_interface *interface) #ifdef CONFIG_PM static int vub300_suspend(struct usb_interface *intf, pm_message_t message) { - struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); - if (!vub300 || !vub300->mmc) { - return 0; - } else { - struct mmc_host *mmc = vub300->mmc; - mmc_suspend_host(mmc); - return 0; - } + return 0; } static int vub300_resume(struct usb_interface *intf) { - struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); - if (!vub300 || !vub300->mmc) { - return 0; - } else { - struct mmc_host *mmc = vub300->mmc; - mmc_resume_host(mmc); - return 0; - } + return 0; } #else #define vub300_suspend NULL diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index e954b775887..1defd5ed323 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1814,28 +1814,11 @@ static void wbsd_pnp_remove(struct pnp_dev *dev) #ifdef CONFIG_PM -static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) -{ - BUG_ON(host == NULL); - - return mmc_suspend_host(host->mmc); -} - -static int wbsd_resume(struct wbsd_host *host) -{ - BUG_ON(host == NULL); - - wbsd_init_device(host); - - return mmc_resume_host(host->mmc); -} - static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; - int ret; if (mmc == NULL) return 0; @@ -1844,12 +1827,7 @@ static int wbsd_platform_suspend(struct platform_device *dev, host = mmc_priv(mmc); - ret = wbsd_suspend(host, state); - if (ret) - return ret; - wbsd_chip_poweroff(host); - return 0; } @@ -1872,7 +1850,8 @@ static int wbsd_platform_resume(struct platform_device *dev) */ mdelay(5); - return wbsd_resume(host); + wbsd_init_device(host); + return 0; } #ifdef CONFIG_PNP @@ -1880,16 +1859,12 @@ static int wbsd_platform_resume(struct platform_device *dev) static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); - struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Suspending...\n"); - - host = mmc_priv(mmc); - - return wbsd_suspend(host, state); + return 0; } static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) @@ -1922,7 +1897,8 @@ static int wbsd_pnp_resume(struct 
pnp_dev *pnp_dev) */ mdelay(5); - return wbsd_resume(host); + wbsd_init_device(host); + return 0; } #endif /* CONFIG_PNP */ diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 34231d5168f..e902ed7846b 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -212,28 +212,14 @@ struct wmt_mci_priv { static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable) { - u32 reg_tmp; - if (enable) { - if (priv->power_inverted) { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp | BM_SD_OFF, - priv->sdmmc_base + SDMMC_BUSMODE); - } else { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp & (~BM_SD_OFF), - priv->sdmmc_base + SDMMC_BUSMODE); - } - } else { - if (priv->power_inverted) { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp & (~BM_SD_OFF), - priv->sdmmc_base + SDMMC_BUSMODE); - } else { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp | BM_SD_OFF, - priv->sdmmc_base + SDMMC_BUSMODE); - } - } + u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + + if (enable ^ priv->power_inverted) + reg_tmp &= ~BM_SD_OFF; + else + reg_tmp |= BM_SD_OFF; + + writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE); } static void wmt_mci_read_response(struct mmc_host *mmc) @@ -939,28 +925,23 @@ static int wmt_mci_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct mmc_host *mmc = platform_get_drvdata(pdev); struct wmt_mci_priv *priv; - int ret; if (!mmc) return 0; priv = mmc_priv(mmc); - ret = mmc_suspend_host(mmc); - - if (!ret) { - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + - SDMMC_BUSMODE); + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + + SDMMC_BUSMODE); - reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); - writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN); + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN); - writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); - writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); - clk_disable(priv->clk_sdmmc); - } - return ret; + clk_disable(priv->clk_sdmmc); + return 0; } static int wmt_mci_resume(struct device *dev) @@ -969,7 +950,6 @@ static int wmt_mci_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct mmc_host *mmc = platform_get_drvdata(pdev); struct wmt_mci_priv *priv; - int ret = 0; if (mmc) { priv = mmc_priv(mmc); @@ -987,10 +967,9 @@ static int wmt_mci_resume(struct device *dev) writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base + SDMMC_INTMASK0); - ret = mmc_resume_host(mmc); } - return ret; + return 0; } static const struct dev_pm_ops wmt_mci_pm = { diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index d78a97d4153..59f08c44abd 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, dma_dev = host->dma_chan->device; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; phys_addr = dma_map_single(dma_dev->dev, p, len, dir); if (dma_mapping_error(dma_dev->dev, phys_addr)) { diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 
3dc1a7564d8..8b2752263db 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, dma_dev = chan->device; dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); - flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; - if (direction == DMA_TO_DEVICE) { dma_src = dma_addr; dma_dst = host->data_pa; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index bc8fd362a5a..0ec2a7e8c8a 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -524,8 +524,9 @@ static ssize_t bonding_store_arp_interval(struct device *d, goto out; } if (bond->params.mode == BOND_MODE_ALB || - bond->params.mode == BOND_MODE_TLB) { - pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n", + bond->params.mode == BOND_MODE_TLB || + bond->params.mode == BOND_MODE_8023AD) { + pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n", bond->dev->name, bond->dev->name); ret = -EINVAL; goto out; @@ -603,15 +604,14 @@ static ssize_t bonding_store_arp_targets(struct device *d, return restart_syscall(); targets = bond->params.arp_targets; - newtarget = in_aton(buf + 1); + if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) || + IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) { + pr_err("%s: invalid ARP target %pI4 specified for addition\n", + bond->dev->name, &newtarget); + goto out; + } /* look for adds */ if (buf[0] == '+') { - if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { - pr_err("%s: invalid ARP target %pI4 specified for addition\n", - bond->dev->name, &newtarget); - goto out; - } - if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */ pr_err("%s: ARP target %pI4 is already present\n", bond->dev->name, &newtarget); @@ -634,12 +634,6 @@ static ssize_t bonding_store_arp_targets(struct device *d, targets[ind] = newtarget; write_unlock_bh(&bond->lock); } else if (buf[0] == '-') { - if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { - pr_err("%s: invalid ARP target %pI4 specified for removal\n", - bond->dev->name, &newtarget); - goto out; - } - ind = bond_get_targets_ip(targets, newtarget); if (ind == -1) { pr_err("%s: unable to remove nonexistent ARP target %pI4.\n", @@ -701,6 +695,8 @@ static ssize_t bonding_store_downdelay(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (!(bond->params.miimon)) { pr_err("%s: Unable to set down delay as MII monitoring is disabled\n", bond->dev->name); @@ -734,6 +730,7 @@ static ssize_t bonding_store_downdelay(struct device *d, } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, @@ -756,6 +753,8 @@ static ssize_t bonding_store_updelay(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (!(bond->params.miimon)) { pr_err("%s: Unable to set up delay as MII monitoring is disabled\n", bond->dev->name); @@ -789,6 +788,7 @@ static ssize_t bonding_store_updelay(struct device *d, } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 77a07a12e77..ca31286aa02 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -63,6 +63,9 @@ (((mode) == 
BOND_MODE_TLB) || \ ((mode) == BOND_MODE_ALB)) +#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \ + ((htonl(INADDR_BROADCAST) == a) || \ + ipv4_is_zeronet(a)) /* * Less bad way to call ioctl from within the kernel; this needs to be * done some other way to get the call out of interrupt context. diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5aa5e814649..c3c4c266b84 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1388,6 +1388,9 @@ static int alx_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + + alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 4e01c57d8c8..a1f66e2c9a8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1376,7 +1376,6 @@ enum { BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, - BNX2X_SP_RTNL_TX_RESUME, }; struct bnx2x_prev_path_list { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dcafbda3e5b..ec96130533c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2959,6 +2959,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->port.pmf = 0; + /* clear pending work in rtnl task */ + bp->sp_rtnl_state = 0; + smp_mb(); + /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); if (CNIC_LOADED(bp)) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fcf2761d882..fdace204b05 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) /* ets may affect cmng configuration: reinit it in hw */ bnx2x_set_local_cmng(bp); - - set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); - return; case BNX2X_DCBX_STATE_TX_RELEASED: DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e622cc1f96f..814d0eca9b3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -577,7 +577,9 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -614,7 +616,9 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -5231,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp) case EVENT_RING_OPCODE_STOP_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_STOP)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, 
"got START TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_START)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: @@ -9352,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset(bp, global); barrier(); + /* clear errors in PGB */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); + /* Recover after reset: */ /* MCP */ if (global && bnx2x_reset_mcp_comp(bp, val)) @@ -9706,11 +9714,10 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_pf_set_vfs_vlan(bp); - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { bnx2x_dcbx_stop_hw_tx(bp); - - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) bnx2x_dcbx_resume_hw_tx(bp); + } /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5ecf267dc4c..3efbb35267c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2864,6 +2864,17 @@ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. */ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c + /* [R 9] Interrupt register #0 read */ #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 /* [RC 9] Interrupt register #0 read clear */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 9199adf32d3..efa8a151d78 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -152,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { DP(BNX2X_MSG_IOV, "detecting channel down. 
Aborting message\n"); *done = PFVF_STATUS_SUCCESS; - return 0; + return -EINVAL; } /* Write message address */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 00c5be8c55b..a9e068423ba 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -13618,16 +13618,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, if (stmpconf.flags) return -EINVAL; - switch (stmpconf.tx_type) { - case HWTSTAMP_TX_ON: - tg3_flag_set(tp, TX_TSTAMP_EN); - break; - case HWTSTAMP_TX_OFF: - tg3_flag_clear(tp, TX_TSTAMP_EN); - break; - default: + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; - } switch (stmpconf.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -13689,6 +13682,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, tw32(TG3_RX_PTP_CTL, tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + tg3_flag_set(tp, TX_TSTAMP_EN); + else + tg3_flag_clear(tp, TX_TSTAMP_EN); + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? -EFAULT : 0; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fb0edfe3d2..dbcd5262c01 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1758,7 +1758,7 @@ err: /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num, bool untagged, bool promiscuous) + u32 num, bool promiscuous) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1778,7 +1778,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, req->interface_id = if_id; req->promiscuous = promiscuous; - req->untagged = untagged; + req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 
1 : 0; req->num_vlan = num; if (!promiscuous) { memcpy(req->normal_vlan, vtag_array, @@ -1847,7 +1847,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } + if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != + req->if_flags_mask) { + dev_warn(&adapter->pdev->dev, + "Cannot set rx filter flags 0x%x\n", + req->if_flags_mask); + dev_warn(&adapter->pdev->dev, + "Interface is capable of 0x%x flags only\n", + be_if_cap_flags(adapter)); + } + req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter)); + status = be_mcc_notify_wait(adapter); + err: spin_unlock_bh(&adapter->mcc_lock); return status; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index edf3e8a0ff8..0075686276a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1984,7 +1984,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, char *fw_on_flash); int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num, bool untagged, bool promiscuous); + u32 num, bool promiscuous); int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eaecaadfa8c..abde9747163 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1079,7 +1079,7 @@ static int be_vid_config(struct be_adapter *adapter) vids[num++] = cpu_to_le16(i); status = be_cmd_vlan_config(adapter, adapter->if_handle, - vids, num, 1, 0); + vids, num, 0); if (status) { /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -2676,6 +2676,11 @@ static int be_close(struct net_device *netdev) be_rx_qs_destroy(adapter); + for (i = 1; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; + for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter)) synchronize_irq(be_msix_vec_get(adapter, eqo)); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b2793b91cc5..4cbebf3d80e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -386,7 +386,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) */ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + bdp->cbd_bufaddr = 0; + fep->tx_skbuff[index] = NULL; + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_OK; + } /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. 
*/ @@ -861,6 +868,7 @@ fec_enet_rx(struct net_device *ndev, int budget) struct bufdesc_ex *ebdp = NULL; bool vlan_packet_rcvd = false; u16 vlan_tag; + int index = 0; #ifdef CONFIG_M532x flush_cache_all(); @@ -916,10 +924,15 @@ fec_enet_rx(struct net_device *ndev, int budget) ndev->stats.rx_packets++; pkt_len = bdp->cbd_datlen; ndev->stats.rx_bytes += pkt_len; - data = (__u8*)__va(bdp->cbd_bufaddr); - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + if (fep->bufdesc_ex) + index = (struct bufdesc_ex *)bdp - + (struct bufdesc_ex *)fep->rx_bd_base; + else + index = bdp - fep->rx_bd_base; + data = fep->rx_skbuff[index]->data; + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); @@ -999,8 +1012,8 @@ fec_enet_rx(struct net_device *ndev, int budget) napi_gro_receive(&fep->napi, skb); } - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -1719,6 +1732,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + fec_enet_free_buffers(ndev); + if (net_ratelimit()) + netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } bdp->cbd_sc = BD_ENET_RX_EMPTY; if (fep->bufdesc_ex) { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index aedd5736a87..8d3945ab733 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3482,10 +3482,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) * specified. Matching the kind of event packet is not supported, with the * exception of "all V2 events regardless of level 2 or 4". 
**/ -static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, + struct hwtstamp_config *config) { struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config *config = &adapter->hwtstamp_config; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 rxmtrl = 0; @@ -3586,6 +3586,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) return -ERANGE; } + adapter->hwtstamp_config = *config; + /* enable/disable Tx h/w time stamping */ regval = er32(TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; @@ -3874,7 +3876,7 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); /* initialize systim and reset the ns time counter */ - e1000e_config_hwtstamp(adapter); + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); /* Set EEE advertisement as appropriate */ if (adapter->flags2 & FLAG2_HAS_EEE) { @@ -5797,14 +5799,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - adapter->hwtstamp_config = config; - - ret_val = e1000e_config_hwtstamp(adapter); + ret_val = e1000e_config_hwtstamp(adapter, &config); if (ret_val) return ret_val; - config = adapter->hwtstamp_config; - switch (config.rx_filter) { case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 00cd36e0860..61088a6a942 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2890,7 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) PHY_INTERFACE_MODE_GMII); if (!mp->phy) err = -ENODEV; - phy_addr_set(mp, mp->phy->addr); + else + phy_addr_set(mp, mp->phy->addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 6ca30739625..8675d26a678 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -98,6 +98,7 @@ enum { static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, + void *uout, int uout_size, mlx5_cmd_cbk_t cbk, void *context, int page_queue) { @@ -110,6 +111,8 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, ent->in = in; ent->out = out; + ent->uout = uout; + ent->uout_size = uout_size; ent->callback = cbk; ent->context = context; ent->cmd = cmd; @@ -534,6 +537,7 @@ static void cmd_work_handler(struct work_struct *work) ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + ent->op = be32_to_cpu(lay->in[0]) >> 16; if (ent->in->next) lay->in_ptr = cpu_to_be64(ent->in->next->dma); lay->inlen = cpu_to_be32(ent->in->len); @@ -628,7 +632,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) * 2. 
page queue commands do not support asynchrous completion */ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, - struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback, + struct mlx5_cmd_msg *out, void *uout, int uout_size, + mlx5_cmd_cbk_t callback, void *context, int page_queue, u8 *status) { struct mlx5_cmd *cmd = &dev->cmd; @@ -642,7 +647,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (callback && page_queue) return -EINVAL; - ent = alloc_cmd(cmd, in, out, callback, context, page_queue); + ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, + page_queue); if (IS_ERR(ent)) return PTR_ERR(ent); @@ -670,10 +676,10 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); if (op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[op]; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); } mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", @@ -826,7 +832,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, int n; int i; - msg = kzalloc(sizeof(*msg), GFP_KERNEL); + msg = kzalloc(sizeof(*msg), flags); if (!msg) return ERR_PTR(-ENOMEM); @@ -1109,6 +1115,19 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) up(&cmd->sem); } +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) +{ + unsigned long flags; + + if (msg->cache) { + spin_lock_irqsave(&msg->cache->lock, flags); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock_irqrestore(&msg->cache->lock, flags); + } else { + mlx5_free_cmd_msg(dev, msg); + } +} + void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) { struct mlx5_cmd *cmd = &dev->cmd; @@ -1117,6 +1136,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) void *context; int err; int i; + ktime_t t1, t2, delta; + s64 ds; + struct mlx5_cmd_stats *stats; + unsigned long flags; for (i = 0; i < (1 << cmd->log_sz); i++) { if (test_bit(i, &vector)) { @@ -1141,9 +1164,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) } free_ent(cmd, ent->idx); if (ent->callback) { + t1 = timespec_to_ktime(ent->ts1); + t2 = timespec_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + if (ent->op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[ent->op]; + spin_lock_irqsave(&stats->lock, flags); + stats->sum += ds; + ++stats->n; + spin_unlock_irqrestore(&stats->lock, flags); + } + callback = ent->callback; context = ent->context; err = ent->ret; + if (!err) + err = mlx5_copy_from_msg(ent->uout, + ent->out, + ent->uout_size); + + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + free_cmd(ent); callback(err, context); } else { @@ -1160,7 +1203,8 @@ static int status_to_err(u8 status) return status ? 
-1 : 0; /* TBD more meaningful codes */ } -static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) +static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, + gfp_t gfp) { struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); struct mlx5_cmd *cmd = &dev->cmd; @@ -1172,7 +1216,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) ent = &cmd->cache.med; if (ent) { - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { msg = list_entry(ent->head.next, typeof(*msg), list); /* For cached lists, we must explicitly state what is @@ -1181,43 +1225,34 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) msg->len = in_size; list_del(&msg->list); } - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); } if (IS_ERR(msg)) - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size); + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); return msg; } -static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) -{ - if (msg->cache) { - spin_lock(&msg->cache->lock); - list_add_tail(&msg->list, &msg->cache->head); - spin_unlock(&msg->cache->lock); - } else { - mlx5_free_cmd_msg(dev, msg); - } -} - static int is_manage_pages(struct mlx5_inbox_hdr *in) { return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; } -int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, - int out_size) +static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size, mlx5_cmd_cbk_t callback, void *context) { struct mlx5_cmd_msg *inb; struct mlx5_cmd_msg *outb; int pages_queue; + gfp_t gfp; int err; u8 status = 0; pages_queue = is_manage_pages(in); + gfp = callback ? GFP_ATOMIC : GFP_KERNEL; - inb = alloc_msg(dev, in_size); + inb = alloc_msg(dev, in_size, gfp); if (IS_ERR(inb)) { err = PTR_ERR(inb); return err; @@ -1229,13 +1264,14 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, goto out_in; } - outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size); + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); if (IS_ERR(outb)) { err = PTR_ERR(outb); goto out_in; } - err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status); + err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, + pages_queue, &status); if (err) goto out_out; @@ -1248,14 +1284,30 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, err = mlx5_copy_from_msg(out, outb, out_size); out_out: - mlx5_free_cmd_msg(dev, outb); + if (!callback) + mlx5_free_cmd_msg(dev, outb); out_in: - free_msg(dev, inb); + if (!callback) + free_msg(dev, inb); return err; } + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size) +{ + return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); +} EXPORT_SYMBOL(mlx5_cmd_exec); +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context) +{ + return cmd_exec(dev, in, in_size, out, out_size, callback, context); +} +EXPORT_SYMBOL(mlx5_cmd_exec_cb); + static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 9c7194b26ee..80f6d127257 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -154,10 +154,10 @@ static ssize_t average_read(struct file 
*filp, char __user *buf, size_t count, return 0; stats = filp->private_data; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); if (stats->n) field = div64_u64(stats->sum, stats->n); - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); if (ret > 0) { if (copy_to_user(buf, tbuf, ret)) @@ -175,10 +175,10 @@ static ssize_t average_write(struct file *filp, const char __user *buf, struct mlx5_cmd_stats *stats; stats = filp->private_data; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); stats->sum = 0; stats->n = 0; - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); *pos += count; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 2231d93cc7a..64a61b286b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -354,7 +354,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); in->ctx.intr = vecidx; - in->ctx.log_page_size = PAGE_SHIFT - 12; + in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; in->events_mask = cpu_to_be64(mask); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index bc0f5fb66e2..40a9f5ed814 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -159,6 +159,36 @@ struct mlx5_reg_host_endianess { u8 rsvd[15]; }; + +#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) + +enum { + MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | + CAP_MASK(MLX5_CAP_OFF_DCT, 1), +}; + +/* selectively copy writable fields clearing any reserved area + */ +static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from) +{ + u64 v64; + + to->log_max_qp = from->log_max_qp & 0x1f; + to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f; + to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f; + to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f; + to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f; + to->log_max_atomic_size_qp = from->log_max_atomic_size_qp; + to->log_max_atomic_size_dc = from->log_max_atomic_size_dc; + v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK; + to->flags = cpu_to_be64(v64); +} + +enum { + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + static int handle_hca_cap(struct mlx5_core_dev *dev) { struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL; @@ -180,7 +210,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) } query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); - query_ctx.hdr.opmod = cpu_to_be16(0x1); + query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR); err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx), query_out, sizeof(*query_out)); if (err) @@ -192,8 +222,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) goto query_ex; } - memcpy(&set_ctx->hca_cap, &query_out->hca_cap, - sizeof(set_ctx->hca_cap)); + copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 5b44e2e46da..35e514dc7b7 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -37,31 +37,41 @@ #include "mlx5_core.h" int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - struct mlx5_create_mkey_mbox_in *in, int inlen) + struct mlx5_create_mkey_mbox_in *in, int inlen, + mlx5_cmd_cbk_t callback, void *context, + struct mlx5_create_mkey_mbox_out *out) { - struct mlx5_create_mkey_mbox_out out; + struct mlx5_create_mkey_mbox_out lout; int err; u8 key; - memset(&out, 0, sizeof(out)); - spin_lock(&dev->priv.mkey_lock); + memset(&lout, 0, sizeof(lout)); + spin_lock_irq(&dev->priv.mkey_lock); key = dev->priv.mkey_key++; - spin_unlock(&dev->priv.mkey_lock); + spin_unlock_irq(&dev->priv.mkey_lock); in->seg.qpn_mkey7_0 |= cpu_to_be32(key); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (callback) { + err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out), + callback, context); + return err; + } else { + err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout)); + } + if (err) { mlx5_core_dbg(dev, "cmd exec faile %d\n", err); return err; } - if (out.hdr.status) { - mlx5_core_dbg(dev, "status %d\n", out.hdr.status); - return mlx5_cmd_status_to_err(&out.hdr); + if (lout.hdr.status) { + mlx5_core_dbg(dev, "status %d\n", lout.hdr.status); + return mlx5_cmd_status_to_err(&lout.hdr); } - mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; - mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); + mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; + mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", + be32_to_cpu(lout.mkey), key, mr->key); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 7b12acf210f..37b6ad1f9a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -57,10 +57,13 @@ struct mlx5_pages_req { }; struct fw_page { - struct rb_node rb_node; - u64 addr; - struct page *page; - u16 func_id; + struct rb_node rb_node; + u64 addr; + struct page *page; + u16 func_id; + unsigned long bitmask; + struct list_head list; + unsigned free_count; }; struct mlx5_query_pages_inbox { @@ -94,6 +97,11 @@ enum { MAX_RECLAIM_TIME_MSECS = 5000, }; +enum { + MLX5_MAX_RECLAIM_TIME_MILI = 5000, + MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096, +}; + static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) { struct rb_root *root = &dev->priv.page_root; @@ -101,6 +109,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u struct rb_node *parent = NULL; struct fw_page *nfp; struct fw_page *tfp; + int i; while (*new) { parent = *new; @@ -113,25 +122,29 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u return -EEXIST; } - nfp = kmalloc(sizeof(*nfp), GFP_KERNEL); + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); if (!nfp) return -ENOMEM; nfp->addr = addr; nfp->page = page; nfp->func_id = func_id; + nfp->free_count = MLX5_NUM_4K_IN_PAGE; + for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) + set_bit(i, &nfp->bitmask); rb_link_node(&nfp->rb_node, parent, new); rb_insert_color(&nfp->rb_node, root); + list_add(&nfp->list, &dev->priv.free_list); return 0; } -static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) +static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) { struct 
rb_root *root = &dev->priv.page_root; struct rb_node *tmp = root->rb_node; - struct page *result = NULL; + struct fw_page *result = NULL; struct fw_page *tfp; while (tmp) { @@ -141,9 +154,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) } else if (tfp->addr > addr) { tmp = tmp->rb_right; } else { - rb_erase(&tfp->rb_node, root); - result = tfp->page; - kfree(tfp); + result = tfp; break; } } @@ -176,12 +187,98 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, return err; } +static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) +{ + struct fw_page *fp; + unsigned n; + + if (list_empty(&dev->priv.free_list)) { + return -ENOMEM; + mlx5_core_warn(dev, "\n"); + } + + fp = list_entry(dev->priv.free_list.next, struct fw_page, list); + n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); + if (n >= MLX5_NUM_4K_IN_PAGE) { + mlx5_core_warn(dev, "alloc 4k bug\n"); + return -ENOENT; + } + clear_bit(n, &fp->bitmask); + fp->free_count--; + if (!fp->free_count) + list_del(&fp->list); + + *addr = fp->addr + n * 4096; + + return 0; +} + +static void free_4k(struct mlx5_core_dev *dev, u64 addr) +{ + struct fw_page *fwp; + int n; + + fwp = find_fw_page(dev, addr & PAGE_MASK); + if (!fwp) { + mlx5_core_warn(dev, "page not found\n"); + return; + } + + n = (addr & ~PAGE_MASK) % 4096; + fwp->free_count++; + set_bit(n, &fwp->bitmask); + if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { + rb_erase(&fwp->rb_node, &dev->priv.page_root); + if (fwp->free_count != 1) + list_del(&fwp->list); + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(fwp->page); + kfree(fwp); + } else if (fwp->free_count == 1) { + list_add(&fwp->list, &dev->priv.free_list); + } +} + +static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) +{ + struct page *page; + u64 addr; + int err; + + page = alloc_page(GFP_HIGHUSER); + if (!page) { + mlx5_core_warn(dev, "failed to allocate page\n"); + return -ENOMEM; + } + addr = dma_map_page(&dev->pdev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + mlx5_core_warn(dev, "failed dma mapping page\n"); + err = -ENOMEM; + goto out_alloc; + } + err = insert_page(dev, addr, page, func_id); + if (err) { + mlx5_core_err(dev, "failed to track allocated page\n"); + goto out_mapping; + } + + return 0; + +out_mapping: + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + +out_alloc: + __free_page(page); + + return err; +} static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail) { struct mlx5_manage_pages_inbox *in; struct mlx5_manage_pages_outbox out; - struct page *page; + struct mlx5_manage_pages_inbox *nin; int inlen; u64 addr; int err; @@ -196,27 +293,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, memset(&out, 0, sizeof(out)); for (i = 0; i < npages; i++) { - page = alloc_page(GFP_HIGHUSER); - if (!page) { - err = -ENOMEM; - mlx5_core_warn(dev, "failed to allocate page\n"); - goto out_alloc; - } - addr = dma_map_page(&dev->pdev->dev, page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&dev->pdev->dev, addr)) { - mlx5_core_warn(dev, "failed dma mapping page\n"); - __free_page(page); - err = -ENOMEM; - goto out_alloc; - } - err = insert_page(dev, addr, page, func_id); +retry: + err = alloc_4k(dev, &addr); if (err) { - mlx5_core_err(dev, "failed to track allocated page\n"); - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - 
err = -ENOMEM; - goto out_alloc; + if (err == -ENOMEM) + err = alloc_system_page(dev, func_id); + if (err) + goto out_4k; + + goto retry; } in->pas[i] = cpu_to_be64(addr); } @@ -226,7 +311,6 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in->func_id = cpu_to_be16(func_id); in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - mlx5_core_dbg(dev, "err %d\n", err); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); goto out_alloc; @@ -247,25 +331,22 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, out_alloc: if (notify_fail) { - memset(in, 0, inlen); - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); - if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out))) - mlx5_core_warn(dev, "\n"); - } - for (i--; i >= 0; i--) { - addr = be64_to_cpu(in->pas[i]); - page = remove_page(dev, addr); - if (!page) { - mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n", - addr); - continue; + nin = kzalloc(sizeof(*nin), GFP_KERNEL); + if (!nin) { + mlx5_core_warn(dev, "allocation failed\n"); + goto out_4k; } - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); + memset(&out, 0, sizeof(out)); + nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); + if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) + mlx5_core_warn(dev, "page notify failed\n"); + kfree(nin); } +out_4k: + for (i--; i >= 0; i--) + free_4k(dev, be64_to_cpu(in->pas[i])); out_free: mlx5_vfree(in); return err; @@ -276,7 +357,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, { struct mlx5_manage_pages_inbox in; struct mlx5_manage_pages_outbox *out; - struct page *page; int num_claimed; int outlen; u64 addr; @@ -315,13 +395,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, for (i = 0; i < num_claimed; i++) { addr = be64_to_cpu(out->pas[i]); - page = remove_page(dev, addr); - if (!page) { - mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr); - } else { - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - } + free_4k(dev, addr); } out_free: @@ -381,14 +455,19 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) return give_pages(dev, func_id, npages, 0); } +enum { + MLX5_BLKS_FOR_RECLAIM_PAGES = 12 +}; + static int optimal_reclaimed_pages(void) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_layout *lay; int ret; - ret = (sizeof(lay->in) + sizeof(block->data) - - sizeof(struct mlx5_manage_pages_outbox)) / 8; + ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - + sizeof(struct mlx5_manage_pages_outbox)) / + FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); return ret; } @@ -427,6 +506,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) void mlx5_pagealloc_init(struct mlx5_core_dev *dev) { dev->priv.page_root = RB_ROOT; + INIT_LIST_HEAD(&dev->priv.free_list); } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0951f7aca1e..822616e3c37 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) 
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - &ctl->sg, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!ctl->adesc) return NETDEV_TX_BUSY; @@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_dma_len(sg) = DMA_BUFFER_SIZE; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - sg, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!ctl->adesc) goto out; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5a0f04c2c81..27ffe0ebf0a 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -245,16 +245,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - adapter->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - adapter->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -284,6 +276,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. */ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d4ccd35a01..8a7a23a84ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -435,16 +435,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (config.flags) return -EINVAL; - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - priv->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - priv->hwts_tx_en = 1; - break; - default: + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } if (priv->adv_ts) { switch (config.rx_filter) { @@ -576,6 +569,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) } } priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
0 : 1); + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; if (!priv->hwts_tx_en && !priv->hwts_rx_en) priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 90d41d26ec6..7536a4c0129 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -967,14 +967,19 @@ static inline void cpsw_add_dual_emac_def_ale_entries( priv->host_port, ALE_VLAN, slave->port_vlan); } -static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +static void soft_reset_slave(struct cpsw_slave *slave) { char name[32]; - u32 slave_port; - - sprintf(name, "slave-%d", slave->slave_num); + snprintf(name, sizeof(name), "slave-%d", slave->slave_num); soft_reset(name, &slave->sliver->soft_reset); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + soft_reset_slave(slave); /* setup priority mapping */ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); @@ -1323,6 +1328,10 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2) + return -EOPNOTSUPP; + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -1330,16 +1339,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (cfg.flags) return -EINVAL; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - cpts->tx_enable = 0; - break; - case HWTSTAMP_TX_ON: - cpts->tx_enable = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -1366,6 +1367,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; + switch (priv->version) { case CPSW_VERSION_1: cpsw_hwtstamp_v1(priv); @@ -1374,7 +1377,7 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) cpsw_hwtstamp_v2(priv); break; default: - return -ENOTSUPP; + WARN_ON(1); } return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; @@ -2173,8 +2176,9 @@ static int cpsw_suspend(struct device *dev) if (netif_running(ndev)) cpsw_ndo_stop(ndev); - soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); - soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); + + for_each_slave(priv, soft_reset_slave); + pm_runtime_put_sync(&pdev->dev); /* Select sleep pin state */ diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index e78802e75ea..bcc224a8373 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -389,16 +389,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ch = PORT2CHANNEL(port); regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - port->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - port->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -416,6 +408,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. 
*/ __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9dccb1edfd2..dc76670c2f2 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -628,6 +628,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, const struct iovec *iv, unsigned long total_len, size_t count, int noblock) { + int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long len = total_len; @@ -670,6 +671,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN; + if (copylen > good_linear) + copylen = good_linear; linear = copylen; if (iov_pages(iv, vnet_hdr_len + copylen, count) <= MAX_SKB_FRAGS) @@ -678,7 +681,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (!zerocopy) { copylen = len; - linear = vnet_hdr.hdr_len; + if (vnet_hdr.hdr_len > good_linear) + linear = good_linear; + else + linear = vnet_hdr.hdr_len; } skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6574eb8766f..34b0de09d88 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2650,7 +2650,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, return err; } -static struct genl_ops team_nl_ops[] = { +static const struct genl_ops team_nl_ops[] = { { .cmd = TEAM_CMD_NOOP, .doit = team_nl_cmd_noop, @@ -2676,15 +2676,15 @@ static struct genl_ops team_nl_ops[] = { }, }; -static struct genl_multicast_group team_change_event_mcgrp = { - .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, +static const struct genl_multicast_group team_nl_mcgrps[] = { + { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }, }; static int team_nl_send_multicast(struct sk_buff *skb, struct team *team, u32 portid) { - return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, - team_change_event_mcgrp.id, GFP_KERNEL); + return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev), + skb, 0, 0, GFP_KERNEL); } static int team_nl_send_event_options_get(struct team *team, @@ -2703,23 +2703,8 @@ static int team_nl_send_event_port_get(struct team *team, static int team_nl_init(void) { - int err; - - err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, - ARRAY_SIZE(team_nl_ops)); - if (err) - return err; - - err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); - if (err) - goto err_change_event_grp_reg; - - return 0; - -err_change_event_grp_reg: - genl_unregister_family(&team_nl_family); - - return err; + return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops, + team_nl_mcgrps); } static void team_nl_fini(void) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7cb105c103f..782e38bfc1e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb; size_t len = total_len, align = NET_SKB_PAD, linear; struct virtio_net_hdr gso = { 0 }; + int good_linear; int offset = 0; int copylen; bool zerocopy = false; @@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EINVAL; } + good_linear = SKB_MAX_HEAD(align); + if (msg_control) { /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace. */ copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN; + if (copylen > good_linear) + copylen = good_linear; linear = copylen; if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) zerocopy = true; @@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (!zerocopy) { copylen = len; - linear = gso.hdr_len; + if (gso.hdr_len > good_linear) + linear = good_linear; + else + linear = gso.hdr_len; } skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f74786aa37b..e15ec2b1203 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -66,7 +66,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); static struct usb_driver cdc_ncm_driver; -static u8 cdc_ncm_setup(struct usbnet *dev) +static int cdc_ncm_setup(struct usbnet *dev) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; struct usb_cdc_ncm_ntb_parameters ncm_parm; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 90a429b7eba..8494bb53ebd 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -204,9 +204,6 @@ static void intr_complete (struct urb *urb) break; } - if (!netif_running (dev->net)) - return; - status = usb_submit_urb (urb, GFP_ATOMIC); if (status != 0) netif_err(dev, timer, dev->net, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cdc7c90a6a9..7bab4de658a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -36,7 +36,10 @@ module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. */ -#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \ + sizeof(struct virtio_net_hdr_mrg_rxbuf), \ + L1_CACHE_BYTES)) #define GOOD_COPY_LEN 128 #define VIRTNET_DRIVER_VERSION "1.0.0" @@ -314,10 +317,10 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) head_skb->dev->stats.rx_length_errors++; return -EINVAL; } - if (unlikely(len > MAX_PACKET_LEN)) { + if (unlikely(len > MERGE_BUFFER_LEN)) { pr_debug("%s: rx error: merge buffer too long\n", head_skb->dev->name); - len = MAX_PACKET_LEN; + len = MERGE_BUFFER_LEN; } if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); @@ -336,18 +339,17 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) if (curr_skb != head_skb) { head_skb->data_len += len; head_skb->len += len; - head_skb->truesize += MAX_PACKET_LEN; + head_skb->truesize += MERGE_BUFFER_LEN; } page = virt_to_head_page(buf); offset = buf - (char *)page_address(page); if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { put_page(page); skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, - len, MAX_PACKET_LEN); + len, MERGE_BUFFER_LEN); } else { skb_add_rx_frag(curr_skb, num_skb_frags, page, - offset, len, - MAX_PACKET_LEN); + offset, len, MERGE_BUFFER_LEN); } --rq->num; } @@ -383,7 +385,7 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) struct page *page = virt_to_head_page(buf); skb = page_to_skb(rq, page, (char *)buf - (char *)page_address(page), - len, MAX_PACKET_LEN); + len, MERGE_BUFFER_LEN); if (unlikely(!skb)) { dev->stats.rx_dropped++; put_page(page); @@ -471,11 +473,11 @@ 
static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) struct skb_vnet_hdr *hdr; int err; - skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); + skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); if (unlikely(!skb)) return -ENOMEM; - skb_put(skb, MAX_PACKET_LEN); + skb_put(skb, GOOD_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); @@ -542,20 +544,20 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) int err; if (gfp & __GFP_WAIT) { - if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag, + if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag, gfp)) { buf = (char *)page_address(vi->alloc_frag.page) + vi->alloc_frag.offset; get_page(vi->alloc_frag.page); - vi->alloc_frag.offset += MAX_PACKET_LEN; + vi->alloc_frag.offset += MERGE_BUFFER_LEN; } } else { - buf = netdev_alloc_frag(MAX_PACKET_LEN); + buf = netdev_alloc_frag(MERGE_BUFFER_LEN); } if (!buf) return -ENOMEM; - sg_init_one(rq->sg, buf, MAX_PACKET_LEN); + sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN); err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1619,8 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev) if (err) goto free_stats; - netif_set_real_num_tx_queues(dev, 1); - netif_set_real_num_rx_queues(dev, 1); + netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); + netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); err = register_netdev(dev); if (err) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index b07f164d65c..20e49095db2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -187,17 +187,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniCckfirJapan2484, ar9485_1_1_baseband_core_txfir_coeff_japan_2484); - /* Load PCIE SERDES settings from INI */ - - /* Awake Setting */ - - INIT_INI_ARRAY(&ah->iniPcieSerdes, - ar9485_1_1_pcie_phy_clkreq_disable_L1); - - /* Sleep Setting */ - - INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, - ar9485_1_1_pcie_phy_clkreq_disable_L1); + if (ah->config.no_pll_pwrsave) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9485_1_1_pcie_phy_clkreq_disable_L1); + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + ar9485_1_1_pcie_phy_clkreq_disable_L1); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); + } } else if (AR_SREV_9462_21(ah)) { INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p1_mac_core); diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 6f899c69264..7c1845221e1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -32,13 +32,6 @@ static const u32 ar9485_1_1_mac_postamble[][5] = { {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, }; -static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18012e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { /* Addr allmodes */ {0x00009e00, 0x037216a0}, @@ -1101,20 +1094,6 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = { {0x0000a1fc, 0x00000296}, }; -static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { - /* Addr allmodes */ - 
{0x00018c00, 0x18052e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - -static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18053e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485_1_1_soc_preamble[][2] = { /* Addr allmodes */ {0x00004014, 0xba280400}, @@ -1173,13 +1152,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, }; -static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18013e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485_1_1_radio_postamble[][2] = { /* Addr allmodes */ {0x0001609c, 0x0b283f31}, @@ -1358,4 +1330,18 @@ static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = { {0x0000a3a0, 0xca9228ee}, }; +static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x18013e5e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + +static const u32 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x1801265e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + #endif /* INITVALS_9485_H */ diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index e7a38d844a6..60a5da53668 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -632,15 +632,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); /* Main driver core */ /********************/ -#define ATH9K_PCI_CUS198 0x0001 -#define ATH9K_PCI_CUS230 0x0002 -#define ATH9K_PCI_CUS217 0x0004 -#define ATH9K_PCI_CUS252 0x0008 -#define ATH9K_PCI_WOW 0x0010 -#define ATH9K_PCI_BT_ANT_DIV 0x0020 -#define ATH9K_PCI_D3_L1_WAR 0x0040 -#define ATH9K_PCI_AR9565_1ANT 0x0080 -#define ATH9K_PCI_AR9565_2ANT 0x0100 +#define ATH9K_PCI_CUS198 0x0001 +#define ATH9K_PCI_CUS230 0x0002 +#define ATH9K_PCI_CUS217 0x0004 +#define ATH9K_PCI_CUS252 0x0008 +#define ATH9K_PCI_WOW 0x0010 +#define ATH9K_PCI_BT_ANT_DIV 0x0020 +#define ATH9K_PCI_D3_L1_WAR 0x0040 +#define ATH9K_PCI_AR9565_1ANT 0x0080 +#define ATH9K_PCI_AR9565_2ANT 0x0100 +#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200 /* * Default cache line size, in bytes. diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 90b8342d1ed..8824610c21f 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -44,14 +44,20 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, if (buf == NULL) return -ENOMEM; - if (sc->dfs_detector) - dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); - len += scnprintf(buf + len, size - len, "DFS support for " "macVersion = 0x%x, macRev = 0x%x: %s\n", hw_ver->macVersion, hw_ver->macRev, (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? "enabled" : "disabled"); + + if (!sc->dfs_detector) { + len += scnprintf(buf + len, size - len, + "DFS detector not enabled\n"); + goto exit; + } + + dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); + len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); ATH9K_DFS_STAT("pulse events reported ", pulses_total); ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); @@ -76,6 +82,7 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, ATH9K_DFS_POOL_STAT("Seqs. 
alloc error ", pseq_alloc_error); ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used); +exit: if (len > size) len = size; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9ea24f1cba7..a2c9a5dbac6 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -316,6 +316,7 @@ struct ath9k_ops_config { u32 ant_ctrl_comm2g_switch_enable; bool xatten_margin_cfg; bool alt_mingainidx; + bool no_pll_pwrsave; }; enum ath9k_int { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d8643ebabd3..710192ed27e 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -609,6 +609,11 @@ static void ath9k_init_platform(struct ath_softc *sc) ah->config.pcie_waen = 0x0040473b; ath_info(common, "Enable WAR for ASPM D3/L1\n"); } + + if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) { + ah->config.no_pll_pwrsave = true; + ath_info(common, "Disable PLL PowerSave\n"); + } } static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, @@ -863,8 +868,8 @@ static const struct ieee80211_iface_combination if_comb[] = { .max_interfaces = 1, .num_different_channels = 1, .beacon_int_infra_match = true, - .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) | - BIT(NL80211_CHAN_HT20), + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20), } }; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7e4c2524b63..b5656fce4ff 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -195,6 +195,93 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 0x3219), .driver_data = ATH9K_PCI_BT_ANT_DIV }, + /* AR9485 cards with PLL power-save disabled by default. 
*/ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2C97), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2100), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1C56, /* ASKEY */ + 0x4001), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x11AD, /* LITEON */ + 0x6627), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x11AD, /* LITEON */ + 0x6628), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_FOXCONN, + 0xE04E), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_FOXCONN, + 0xE04F), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x144F, /* ASKEY */ + 0x7197), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x2000), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x2001), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1186), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1F86), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1195), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1F95), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x1C00), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x1C01), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_ASUSTEK, + 0x850D), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c index 5b84f7ae0b1..ef44a2da644 100644 --- a/drivers/net/wireless/ath/wcn36xx/debug.c +++ b/drivers/net/wireless/ath/wcn36xx/debug.c @@ -126,7 +126,7 @@ static ssize_t write_file_dump(struct file *file, if (begin == NULL) break; - if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0) + if (kstrtou32(begin, 0, &arg[i]) != 0) break; } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index f8c3a10510c..de9eb2cfbf4 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1286,7 +1286,8 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, } else { wcn36xx_err("Beacon is to big: beacon size=%d\n", msg_body.beacon_length); - return -ENOMEM; + ret = -ENOMEM; + goto out; } memcpy(msg_body.bssid, vif->addr, ETH_ALEN); @@ -1327,7 +1328,8 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn, if (skb->len > BEACON_TEMPLATE_SIZE) { wcn36xx_warn("probe response template is too big: %d\n", skb->len); - return -E2BIG; + ret = -E2BIG; + goto out; } msg.probe_resp_template_len = skb->len; @@ 
-1606,7 +1608,8 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn, /* TODO: it also support ARP response type */ } else { wcn36xx_warn("unknow keep alive packet type %d\n", packet_type); - return -EINVAL; + ret = -EINVAL; + goto out; } PREPARE_HAL_BUF(wcn->hal_buf, msg_body); diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 668dd27616a..cc6a0a586f0 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, char *p2; struct debug_data *d = f->private_data; - pdata = kmalloc(cnt, GFP_KERNEL); + if (cnt == 0) + return 0; + + pdata = kmalloc(cnt + 1, GFP_KERNEL); if (pdata == NULL) return 0; @@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, kfree(pdata); return 0; } + pdata[cnt] = '\0'; p0 = pdata; for (i = 0; i < num_of_items; i++) { diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index ef8c98e2109..f499efc6abc 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -902,6 +902,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) if (card->model == MODEL_UNKNOWN) { pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", p_dev->manf_id, p_dev->card_id); + ret = -ENODEV; goto out2; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index de0df86704e..9df7bc91a26 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2097,7 +2097,7 @@ out: } /* Generic Netlink operations array */ -static struct genl_ops hwsim_ops[] = { +static const struct genl_ops hwsim_ops[] = { { .cmd = HWSIM_CMD_REGISTER, .policy = hwsim_genl_policy, @@ -2148,8 +2148,7 @@ static int hwsim_init_netlink(void) printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); - rc = genl_register_family_with_ops(&hwsim_genl_family, - hwsim_ops, ARRAY_SIZE(hwsim_ops)); + rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops); if (rc) goto failure; diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index f80f30b6160..c8385ec77a8 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -1020,8 +1020,8 @@ struct mwifiex_power_group { } __packed; struct mwifiex_types_power_group { - u16 type; - u16 length; + __le16 type; + __le16 length; } __packed; struct host_cmd_ds_txpwr_cfg { diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c index 220af4fe0fc..81ac001ee74 100644 --- a/drivers/net/wireless/mwifiex/ie.c +++ b/drivers/net/wireless/mwifiex/ie.c @@ -82,7 +82,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, struct mwifiex_ie_list *ie_list) { u16 travel_len, index, mask; - s16 input_len; + s16 input_len, tlv_len; struct mwifiex_ie *ie; u8 *tmp; @@ -91,11 +91,13 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, ie_list->len = 0; - while (input_len > 0) { + while (input_len >= sizeof(struct mwifiex_ie_types_header)) { ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len); - input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; - travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; + tlv_len = le16_to_cpu(ie->ie_length); + travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE; + if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE) + return -1; index = le16_to_cpu(ie->ie_index); mask = 
le16_to_cpu(ie->mgmt_subtype_mask); @@ -132,6 +134,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, le16_add_cpu(&ie_list->len, le16_to_cpu(priv->mgmt_ie[index].ie_length) + MWIFIEX_IE_HDR_SIZE); + input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE; } if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 1576104e3d9..9bf8898743a 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -1029,7 +1029,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb, u32 upld_typ) { u8 *cmd_buf; + __le16 *curr_ptr = (__le16 *)skb->data; + u16 pkt_len = le16_to_cpu(*curr_ptr); + skb_trim(skb, pkt_len); skb_pull(skb, INTF_HEADER_LEN); switch (upld_typ) { diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 7d66018a2e3..2181ee283d8 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -239,14 +239,14 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd, memmove(cmd_txp_cfg, txp, sizeof(struct host_cmd_ds_txpwr_cfg) + sizeof(struct mwifiex_types_power_group) + - pg_tlv->length); + le16_to_cpu(pg_tlv->length)); pg_tlv = (struct mwifiex_types_power_group *) ((u8 *) cmd_txp_cfg + sizeof(struct host_cmd_ds_txpwr_cfg)); cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) + sizeof(struct mwifiex_types_power_group) + - pg_tlv->length); + le16_to_cpu(pg_tlv->length)); } else { memmove(cmd_txp_cfg, txp, sizeof(*txp)); } diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 58a6013712d..2675ca7f8d1 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -274,17 +274,20 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg; struct mwifiex_rate_scope *rate_scope; struct mwifiex_ie_types_header *head; - u16 tlv, tlv_buf_len; + u16 tlv, tlv_buf_len, tlv_buf_left; u8 *tlv_buf; u32 i; - tlv_buf = ((u8 *)rate_cfg) + - sizeof(struct host_cmd_ds_tx_rate_cfg); - tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16))); + tlv_buf = ((u8 *)rate_cfg) + sizeof(struct host_cmd_ds_tx_rate_cfg); + tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*rate_cfg); - while (tlv_buf && tlv_buf_len > 0) { - tlv = (*tlv_buf); - tlv = tlv | (*(tlv_buf + 1) << 8); + while (tlv_buf_left >= sizeof(*head)) { + head = (struct mwifiex_ie_types_header *)tlv_buf; + tlv = le16_to_cpu(head->type); + tlv_buf_len = le16_to_cpu(head->len); + + if (tlv_buf_left < (sizeof(*head) + tlv_buf_len)) + break; switch (tlv) { case TLV_TYPE_RATE_SCOPE: @@ -304,9 +307,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, /* Add RATE_DROP tlv here */ } - head = (struct mwifiex_ie_types_header *) tlv_buf; - tlv_buf += le16_to_cpu(head->len) + sizeof(*head); - tlv_buf_len -= le16_to_cpu(head->len); + tlv_buf += (sizeof(*head) + tlv_buf_len); + tlv_buf_left -= (sizeof(*head) + tlv_buf_len); } priv->is_data_rate_auto = mwifiex_is_rate_auto(priv); @@ -340,13 +342,17 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg)); pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group)); - length = pg_tlv_hdr->length; - if (length > 0) { - max_power = pg->power_max; - 
min_power = pg->power_min; - length -= sizeof(struct mwifiex_power_group); - } - while (length) { + length = le16_to_cpu(pg_tlv_hdr->length); + + /* At least one structure required to update power */ + if (length < sizeof(struct mwifiex_power_group)) + return 0; + + max_power = pg->power_max; + min_power = pg->power_min; + length -= sizeof(struct mwifiex_power_group); + + while (length >= sizeof(struct mwifiex_power_group)) { pg++; if (max_power < pg->power_max) max_power = pg->power_max; @@ -356,10 +362,8 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) length -= sizeof(struct mwifiex_power_group); } - if (pg_tlv_hdr->length > 0) { - priv->min_tx_power_level = (u8) min_power; - priv->max_tx_power_level = (u8) max_power; - } + priv->min_tx_power_level = (u8) min_power; + priv->max_tx_power_level = (u8) max_power; return 0; } diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index f084412eee0..c8e029df770 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -638,8 +638,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, txp_cfg->mode = cpu_to_le32(1); pg_tlv = (struct mwifiex_types_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); - pg_tlv->type = TLV_TYPE_POWER_GROUP; - pg_tlv->length = 4 * sizeof(struct mwifiex_power_group); + pg_tlv->type = cpu_to_le16(TLV_TYPE_POWER_GROUP); + pg_tlv->length = + cpu_to_le16(4 * sizeof(struct mwifiex_power_group)); pg = (struct mwifiex_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg) + sizeof(struct mwifiex_types_power_group)); diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 1cfe5a738c4..92f76d655e6 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -97,6 +97,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, struct mwifiex_txinfo *tx_info; int hdr_chop; struct timeval tv; + struct ethhdr *p_ethhdr; u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; uap_rx_pd = (struct uap_rxpd *)(skb->data); @@ -112,14 +113,36 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, } if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, - rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) + rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) { + /* Replace the 803 header and rfc1042 header (llc/snap) with + * an Ethernet II header, keep the src/dst and snap_type + * (ethertype). + * + * The firmware only passes up SNAP frames converting all RX + * data from 802.11 to 802.2/LLC/SNAP frames. + * + * To create the Ethernet II, just move the src, dst address + * right before the snap_type. + */ + p_ethhdr = (struct ethhdr *) + ((u8 *)(&rx_pkt_hdr->eth803_hdr) + + sizeof(rx_pkt_hdr->eth803_hdr) + + sizeof(rx_pkt_hdr->rfc1042_hdr) + - sizeof(rx_pkt_hdr->eth803_hdr.h_dest) + - sizeof(rx_pkt_hdr->eth803_hdr.h_source) + - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type)); + memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source, + sizeof(p_ethhdr->h_source)); + memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest, + sizeof(p_ethhdr->h_dest)); /* Chop off the rxpd + the excess memory from * 802.2/llc/snap header that was removed. 
*/ - hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd; - else + hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd; + } else { /* Chop off the rxpd */ hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd; + } /* Chop off the leading header bytes so the it points * to the start of either the reconstructed EthII frame diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 5dd0ccc70b8..13eaeed0389 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -722,6 +722,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, tlv_hdr = (struct mwifiex_ie_types_data *) curr; tlv_len = le16_to_cpu(tlv_hdr->header.len); + if (resp_len < tlv_len + sizeof(tlv_hdr->header)) + break; + switch (le16_to_cpu(tlv_hdr->header.type)) { case TLV_TYPE_WMMQSTATUS: tlv_wmm_qstatus = diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 41a16d30c79..e05d9b4c831 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -811,6 +811,10 @@ static const struct net_device_ops islpci_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +static struct device_type wlan_type = { + .name = "wlan", +}; + struct net_device * islpci_setup(struct pci_dev *pdev) { @@ -821,9 +825,8 @@ islpci_setup(struct pci_dev *pdev) return ndev; pci_set_drvdata(pdev, ndev); -#if defined(SET_NETDEV_DEV) SET_NETDEV_DEV(ndev, &pdev->dev); -#endif + SET_NETDEV_DEVTYPE(ndev, &wlan_type); /* setup the structure members */ ndev->base_addr = pci_resource_start(pdev, 0); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index c5738f14c4b..776aff3678f 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2640,7 +2640,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); - if (info->default_power1 > POWER_BOUND) + if (info->default_power2 > POWER_BOUND) rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND); else rt2x00_set_field8(&rfcsr, RFCSR50_TX, diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index a0935987fa3..7f40ab8e1bd 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length); * @local: frame is not from mac80211 */ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, - bool local); + struct ieee80211_sta *sta, bool local); /** * rt2x00queue_update_beacon - Send new beacon from mac80211 diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 7c157857f5c..2183e797839 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, frag_skb->data, data_length, tx_info, (struct ieee80211_rts *)(skb->data)); - retval = rt2x00queue_write_tx_frame(queue, skb, true); + retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); if (retval) { dev_kfree_skb_any(skb); rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); @@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, goto exit_fail; } - if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) + if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, 
false))) goto exit_fail; /* diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 50590b1420a..a5d38e8ad9e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry) } int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, - bool local) + struct ieee80211_sta *sta, bool local) { struct ieee80211_tx_info *tx_info; struct queue_entry *entry; @@ -649,7 +649,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, * after that we are free to use the skb->cb array * for our information. */ - rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL); + rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); /* * All information is retrieved from the skb->cb array, diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 9a78e3daf74..ff784072fb4 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -37,6 +37,7 @@ #include <linux/ip.h> #include <linux/module.h> +#include <linux/udp.h> /* *NOTICE!!!: This file will be very big, we should @@ -1074,64 +1075,52 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) if (!ieee80211_is_data(fc)) return false; + ip = (const struct iphdr *)(skb->data + mac_hdr_len + + SNAP_SIZE + PROTOC_TYPE_SIZE); + ether_type = be16_to_cpup((__be16 *) + (skb->data + mac_hdr_len + SNAP_SIZE)); - ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + - SNAP_SIZE + PROTOC_TYPE_SIZE); - ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); - /* ether_type = ntohs(ether_type); */ - - if (ETH_P_IP == ether_type) { - if (IPPROTO_UDP == ip->protocol) { - struct udphdr *udp = (struct udphdr *)((u8 *) ip + - (ip->ihl << 2)); - if (((((u8 *) udp)[1] == 68) && - (((u8 *) udp)[3] == 67)) || - ((((u8 *) udp)[1] == 67) && - (((u8 *) udp)[3] == 68))) { - /* - * 68 : UDP BOOTP client - * 67 : UDP BOOTP server - */ - RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), - DBG_DMESG, "dhcp %s !!\n", - is_tx ? "Tx" : "Rx"); - - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv-> - works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = - jiffies; - } + switch (ether_type) { + case ETH_P_IP: { + struct udphdr *udp; + u16 src; + u16 dst; - return true; - } - } - } else if (ETH_P_ARP == ether_type) { - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = jiffies; - } + if (ip->protocol != IPPROTO_UDP) + return false; + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + src = be16_to_cpu(udp->source); + dst = be16_to_cpu(udp->dest); - return true; - } else if (ETH_P_PAE == ether_type) { + /* If this case involves port 68 (UDP BOOTP client) connecting + * with port 67 (UDP BOOTP server), then return true so that + * the lowest speed is used. + */ + if (!((src == 68 && dst == 67) || (src == 67 && dst == 68))) + return false; + + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, + "dhcp %s !!\n", is_tx ? "Tx" : "Rx"); + break; + } + case ETH_P_ARP: + break; + case ETH_P_PAE: RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "802.1X %s EAPOL pkt!!\n", is_tx ? 
"Tx" : "Rx"); - - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = jiffies; - } - - return true; - } else if (ETH_P_IPV6 == ether_type) { - /* IPv6 */ - return true; + break; + case ETH_P_IPV6: + /* TODO: Is this right? */ + return false; + default: + return false; } - - return false; + if (is_tx) { + rtlpriv->enter_ps = false; + schedule_work(&rtlpriv->works.lps_change_work); + ppsc->last_delaylps_stamp_jiffies = jiffies; + } + return true; } EXPORT_SYMBOL_GPL(rtl_is_special_data); diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index ae13fb94b2e..2ffc7298f68 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c @@ -262,9 +262,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) sizeof(u8), GFP_ATOMIC); if (!efuse_tbl) return; - efuse_word = kmalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); + efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); if (!efuse_word) - goto done; + goto out; for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16), GFP_ATOMIC); @@ -378,6 +378,7 @@ done: for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) kfree(efuse_word[i]); kfree(efuse_word); +out: kfree(efuse_tbl); } diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 25e50ffc44e..b0c346a9e4b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -349,7 +349,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, p_drvinfo); } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index 945ddecf90c..0eb0f4ae592 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c @@ -525,7 +525,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, p_drvinfo); } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c index 5061f1db3f0..92d38ab3c60 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c @@ -265,7 +265,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw, rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]) { pwrdiff_limit[i] = - rtlefuse->pwrgroup_ht20 + rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]; } } else { diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 222d2e792ca..27efbcdac6a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -329,7 +329,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index d224dc3bb09..0c65386fa30 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -77,11 +77,7 
@@ #define RTL_SLOT_TIME_9 9 #define RTL_SLOT_TIME_20 20 -/*related with tcp/ip. */ -/*if_ehther.h*/ -#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */ -#define ETH_P_IP 0x0800 /*Internet Protocol packet */ -#define ETH_P_ARP 0x0806 /*Address Resolution packet */ +/*related to tcp/ip. */ #define SNAP_SIZE 6 #define PROTOC_TYPE_SIZE 2 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d85e6697971..e59acb1daa2 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -277,12 +277,13 @@ static void xennet_alloc_rx_buffers(struct net_device *dev) if (!page) { kfree_skb(skb); no_skb: - /* Any skbuffs queued for refill? Force them out. */ - if (i != 0) - goto refill; /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); + + /* Any skbuffs queued for refill? Force them out. */ + if (i != 0) + goto refill; break; } diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 12a9e83c008..d0222f13d15 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1034,10 +1034,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t pay_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - unsigned long flags; entry->len = len; @@ -1045,35 +1044,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, goto err; if (len < copy_bytes) - goto err1; + goto err_wait; device = chan->device; pay_off = (size_t) offset & ~PAGE_MASK; buff_off = (size_t) buf & ~PAGE_MASK; if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) - goto err1; + goto err_wait; - dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE); - if (dma_mapping_error(device->dev, dest)) - goto err1; + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); + if (!unmap) + goto err_wait; - src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) - goto err2; + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), + pay_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; + + unmap->to_cnt = 1; - flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE | - DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_FROM_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[1])) + goto err_get_unmap; + + unmap->from_cnt = 1; + + txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (!txd) - goto err3; + goto err_get_unmap; txd->callback = ntb_rx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err3; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); qp->last_cookie = cookie; @@ -1081,11 +1094,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, return; -err3: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); -err2: - dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE); -err1: +err_set_unmap: + dmaengine_unmap_put(unmap); +err_get_unmap: + dmaengine_unmap_put(unmap); +err_wait: /* If the callbacks come out of order, the writing of the index to the * last completed will be out of 
order. This may result in the * receive stalling forever. @@ -1245,12 +1258,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t dest_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; + dma_addr_t dest; dma_cookie_t cookie; void __iomem *offset; size_t len = entry->len; void *buf = entry->buf; - unsigned long flags; offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); @@ -1273,28 +1286,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) goto err; - src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) + unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); + if (!unmap) goto err; - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; + + unmap->to_cnt = 1; + + txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (!txd) - goto err1; + goto err_get_unmap; txd->callback = ntb_tx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err1; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); dma_async_issue_pending(chan); qp->tx_async++; return; -err1: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); +err_set_unmap: + dmaengine_unmap_put(unmap); +err_get_unmap: + dmaengine_unmap_put(unmap); err: ntb_memcpy_tx(entry, offset); qp->tx_memcpy++; diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 1ce8ee054f1..a94d850ae22 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; } - handle = DEVICE_ACPI_HANDLE(&pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!handle) { /* * This hotplug controller was not listed in the ACPI name diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 26100f510b1..1592dbe4f90 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -176,7 +176,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot); /* variables */ -extern bool acpiphp_debug; extern bool acpiphp_disabled; #endif /* _ACPIPHP_H */ diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index ead7c534095..cff7cadfc2e 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c @@ -54,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev) { if (slot_detection_mode != PCIEHP_DETECT_ACPI) return 0; - if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) + if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev))) return 0; return -ENODEV; } @@ -96,7 +96,7 @@ static int __init dummy_probe(struct pcie_device *dev) dup_slot_id++; } list_add_tail(&slot->list, &dummy_slots); - handle = DEVICE_ACPI_HANDLE(&pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) 
acpi_slot_detected = 1; return -ENODEV; /* dummy driver always returns error */ diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index b2781dfe60e..5b05a68cca6 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -9,6 +9,7 @@ * Work to add BIOS PROM support was completed by Mike Habeck. */ +#include <linux/acpi.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> @@ -29,7 +30,6 @@ #include <asm/sn/sn_feature_sets.h> #include <asm/sn/sn_sal.h> #include <asm/sn/types.h> -#include <linux/acpi.h> #include <asm/sn/acpi.h> #include "../pci.h" @@ -414,7 +414,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) acpi_handle rethandle; acpi_status ret; - phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; + phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); if (acpi_bus_get_device(phandle, &pdevice)) { dev_dbg(&slot->pci_bus->self->dev, @@ -495,7 +495,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) /* free the ACPI resources for the slot */ if (SN_ACPI_BASE_SUPPORT() && - PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { + PCI_CONTROLLER(slot->pci_bus)->companion) { unsigned long long adr; struct acpi_device *device; acpi_handle phandle; @@ -504,7 +504,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) acpi_status ret; /* Get the rootbus node pointer */ - phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; + phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); acpi_scan_lock_acquire(); /* diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 1b90579b233..50ce6809829 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c @@ -37,7 +37,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) char *type; struct resource *res; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle) return -EINVAL; diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dfd1f59de72..f166126e28d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -173,14 +173,14 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) static bool acpi_pci_power_manageable(struct pci_dev *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); return handle ? acpi_bus_power_manageable(handle) : false; } static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); static const u8 state_conv[] = { [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, @@ -217,7 +217,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) static bool acpi_pci_can_wakeup(struct pci_dev *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); return handle ? 
acpi_bus_can_wakeup(handle) : false; } diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index edaed6f4da6..d51f45aa669 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -263,7 +263,7 @@ device_has_dsm(struct device *dev) acpi_handle handle; struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return FALSE; @@ -295,7 +295,7 @@ acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf) acpi_handle handle; int length; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return -1; @@ -316,7 +316,7 @@ acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf) acpi_handle handle; int length; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return -1; diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 605a9be5512..b9429fbf1cd 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -519,7 +519,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) gmux_data->power_state = VGA_SWITCHEROO_ON; - gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); + gmux_data->dhandle = ACPI_HANDLE(&pnp->dev); if (!gmux_data->dhandle) { pr_err("Cannot find acpi handle for pnp device %s\n", dev_name(&pnp->dev)); diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 747826d9905..14655a0f043 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -89,7 +89,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) pnp_dbg(&dev->dev, "set resources\n"); - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return -ENODEV; @@ -122,7 +122,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) dev_dbg(&dev->dev, "disable resources\n"); - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; @@ -144,7 +144,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev) struct acpi_device *acpi_dev; acpi_handle handle; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return false; @@ -159,7 +159,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) acpi_handle handle; int error = 0; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; @@ -194,7 +194,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) static int pnpacpi_resume(struct pnp_dev *dev) { struct acpi_device *acpi_dev; - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); int error = 0; if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index e6f92b45091..5e2054afe84 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -346,6 +346,12 @@ config CHARGER_BQ24190 help Say Y to enable support for the TI BQ24190 battery charger. 
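Editor's note: the Kconfig and Makefile hunks that follow introduce the TI BQ24735 charger driver, whose registers are little-endian SMBus words updated with a read-modify-write helper. The sketch below is a rough standalone restatement of that update pattern, using a stubbed register array instead of the kernel's i2c_smbus_*_word_data() calls; the register and bit names follow the defines added below, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an SMBus word register file; the real driver goes through
 * the I2C/SMBus layer.  0x12 / bit 0 correspond to the ChargeOption
 * register and charge-disable bit defined in the new driver. */
#define CHG_OPT_REG		0x12
#define CHG_OPT_CHARGE_DISABLE	(1u << 0)

static uint16_t regs[256];

static int read_word(uint8_t reg)                 { return regs[reg]; }
static int write_word(uint8_t reg, uint16_t val)  { regs[reg] = val; return 0; }

/* Read-modify-write of a word register: clear the masked bits, then OR in
 * the new value restricted to the mask. */
static int update_word(uint8_t reg, uint16_t mask, uint16_t value)
{
	int ret = read_word(reg);
	uint16_t tmp;

	if (ret < 0)
		return ret;

	tmp = (uint16_t)ret & ~mask;
	tmp |= value & mask;
	return write_word(reg, tmp);
}

int main(void)
{
	regs[CHG_OPT_REG] = 0xf000 | CHG_OPT_CHARGE_DISABLE;

	/* Enable charging: clear only the charge-disable bit. */
	update_word(CHG_OPT_REG, CHG_OPT_CHARGE_DISABLE, 0);
	printf("ChargeOption = 0x%04x\n", regs[CHG_OPT_REG]);
	return 0;
}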
+config CHARGER_BQ24735 + tristate "TI BQ24735 battery charger support" + depends on I2C && GPIOLIB + help + Say Y to enable support for the TI BQ24735 battery charger. + config CHARGER_SMB347 tristate "Summit Microelectronics SMB347 Battery Charger" depends on I2C diff --git a/drivers/power/Makefile b/drivers/power/Makefile index a4b74177706..372b4e8ab59 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o +obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o obj-$(CONFIG_POWER_AVS) += avs/ obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c index a4c4a10b3a4..19110aa613a 100644 --- a/drivers/power/ab8500_charger.c +++ b/drivers/power/ab8500_charger.c @@ -766,7 +766,6 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di, ret = -ENXIO; break; } - break; case USB_STAT_CARKIT_1: case USB_STAT_CARKIT_2: case USB_STAT_ACA_DOCK_CHARGER: @@ -1387,8 +1386,12 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger, * the GPADC module independant of the AB8500 chargers */ if (!di->vddadc_en_ac) { - regulator_enable(di->regu); - di->vddadc_en_ac = true; + ret = regulator_enable(di->regu); + if (ret) + dev_warn(di->dev, + "Failed to enable regulator\n"); + else + di->vddadc_en_ac = true; } /* Check if the requested voltage or current is valid */ @@ -1556,8 +1559,12 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger, * the GPADC module independant of the AB8500 chargers */ if (!di->vddadc_en_usb) { - regulator_enable(di->regu); - di->vddadc_en_usb = true; + ret = regulator_enable(di->regu); + if (ret) + dev_warn(di->dev, + "Failed to enable regulator\n"); + else + di->vddadc_en_usb = true; } /* Enable USB charging */ diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c index 0727f925613..df893dd1447 100644 --- a/drivers/power/bq2415x_charger.c +++ b/drivers/power/bq2415x_charger.c @@ -605,9 +605,13 @@ static int bq2415x_set_battery_regulation_voltage(struct bq2415x_device *bq, { int val = (mV/10 - 350) / 2; + /* + * According to datasheet, maximum battery regulation voltage is + * 4440mV which is b101111 = 47. + */ if (val < 0) val = 0; - else if (val > 94) /* FIXME: Max is 94 or 122 ? Set max value ? */ + else if (val > 47) return -EINVAL; return bq2415x_i2c_write_mask(bq, BQ2415X_REG_VOLTAGE, val, diff --git a/drivers/power/bq24735-charger.c b/drivers/power/bq24735-charger.c new file mode 100644 index 00000000000..d022b823305 --- /dev/null +++ b/drivers/power/bq24735-charger.c @@ -0,0 +1,419 @@ +/* + * Battery charger driver for TI BQ24735 + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/power_supply.h> +#include <linux/slab.h> + +#include <linux/power/bq24735-charger.h> + +#define BQ24735_CHG_OPT 0x12 +#define BQ24735_CHG_OPT_CHARGE_DISABLE (1 << 0) +#define BQ24735_CHG_OPT_AC_PRESENT (1 << 4) +#define BQ24735_CHARGE_CURRENT 0x14 +#define BQ24735_CHARGE_CURRENT_MASK 0x1fc0 +#define BQ24735_CHARGE_VOLTAGE 0x15 +#define BQ24735_CHARGE_VOLTAGE_MASK 0x7ff0 +#define BQ24735_INPUT_CURRENT 0x3f +#define BQ24735_INPUT_CURRENT_MASK 0x1f80 +#define BQ24735_MANUFACTURER_ID 0xfe +#define BQ24735_DEVICE_ID 0xff + +struct bq24735 { + struct power_supply charger; + struct i2c_client *client; + struct bq24735_platform *pdata; +}; + +static inline struct bq24735 *to_bq24735(struct power_supply *psy) +{ + return container_of(psy, struct bq24735, charger); +} + +static enum power_supply_property bq24735_charger_properties[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static inline int bq24735_write_word(struct i2c_client *client, u8 reg, + u16 value) +{ + return i2c_smbus_write_word_data(client, reg, le16_to_cpu(value)); +} + +static inline int bq24735_read_word(struct i2c_client *client, u8 reg) +{ + s32 ret = i2c_smbus_read_word_data(client, reg); + + return ret < 0 ? ret : le16_to_cpu(ret); +} + +static int bq24735_update_word(struct i2c_client *client, u8 reg, + u16 mask, u16 value) +{ + unsigned int tmp; + int ret; + + ret = bq24735_read_word(client, reg); + if (ret < 0) + return ret; + + tmp = ret & ~mask; + tmp |= value & mask; + + return bq24735_write_word(client, reg, tmp); +} + +static inline int bq24735_enable_charging(struct bq24735 *charger) +{ + return bq24735_update_word(charger->client, BQ24735_CHG_OPT, + BQ24735_CHG_OPT_CHARGE_DISABLE, + ~BQ24735_CHG_OPT_CHARGE_DISABLE); +} + +static inline int bq24735_disable_charging(struct bq24735 *charger) +{ + return bq24735_update_word(charger->client, BQ24735_CHG_OPT, + BQ24735_CHG_OPT_CHARGE_DISABLE, + BQ24735_CHG_OPT_CHARGE_DISABLE); +} + +static int bq24735_config_charger(struct bq24735 *charger) +{ + struct bq24735_platform *pdata = charger->pdata; + int ret; + u16 value; + + if (pdata->charge_current) { + value = pdata->charge_current & BQ24735_CHARGE_CURRENT_MASK; + + ret = bq24735_write_word(charger->client, + BQ24735_CHARGE_CURRENT, value); + if (ret < 0) { + dev_err(&charger->client->dev, + "Failed to write charger current : %d\n", + ret); + return ret; + } + } + + if (pdata->charge_voltage) { + value = pdata->charge_voltage & BQ24735_CHARGE_VOLTAGE_MASK; + + ret = bq24735_write_word(charger->client, + BQ24735_CHARGE_VOLTAGE, value); + if (ret < 0) { + dev_err(&charger->client->dev, + "Failed to write charger voltage : %d\n", + ret); + return ret; + } + } + + if (pdata->input_current) { + value = pdata->input_current & BQ24735_INPUT_CURRENT_MASK; + + ret = bq24735_write_word(charger->client, + BQ24735_INPUT_CURRENT, value); + if (ret < 0) { + dev_err(&charger->client->dev, + "Failed to write input current : %d\n", + ret); + return ret; + } + } + + return 0; +} + +static bool bq24735_charger_is_present(struct bq24735 *charger) +{ + struct bq24735_platform *pdata 
= charger->pdata; + int ret; + + if (pdata->status_gpio_valid) { + ret = gpio_get_value_cansleep(pdata->status_gpio); + return ret ^= pdata->status_gpio_active_low == 0; + } else { + int ac = 0; + + ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT); + if (ac < 0) { + dev_err(&charger->client->dev, + "Failed to read charger options : %d\n", + ac); + return false; + } + return (ac & BQ24735_CHG_OPT_AC_PRESENT) ? true : false; + } + + return false; +} + +static irqreturn_t bq24735_charger_isr(int irq, void *devid) +{ + struct power_supply *psy = devid; + struct bq24735 *charger = to_bq24735(psy); + + if (bq24735_charger_is_present(charger)) + bq24735_enable_charging(charger); + else + bq24735_disable_charging(charger); + + power_supply_changed(psy); + + return IRQ_HANDLED; +} + +static int bq24735_charger_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct bq24735 *charger; + + charger = container_of(psy, struct bq24735, charger); + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + val->intval = bq24735_charger_is_present(charger) ? 1 : 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client) +{ + struct bq24735_platform *pdata; + struct device_node *np = client->dev.of_node; + u32 val; + int ret; + enum of_gpio_flags flags; + + pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(&client->dev, + "Memory alloc for bq24735 pdata failed\n"); + return NULL; + } + + pdata->status_gpio = of_get_named_gpio_flags(np, "ti,ac-detect-gpios", + 0, &flags); + + if (flags & OF_GPIO_ACTIVE_LOW) + pdata->status_gpio_active_low = 1; + + ret = of_property_read_u32(np, "ti,charge-current", &val); + if (!ret) + pdata->charge_current = val; + + ret = of_property_read_u32(np, "ti,charge-voltage", &val); + if (!ret) + pdata->charge_voltage = val; + + ret = of_property_read_u32(np, "ti,input-current", &val); + if (!ret) + pdata->input_current = val; + + return pdata; +} + +static int bq24735_charger_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct bq24735 *charger; + struct power_supply *supply; + char *name; + + charger = devm_kzalloc(&client->dev, sizeof(*charger), GFP_KERNEL); + if (!charger) + return -ENOMEM; + + charger->pdata = client->dev.platform_data; + + if (IS_ENABLED(CONFIG_OF) && !charger->pdata && client->dev.of_node) + charger->pdata = bq24735_parse_dt_data(client); + + if (!charger->pdata) { + dev_err(&client->dev, "no platform data provided\n"); + return -EINVAL; + } + + name = (char *)charger->pdata->name; + if (!name) { + name = kasprintf(GFP_KERNEL, "bq24735@%s", + dev_name(&client->dev)); + if (!name) { + dev_err(&client->dev, "Failed to alloc device name\n"); + return -ENOMEM; + } + } + + charger->client = client; + + supply = &charger->charger; + + supply->name = name; + supply->type = POWER_SUPPLY_TYPE_MAINS; + supply->properties = bq24735_charger_properties; + supply->num_properties = ARRAY_SIZE(bq24735_charger_properties); + supply->get_property = bq24735_charger_get_property; + supply->supplied_to = charger->pdata->supplied_to; + supply->num_supplicants = charger->pdata->num_supplicants; + supply->of_node = client->dev.of_node; + + i2c_set_clientdata(client, charger); + + ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID); + if (ret < 0) { + dev_err(&client->dev, "Failed to read manufacturer id : %d\n", + ret); + goto err_free_name; + } else 
if (ret != 0x0040) { + dev_err(&client->dev, + "manufacturer id mismatch. 0x0040 != 0x%04x\n", ret); + ret = -ENODEV; + goto err_free_name; + } + + ret = bq24735_read_word(client, BQ24735_DEVICE_ID); + if (ret < 0) { + dev_err(&client->dev, "Failed to read device id : %d\n", ret); + goto err_free_name; + } else if (ret != 0x000B) { + dev_err(&client->dev, + "device id mismatch. 0x000b != 0x%04x\n", ret); + ret = -ENODEV; + goto err_free_name; + } + + if (gpio_is_valid(charger->pdata->status_gpio)) { + ret = devm_gpio_request(&client->dev, + charger->pdata->status_gpio, + name); + if (ret) { + dev_err(&client->dev, + "Failed GPIO request for GPIO %d: %d\n", + charger->pdata->status_gpio, ret); + } + + charger->pdata->status_gpio_valid = !ret; + } + + ret = bq24735_config_charger(charger); + if (ret < 0) { + dev_err(&client->dev, "failed in configuring charger"); + goto err_free_name; + } + + /* check for AC adapter presence */ + if (bq24735_charger_is_present(charger)) { + ret = bq24735_enable_charging(charger); + if (ret < 0) { + dev_err(&client->dev, "Failed to enable charging\n"); + goto err_free_name; + } + } + + ret = power_supply_register(&client->dev, supply); + if (ret < 0) { + dev_err(&client->dev, "Failed to register power supply: %d\n", + ret); + goto err_free_name; + } + + if (client->irq) { + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, bq24735_charger_isr, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + supply->name, supply); + if (ret) { + dev_err(&client->dev, + "Unable to register IRQ %d err %d\n", + client->irq, ret); + goto err_unregister_supply; + } + } + + return 0; +err_unregister_supply: + power_supply_unregister(supply); +err_free_name: + if (name != charger->pdata->name) + kfree(name); + + return ret; +} + +static int bq24735_charger_remove(struct i2c_client *client) +{ + struct bq24735 *charger = i2c_get_clientdata(client); + + if (charger->client->irq) + devm_free_irq(&charger->client->dev, charger->client->irq, + &charger->charger); + + power_supply_unregister(&charger->charger); + + if (charger->charger.name != charger->pdata->name) + kfree(charger->charger.name); + + return 0; +} + +static const struct i2c_device_id bq24735_charger_id[] = { + { "bq24735-charger", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, bq24735_charger_id); + +static const struct of_device_id bq24735_match_ids[] = { + { .compatible = "ti,bq24735", }, + { /* end */ } +}; +MODULE_DEVICE_TABLE(of, bq24735_match_ids); + +static struct i2c_driver bq24735_charger_driver = { + .driver = { + .name = "bq24735-charger", + .owner = THIS_MODULE, + .of_match_table = bq24735_match_ids, + }, + .probe = bq24735_charger_probe, + .remove = bq24735_charger_remove, + .id_table = bq24735_charger_id, +}; + +module_i2c_driver(bq24735_charger_driver); + +MODULE_DESCRIPTION("bq24735 battery charging driver"); +MODULE_AUTHOR("Darbha Sriharsha <dsriharsha@nvidia.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c index e30e847600b..7287c0efd6b 100644 --- a/drivers/power/charger-manager.c +++ b/drivers/power/charger-manager.c @@ -1378,7 +1378,8 @@ static int charger_manager_register_sysfs(struct charger_manager *cm) charger = &desc->charger_regulators[i]; snprintf(buf, 10, "charger.%d", i); - str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL); + str = devm_kzalloc(cm->dev, + sizeof(char) * (strlen(buf) + 1), GFP_KERNEL); if (!str) { ret = -ENOMEM; goto err; @@ -1452,30 +1453,23 @@ static int 
charger_manager_probe(struct platform_device *pdev) rtc_dev = NULL; dev_err(&pdev->dev, "Cannot get RTC %s\n", g_desc->rtc_name); - ret = -ENODEV; - goto err_alloc; + return -ENODEV; } } if (!desc) { dev_err(&pdev->dev, "No platform data (desc) found\n"); - ret = -ENODEV; - goto err_alloc; + return -ENODEV; } - cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL); - if (!cm) { - ret = -ENOMEM; - goto err_alloc; - } + cm = devm_kzalloc(&pdev->dev, + sizeof(struct charger_manager), GFP_KERNEL); + if (!cm) + return -ENOMEM; /* Basic Values. Unspecified are Null or 0 */ cm->dev = &pdev->dev; - cm->desc = kmemdup(desc, sizeof(struct charger_desc), GFP_KERNEL); - if (!cm->desc) { - ret = -ENOMEM; - goto err_alloc_desc; - } + cm->desc = desc; cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */ /* @@ -1498,27 +1492,23 @@ static int charger_manager_probe(struct platform_device *pdev) } if (!desc->charger_regulators || desc->num_charger_regulators < 1) { - ret = -EINVAL; dev_err(&pdev->dev, "charger_regulators undefined\n"); - goto err_no_charger; + return -EINVAL; } if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) { dev_err(&pdev->dev, "No power supply defined\n"); - ret = -EINVAL; - goto err_no_charger_stat; + return -EINVAL; } /* Counting index only */ while (desc->psy_charger_stat[i]) i++; - cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1), - GFP_KERNEL); - if (!cm->charger_stat) { - ret = -ENOMEM; - goto err_no_charger_stat; - } + cm->charger_stat = devm_kzalloc(&pdev->dev, + sizeof(struct power_supply *) * i, GFP_KERNEL); + if (!cm->charger_stat) + return -ENOMEM; for (i = 0; desc->psy_charger_stat[i]; i++) { cm->charger_stat[i] = power_supply_get_by_name( @@ -1526,8 +1516,7 @@ static int charger_manager_probe(struct platform_device *pdev) if (!cm->charger_stat[i]) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_charger_stat[i]); - ret = -ENODEV; - goto err_chg_stat; + return -ENODEV; } } @@ -1535,21 +1524,18 @@ static int charger_manager_probe(struct platform_device *pdev) if (!cm->fuel_gauge) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_fuel_gauge); - ret = -ENODEV; - goto err_chg_stat; + return -ENODEV; } if (desc->polling_interval_ms == 0 || msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) { dev_err(&pdev->dev, "polling_interval_ms is too small\n"); - ret = -EINVAL; - goto err_chg_stat; + return -EINVAL; } if (!desc->temperature_out_of_range) { dev_err(&pdev->dev, "there is no temperature_out_of_range\n"); - ret = -EINVAL; - goto err_chg_stat; + return -EINVAL; } if (!desc->charging_max_duration_ms || @@ -1570,14 +1556,13 @@ static int charger_manager_probe(struct platform_device *pdev) cm->charger_psy.name = cm->psy_name_buf; /* Allocate for psy properties because they may vary */ - cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property) + cm->charger_psy.properties = devm_kzalloc(&pdev->dev, + sizeof(enum power_supply_property) * (ARRAY_SIZE(default_charger_props) + - NUM_CHARGER_PSY_OPTIONAL), - GFP_KERNEL); - if (!cm->charger_psy.properties) { - ret = -ENOMEM; - goto err_chg_stat; - } + NUM_CHARGER_PSY_OPTIONAL), GFP_KERNEL); + if (!cm->charger_psy.properties) + return -ENOMEM; + memcpy(cm->charger_psy.properties, default_charger_props, sizeof(enum power_supply_property) * ARRAY_SIZE(default_charger_props)); @@ -1614,7 +1599,7 @@ static int charger_manager_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Cannot register charger-manager with name 
\"%s\"\n", cm->charger_psy.name); - goto err_register; + return ret; } /* Register extcon device for charger cable */ @@ -1655,8 +1640,6 @@ err_reg_sysfs: charger = &desc->charger_regulators[i]; sysfs_remove_group(&cm->charger_psy.dev->kobj, &charger->attr_g); - - kfree(charger->attr_g.name); } err_reg_extcon: for (i = 0; i < desc->num_charger_regulators; i++) { @@ -1674,16 +1657,7 @@ err_reg_extcon: } power_supply_unregister(&cm->charger_psy); -err_register: - kfree(cm->charger_psy.properties); -err_chg_stat: - kfree(cm->charger_stat); -err_no_charger_stat: -err_no_charger: - kfree(cm->desc); -err_alloc_desc: - kfree(cm); -err_alloc: + return ret; } @@ -1718,11 +1692,6 @@ static int charger_manager_remove(struct platform_device *pdev) try_charger_enable(cm, false); - kfree(cm->charger_psy.properties); - kfree(cm->charger_stat); - kfree(cm->desc); - kfree(cm); - return 0; } diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c index fc04d191579..1bb3a91b1ac 100644 --- a/drivers/power/isp1704_charger.c +++ b/drivers/power/isp1704_charger.c @@ -2,6 +2,7 @@ * ISP1704 USB Charger Detection driver * * Copyright (C) 2010 Nokia Corporation + * Copyright (C) 2012 - 2013 Pali Rohár <pali.rohar@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -65,10 +66,6 @@ struct isp1704_charger { unsigned present:1; unsigned online:1; unsigned current_max; - - /* temp storage variables */ - unsigned long event; - unsigned max_power; }; static inline int isp1704_read(struct isp1704_charger *isp, u32 reg) @@ -231,56 +228,59 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp) return ret; } +static inline int isp1704_charger_detect_dcp(struct isp1704_charger *isp) +{ + if (isp1704_charger_detect(isp) && + isp1704_charger_type(isp) == POWER_SUPPLY_TYPE_USB_DCP) + return true; + else + return false; +} + static void isp1704_charger_work(struct work_struct *data) { - int detect; - unsigned long event; - unsigned power; struct isp1704_charger *isp = container_of(data, struct isp1704_charger, work); static DEFINE_MUTEX(lock); - event = isp->event; - power = isp->max_power; - mutex_lock(&lock); - if (event != USB_EVENT_NONE) - isp1704_charger_set_power(isp, 1); - - switch (event) { + switch (isp->phy->last_event) { case USB_EVENT_VBUS: - isp->online = true; + /* do not call wall charger detection more times */ + if (!isp->present) { + isp->online = true; + isp->present = 1; + isp1704_charger_set_power(isp, 1); + + /* detect wall charger */ + if (isp1704_charger_detect_dcp(isp)) { + isp->psy.type = POWER_SUPPLY_TYPE_USB_DCP; + isp->current_max = 1800; + } else { + isp->psy.type = POWER_SUPPLY_TYPE_USB; + isp->current_max = 500; + } - /* detect charger */ - detect = isp1704_charger_detect(isp); - - if (detect) { - isp->present = detect; - isp->psy.type = isp1704_charger_type(isp); + /* enable data pullups */ + if (isp->phy->otg->gadget) + usb_gadget_connect(isp->phy->otg->gadget); } - switch (isp->psy.type) { - case POWER_SUPPLY_TYPE_USB_DCP: - isp->current_max = 1800; - break; - case POWER_SUPPLY_TYPE_USB_CDP: + if (isp->psy.type != POWER_SUPPLY_TYPE_USB_DCP) { /* * Only 500mA here or high speed chirp * handshaking may break */ - isp->current_max = 500; - /* FALLTHROUGH */ - case POWER_SUPPLY_TYPE_USB: - default: - /* enable data pullups */ - if (isp->phy->otg->gadget) - usb_gadget_connect(isp->phy->otg->gadget); + if (isp->current_max > 500) + isp->current_max = 500; + + 
if (isp->current_max > 100) + isp->psy.type = POWER_SUPPLY_TYPE_USB_CDP; } break; case USB_EVENT_NONE: isp->online = false; - isp->current_max = 0; isp->present = 0; isp->current_max = 0; isp->psy.type = POWER_SUPPLY_TYPE_USB; @@ -298,12 +298,6 @@ static void isp1704_charger_work(struct work_struct *data) isp1704_charger_set_power(isp, 0); break; - case USB_EVENT_ENUMERATED: - if (isp->present) - isp->current_max = 1800; - else - isp->current_max = power; - break; default: goto out; } @@ -314,16 +308,11 @@ out: } static int isp1704_notifier_call(struct notifier_block *nb, - unsigned long event, void *power) + unsigned long val, void *v) { struct isp1704_charger *isp = container_of(nb, struct isp1704_charger, nb); - isp->event = event; - - if (power) - isp->max_power = *((unsigned *)power); - schedule_work(&isp->work); return NOTIFY_OK; @@ -462,13 +451,13 @@ static int isp1704_charger_probe(struct platform_device *pdev) if (isp->phy->otg->gadget) usb_gadget_disconnect(isp->phy->otg->gadget); + if (isp->phy->last_event == USB_EVENT_NONE) + isp1704_charger_set_power(isp, 0); + /* Detect charger if VBUS is valid (the cable was already plugged). */ - ret = isp1704_read(isp, ULPI_USB_INT_STS); - isp1704_charger_set_power(isp, 0); - if ((ret & ULPI_INT_VBUS_VALID) && !isp->phy->otg->default_a) { - isp->event = USB_EVENT_VBUS; + if (isp->phy->last_event == USB_EVENT_VBUS && + !isp->phy->otg->default_a) schedule_work(&isp->work); - } return 0; fail2: diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index d664ef58afa..e0b22f9b6fd 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c @@ -33,6 +33,7 @@ #include <linux/power_supply.h> #include <linux/power/max17042_battery.h> #include <linux/of.h> +#include <linux/regmap.h> /* Status register bits */ #define STATUS_POR_BIT (1 << 1) @@ -67,6 +68,7 @@ struct max17042_chip { struct i2c_client *client; + struct regmap *regmap; struct power_supply battery; enum max170xx_chip_type chip_type; struct max17042_platform_data *pdata; @@ -74,35 +76,6 @@ struct max17042_chip { int init_complete; }; -static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value) -{ - int ret = i2c_smbus_write_word_data(client, reg, value); - - if (ret < 0) - dev_err(&client->dev, "%s: err %d\n", __func__, ret); - - return ret; -} - -static int max17042_read_reg(struct i2c_client *client, u8 reg) -{ - int ret = i2c_smbus_read_word_data(client, reg); - - if (ret < 0) - dev_err(&client->dev, "%s: err %d\n", __func__, ret); - - return ret; -} - -static void max17042_set_reg(struct i2c_client *client, - struct max17042_reg_data *data, int size) -{ - int i; - - for (i = 0; i < size; i++) - max17042_write_reg(client, data[i].addr, data[i].data); -} - static enum power_supply_property max17042_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CYCLE_COUNT, @@ -125,96 +98,98 @@ static int max17042_get_property(struct power_supply *psy, { struct max17042_chip *chip = container_of(psy, struct max17042_chip, battery); + struct regmap *map = chip->regmap; int ret; + u32 data; if (!chip->init_complete) return -EAGAIN; switch (psp) { case POWER_SUPPLY_PROP_PRESENT: - ret = max17042_read_reg(chip->client, MAX17042_STATUS); + ret = regmap_read(map, MAX17042_STATUS, &data); if (ret < 0) return ret; - if (ret & MAX17042_STATUS_BattAbsent) + if (data & MAX17042_STATUS_BattAbsent) val->intval = 0; else val->intval = 1; break; case POWER_SUPPLY_PROP_CYCLE_COUNT: - ret = max17042_read_reg(chip->client, 
MAX17042_Cycles); + ret = regmap_read(map, MAX17042_Cycles, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX: - ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt); + ret = regmap_read(map, MAX17042_MinMaxVolt, &data); if (ret < 0) return ret; - val->intval = ret >> 8; + val->intval = data >> 8; val->intval *= 20000; /* Units of LSB = 20mV */ break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: if (chip->chip_type == MAX17042) - ret = max17042_read_reg(chip->client, MAX17042_V_empty); + ret = regmap_read(map, MAX17042_V_empty, &data); else - ret = max17042_read_reg(chip->client, MAX17047_V_empty); + ret = regmap_read(map, MAX17047_V_empty, &data); if (ret < 0) return ret; - val->intval = ret >> 7; + val->intval = data >> 7; val->intval *= 10000; /* Units of LSB = 10mV */ break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = max17042_read_reg(chip->client, MAX17042_VCELL); + ret = regmap_read(map, MAX17042_VCELL, &data); if (ret < 0) return ret; - val->intval = ret * 625 / 8; + val->intval = data * 625 / 8; break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: - ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL); + ret = regmap_read(map, MAX17042_AvgVCELL, &data); if (ret < 0) return ret; - val->intval = ret * 625 / 8; + val->intval = data * 625 / 8; break; case POWER_SUPPLY_PROP_VOLTAGE_OCV: - ret = max17042_read_reg(chip->client, MAX17042_OCVInternal); + ret = regmap_read(map, MAX17042_OCVInternal, &data); if (ret < 0) return ret; - val->intval = ret * 625 / 8; + val->intval = data * 625 / 8; break; case POWER_SUPPLY_PROP_CAPACITY: - ret = max17042_read_reg(chip->client, MAX17042_RepSOC); + ret = regmap_read(map, MAX17042_RepSOC, &data); if (ret < 0) return ret; - val->intval = ret >> 8; + val->intval = data >> 8; break; case POWER_SUPPLY_PROP_CHARGE_FULL: - ret = max17042_read_reg(chip->client, MAX17042_FullCAP); + ret = regmap_read(map, MAX17042_FullCAP, &data); if (ret < 0) return ret; - val->intval = ret * 1000 / 2; + val->intval = data * 1000 / 2; break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: - ret = max17042_read_reg(chip->client, MAX17042_QH); + ret = regmap_read(map, MAX17042_QH, &data); if (ret < 0) return ret; - val->intval = ret * 1000 / 2; + val->intval = data * 1000 / 2; break; case POWER_SUPPLY_PROP_TEMP: - ret = max17042_read_reg(chip->client, MAX17042_TEMP); + ret = regmap_read(map, MAX17042_TEMP, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; /* The value is signed. 
*/ if (val->intval & 0x8000) { val->intval = (0x7fff & ~val->intval) + 1; @@ -226,11 +201,11 @@ static int max17042_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CURRENT_NOW: if (chip->pdata->enable_current_sense) { - ret = max17042_read_reg(chip->client, MAX17042_Current); + ret = regmap_read(map, MAX17042_Current, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; if (val->intval & 0x8000) { /* Negative */ val->intval = ~val->intval & 0x7fff; @@ -244,12 +219,11 @@ static int max17042_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CURRENT_AVG: if (chip->pdata->enable_current_sense) { - ret = max17042_read_reg(chip->client, - MAX17042_AvgCurrent); + ret = regmap_read(map, MAX17042_AvgCurrent, &data); if (ret < 0) return ret; - val->intval = ret; + val->intval = data; if (val->intval & 0x8000) { /* Negative */ val->intval = ~val->intval & 0x7fff; @@ -267,16 +241,15 @@ static int max17042_get_property(struct power_supply *psy, return 0; } -static int max17042_write_verify_reg(struct i2c_client *client, - u8 reg, u16 value) +static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value) { int retries = 8; int ret; - u16 read_value; + u32 read_value; do { - ret = i2c_smbus_write_word_data(client, reg, value); - read_value = max17042_read_reg(client, reg); + ret = regmap_write(map, reg, value); + regmap_read(map, reg, &read_value); if (read_value != value) { ret = -EIO; retries--; @@ -284,50 +257,51 @@ static int max17042_write_verify_reg(struct i2c_client *client, } while (retries && read_value != value); if (ret < 0) - dev_err(&client->dev, "%s: err %d\n", __func__, ret); + pr_err("%s: err %d\n", __func__, ret); return ret; } -static inline void max17042_override_por( - struct i2c_client *client, u8 reg, u16 value) +static inline void max17042_override_por(struct regmap *map, + u8 reg, u16 value) { if (value) - max17042_write_reg(client, reg, value); + regmap_write(map, reg, value); } static inline void max10742_unlock_model(struct max17042_chip *chip) { - struct i2c_client *client = chip->client; - max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1); - max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2); + struct regmap *map = chip->regmap; + regmap_write(map, MAX17042_MLOCKReg1, MODEL_UNLOCK1); + regmap_write(map, MAX17042_MLOCKReg2, MODEL_UNLOCK2); } static inline void max10742_lock_model(struct max17042_chip *chip) { - struct i2c_client *client = chip->client; - max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1); - max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2); + struct regmap *map = chip->regmap; + + regmap_write(map, MAX17042_MLOCKReg1, MODEL_LOCK1); + regmap_write(map, MAX17042_MLOCKReg2, MODEL_LOCK2); } static inline void max17042_write_model_data(struct max17042_chip *chip, u8 addr, int size) { - struct i2c_client *client = chip->client; + struct regmap *map = chip->regmap; int i; for (i = 0; i < size; i++) - max17042_write_reg(client, addr + i, - chip->pdata->config_data->cell_char_tbl[i]); + regmap_write(map, addr + i, + chip->pdata->config_data->cell_char_tbl[i]); } static inline void max17042_read_model_data(struct max17042_chip *chip, - u8 addr, u16 *data, int size) + u8 addr, u32 *data, int size) { - struct i2c_client *client = chip->client; + struct regmap *map = chip->regmap; int i; for (i = 0; i < size; i++) - data[i] = max17042_read_reg(client, addr + i); + regmap_read(map, addr + i, &data[i]); } static inline int max17042_model_data_compare(struct 
max17042_chip *chip, @@ -350,7 +324,7 @@ static int max17042_init_model(struct max17042_chip *chip) { int ret; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u16 *temp_data; + u32 *temp_data; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); if (!temp_data) @@ -365,7 +339,7 @@ static int max17042_init_model(struct max17042_chip *chip) ret = max17042_model_data_compare( chip, chip->pdata->config_data->cell_char_tbl, - temp_data, + (u16 *)temp_data, table_size); max10742_lock_model(chip); @@ -378,7 +352,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) { int i; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u16 *temp_data; + u32 *temp_data; int ret = 0; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); @@ -398,40 +372,38 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) static void max17042_write_config_regs(struct max17042_chip *chip) { struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - max17042_write_reg(chip->client, MAX17042_CONFIG, config->config); - max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg); - max17042_write_reg(chip->client, MAX17042_FilterCFG, + regmap_write(map, MAX17042_CONFIG, config->config); + regmap_write(map, MAX17042_LearnCFG, config->learn_cfg); + regmap_write(map, MAX17042_FilterCFG, config->filter_cfg); - max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg); + regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg); if (chip->chip_type == MAX17047) - max17042_write_reg(chip->client, MAX17047_FullSOCThr, + regmap_write(map, MAX17047_FullSOCThr, config->full_soc_thresh); } static void max17042_write_custom_regs(struct max17042_chip *chip) { struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - max17042_write_verify_reg(chip->client, MAX17042_RCOMP0, - config->rcomp0); - max17042_write_verify_reg(chip->client, MAX17042_TempCo, - config->tcompc0); - max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm, - config->ichgt_term); + max17042_write_verify_reg(map, MAX17042_RCOMP0, config->rcomp0); + max17042_write_verify_reg(map, MAX17042_TempCo, config->tcompc0); + max17042_write_verify_reg(map, MAX17042_ICHGTerm, config->ichgt_term); if (chip->chip_type == MAX17042) { - max17042_write_reg(chip->client, MAX17042_EmptyTempCo, - config->empty_tempco); - max17042_write_verify_reg(chip->client, MAX17042_K_empty0, + regmap_write(map, MAX17042_EmptyTempCo, config->empty_tempco); + max17042_write_verify_reg(map, MAX17042_K_empty0, config->kempty0); } else { - max17042_write_verify_reg(chip->client, MAX17047_QRTbl00, + max17042_write_verify_reg(map, MAX17047_QRTbl00, config->qrtbl00); - max17042_write_verify_reg(chip->client, MAX17047_QRTbl10, + max17042_write_verify_reg(map, MAX17047_QRTbl10, config->qrtbl10); - max17042_write_verify_reg(chip->client, MAX17047_QRTbl20, + max17042_write_verify_reg(map, MAX17047_QRTbl20, config->qrtbl20); - max17042_write_verify_reg(chip->client, MAX17047_QRTbl30, + max17042_write_verify_reg(map, MAX17047_QRTbl30, config->qrtbl30); } } @@ -439,58 +411,60 @@ static void max17042_write_custom_regs(struct max17042_chip *chip) static void max17042_update_capacity_regs(struct max17042_chip *chip) { struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - max17042_write_verify_reg(chip->client, MAX17042_FullCAP, + max17042_write_verify_reg(map, 
MAX17042_FullCAP, config->fullcap); - max17042_write_reg(chip->client, MAX17042_DesignCap, - config->design_cap); - max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom, + regmap_write(map, MAX17042_DesignCap, config->design_cap); + max17042_write_verify_reg(map, MAX17042_FullCAPNom, config->fullcapnom); } static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip) { - u16 vfSoc; + unsigned int vfSoc; + struct regmap *map = chip->regmap; - vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC); - max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK); - max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc); - max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK); + regmap_read(map, MAX17042_VFSOC, &vfSoc); + regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK); + max17042_write_verify_reg(map, MAX17042_VFSOC0, vfSoc); + regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_LOCK); } static void max17042_load_new_capacity_params(struct max17042_chip *chip) { - u16 full_cap0, rep_cap, dq_acc, vfSoc; + u32 full_cap0, rep_cap, dq_acc, vfSoc; u32 rem_cap; struct max17042_config_data *config = chip->pdata->config_data; + struct regmap *map = chip->regmap; - full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0); - vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC); + regmap_read(map, MAX17042_FullCAP0, &full_cap0); + regmap_read(map, MAX17042_VFSOC, &vfSoc); /* fg_vfSoc needs to shifted by 8 bits to get the * perc in 1% accuracy, to get the right rem_cap multiply * full_cap0, fg_vfSoc and devide by 100 */ rem_cap = ((vfSoc >> 8) * full_cap0) / 100; - max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap); + max17042_write_verify_reg(map, MAX17042_RemCap, rem_cap); - rep_cap = (u16)rem_cap; - max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap); + rep_cap = rem_cap; + max17042_write_verify_reg(map, MAX17042_RepCap, rep_cap); /* Write dQ_acc to 200% of Capacity and dP_acc to 200% */ dq_acc = config->fullcap / dQ_ACC_DIV; - max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc); - max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200); + max17042_write_verify_reg(map, MAX17042_dQacc, dq_acc); + max17042_write_verify_reg(map, MAX17042_dPacc, dP_ACC_200); - max17042_write_verify_reg(chip->client, MAX17042_FullCAP, + max17042_write_verify_reg(map, MAX17042_FullCAP, config->fullcap); - max17042_write_reg(chip->client, MAX17042_DesignCap, + regmap_write(map, MAX17042_DesignCap, config->design_cap); - max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom, + max17042_write_verify_reg(map, MAX17042_FullCAPNom, config->fullcapnom); /* Update SOC register with new SOC */ - max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc); + regmap_write(map, MAX17042_RepSOC, vfSoc); } /* @@ -500,59 +474,60 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip) */ static inline void max17042_override_por_values(struct max17042_chip *chip) { - struct i2c_client *client = chip->client; + struct regmap *map = chip->regmap; struct max17042_config_data *config = chip->pdata->config_data; - max17042_override_por(client, MAX17042_TGAIN, config->tgain); - max17042_override_por(client, MAx17042_TOFF, config->toff); - max17042_override_por(client, MAX17042_CGAIN, config->cgain); - max17042_override_por(client, MAX17042_COFF, config->coff); - - max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh); - max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh); 
- max17042_override_por(client, MAX17042_SALRT_Th, - config->soc_alrt_thresh); - max17042_override_por(client, MAX17042_CONFIG, config->config); - max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer); - - max17042_override_por(client, MAX17042_DesignCap, config->design_cap); - max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term); - - max17042_override_por(client, MAX17042_AtRate, config->at_rate); - max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg); - max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg); - max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg); - max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg); - max17042_override_por(client, MAX17042_MaskSOC, config->masksoc); - - max17042_override_por(client, MAX17042_FullCAP, config->fullcap); - max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom); + max17042_override_por(map, MAX17042_TGAIN, config->tgain); + max17042_override_por(map, MAx17042_TOFF, config->toff); + max17042_override_por(map, MAX17042_CGAIN, config->cgain); + max17042_override_por(map, MAX17042_COFF, config->coff); + + max17042_override_por(map, MAX17042_VALRT_Th, config->valrt_thresh); + max17042_override_por(map, MAX17042_TALRT_Th, config->talrt_thresh); + max17042_override_por(map, MAX17042_SALRT_Th, + config->soc_alrt_thresh); + max17042_override_por(map, MAX17042_CONFIG, config->config); + max17042_override_por(map, MAX17042_SHDNTIMER, config->shdntimer); + + max17042_override_por(map, MAX17042_DesignCap, config->design_cap); + max17042_override_por(map, MAX17042_ICHGTerm, config->ichgt_term); + + max17042_override_por(map, MAX17042_AtRate, config->at_rate); + max17042_override_por(map, MAX17042_LearnCFG, config->learn_cfg); + max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg); + max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg); + max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg); + max17042_override_por(map, MAX17042_MaskSOC, config->masksoc); + + max17042_override_por(map, MAX17042_FullCAP, config->fullcap); + max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom); if (chip->chip_type == MAX17042) - max17042_override_por(client, MAX17042_SOC_empty, + max17042_override_por(map, MAX17042_SOC_empty, config->socempty); - max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty); - max17042_override_por(client, MAX17042_dQacc, config->dqacc); - max17042_override_por(client, MAX17042_dPacc, config->dpacc); + max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty); + max17042_override_por(map, MAX17042_dQacc, config->dqacc); + max17042_override_por(map, MAX17042_dPacc, config->dpacc); if (chip->chip_type == MAX17042) - max17042_override_por(client, MAX17042_V_empty, config->vempty); + max17042_override_por(map, MAX17042_V_empty, config->vempty); else - max17042_override_por(client, MAX17047_V_empty, config->vempty); - max17042_override_por(client, MAX17042_TempNom, config->temp_nom); - max17042_override_por(client, MAX17042_TempLim, config->temp_lim); - max17042_override_por(client, MAX17042_FCTC, config->fctc); - max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0); - max17042_override_por(client, MAX17042_TempCo, config->tcompc0); + max17042_override_por(map, MAX17047_V_empty, config->vempty); + max17042_override_por(map, MAX17042_TempNom, config->temp_nom); + max17042_override_por(map, MAX17042_TempLim, config->temp_lim); + max17042_override_por(map, 
MAX17042_FCTC, config->fctc); + max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0); + max17042_override_por(map, MAX17042_TempCo, config->tcompc0); if (chip->chip_type) { - max17042_override_por(client, MAX17042_EmptyTempCo, - config->empty_tempco); - max17042_override_por(client, MAX17042_K_empty0, - config->kempty0); + max17042_override_por(map, MAX17042_EmptyTempCo, + config->empty_tempco); + max17042_override_por(map, MAX17042_K_empty0, + config->kempty0); } } static int max17042_init_chip(struct max17042_chip *chip) { + struct regmap *map = chip->regmap; int ret; int val; @@ -597,31 +572,32 @@ static int max17042_init_chip(struct max17042_chip *chip) max17042_load_new_capacity_params(chip); /* Init complete, Clear the POR bit */ - val = max17042_read_reg(chip->client, MAX17042_STATUS); - max17042_write_reg(chip->client, MAX17042_STATUS, - val & (~STATUS_POR_BIT)); + regmap_read(map, MAX17042_STATUS, &val); + regmap_write(map, MAX17042_STATUS, val & (~STATUS_POR_BIT)); return 0; } static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off) { - u16 soc, soc_tr; + struct regmap *map = chip->regmap; + u32 soc, soc_tr; /* program interrupt thesholds such that we should * get interrupt for every 'off' perc change in the soc */ - soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8; + regmap_read(map, MAX17042_RepSOC, &soc); + soc >>= 8; soc_tr = (soc + off) << 8; soc_tr |= (soc - off); - max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr); + regmap_write(map, MAX17042_SALRT_Th, soc_tr); } static irqreturn_t max17042_thread_handler(int id, void *dev) { struct max17042_chip *chip = dev; - u16 val; + u32 val; - val = max17042_read_reg(chip->client, MAX17042_STATUS); + regmap_read(chip->regmap, MAX17042_STATUS, &val); if ((val & STATUS_INTR_SOCMIN_BIT) || (val & STATUS_INTR_SOCMAX_BIT)) { dev_info(&chip->client->dev, "SOC threshold INTR\n"); @@ -682,13 +658,20 @@ max17042_get_pdata(struct device *dev) } #endif +static struct regmap_config max17042_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .val_format_endian = REGMAP_ENDIAN_NATIVE, +}; + static int max17042_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct max17042_chip *chip; int ret; - int reg; + int i; + u32 val; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -EIO; @@ -698,6 +681,12 @@ static int max17042_probe(struct i2c_client *client, return -ENOMEM; chip->client = client; + chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config); + if (IS_ERR(chip->regmap)) { + dev_err(&client->dev, "Failed to initialize regmap\n"); + return -EINVAL; + } + chip->pdata = max17042_get_pdata(&client->dev); if (!chip->pdata) { dev_err(&client->dev, "no platform data provided\n"); @@ -706,15 +695,15 @@ static int max17042_probe(struct i2c_client *client, i2c_set_clientdata(client, chip); - ret = max17042_read_reg(chip->client, MAX17042_DevName); - if (ret == MAX17042_IC_VERSION) { + regmap_read(chip->regmap, MAX17042_DevName, &val); + if (val == MAX17042_IC_VERSION) { dev_dbg(&client->dev, "chip type max17042 detected\n"); chip->chip_type = MAX17042; - } else if (ret == MAX17047_IC_VERSION) { + } else if (val == MAX17047_IC_VERSION) { dev_dbg(&client->dev, "chip type max17047/50 detected\n"); chip->chip_type = MAX17047; } else { - dev_err(&client->dev, "device version mismatch: %x\n", ret); + dev_err(&client->dev, "device version mismatch: %x\n", val); return -EIO; } @@ -733,13 
+722,15 @@ static int max17042_probe(struct i2c_client *client, chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR; if (chip->pdata->init_data) - max17042_set_reg(client, chip->pdata->init_data, - chip->pdata->num_init_data); + for (i = 0; i < chip->pdata->num_init_data; i++) + regmap_write(chip->regmap, + chip->pdata->init_data[i].addr, + chip->pdata->init_data[i].data); if (!chip->pdata->enable_current_sense) { - max17042_write_reg(client, MAX17042_CGAIN, 0x0000); - max17042_write_reg(client, MAX17042_MiscCFG, 0x0003); - max17042_write_reg(client, MAX17042_LearnCFG, 0x0007); + regmap_write(chip->regmap, MAX17042_CGAIN, 0x0000); + regmap_write(chip->regmap, MAX17042_MiscCFG, 0x0003); + regmap_write(chip->regmap, MAX17042_LearnCFG, 0x0007); } ret = power_supply_register(&client->dev, &chip->battery); @@ -754,9 +745,9 @@ static int max17042_probe(struct i2c_client *client, IRQF_TRIGGER_FALLING, chip->battery.name, chip); if (!ret) { - reg = max17042_read_reg(client, MAX17042_CONFIG); - reg |= CONFIG_ALRT_BIT_ENBL; - max17042_write_reg(client, MAX17042_CONFIG, reg); + regmap_read(chip->regmap, MAX17042_CONFIG, &val); + val |= CONFIG_ALRT_BIT_ENBL; + regmap_write(chip->regmap, MAX17042_CONFIG, val); max17042_set_soc_threshold(chip, 1); } else { client->irq = 0; @@ -765,8 +756,8 @@ static int max17042_probe(struct i2c_client *client, } } - reg = max17042_read_reg(chip->client, MAX17042_STATUS); - if (reg & STATUS_POR_BIT) { + regmap_read(chip->regmap, MAX17042_STATUS, &val); + if (val & STATUS_POR_BIT) { INIT_WORK(&chip->work, max17042_init_worker); schedule_work(&chip->work); } else { @@ -786,7 +777,7 @@ static int max17042_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int max17042_suspend(struct device *dev) { struct max17042_chip *chip = dev_get_drvdata(dev); @@ -816,17 +807,11 @@ static int max17042_resume(struct device *dev) return 0; } - -static const struct dev_pm_ops max17042_pm_ops = { - .suspend = max17042_suspend, - .resume = max17042_resume, -}; - -#define MAX17042_PM_OPS (&max17042_pm_ops) -#else -#define MAX17042_PM_OPS NULL #endif +static SIMPLE_DEV_PM_OPS(max17042_pm_ops, max17042_suspend, + max17042_resume); + #ifdef CONFIG_OF static const struct of_device_id max17042_dt_match[] = { { .compatible = "maxim,max17042" }, @@ -849,7 +834,7 @@ static struct i2c_driver max17042_i2c_driver = { .driver = { .name = "max17042", .of_match_table = of_match_ptr(max17042_dt_match), - .pm = MAX17042_PM_OPS, + .pm = &max17042_pm_ops, }, .probe = max17042_probe, .remove = max17042_remove, diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c index ffa10ed83eb..62c15af58c9 100644 --- a/drivers/power/pm2301_charger.c +++ b/drivers/power/pm2301_charger.c @@ -205,7 +205,7 @@ static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val) } -int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val) +static int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val) { queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work); @@ -722,8 +722,12 @@ static int pm2xxx_charger_ac_en(struct ux500_charger *charger, dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset); if (!pm2->vddadc_en_ac) { - regulator_enable(pm2->regu); - pm2->vddadc_en_ac = true; + ret = regulator_enable(pm2->regu); + if (ret) + dev_warn(pm2->dev, + "Failed to enable vddadc regulator\n"); + else + pm2->vddadc_en_ac = true; } ret = pm2xxx_charging_init(pm2); @@ -953,37 +957,24 @@ static int 
pm2xxx_runtime_suspend(struct device *dev) { struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev); struct pm2xxx_charger *pm2; - int ret = 0; pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client); - if (!pm2) { - dev_err(pm2->dev, "no pm2xxx_charger data supplied\n"); - ret = -EINVAL; - return ret; - } - clear_lpn_pin(pm2); - return ret; + return 0; } static int pm2xxx_runtime_resume(struct device *dev) { struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev); struct pm2xxx_charger *pm2; - int ret = 0; pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client); - if (!pm2) { - dev_err(pm2->dev, "no pm2xxx_charger data supplied\n"); - ret = -EINVAL; - return ret; - } if (gpio_is_valid(pm2->lpn_pin) && gpio_get_value(pm2->lpn_pin) == 0) set_lpn_pin(pm2); - return ret; + return 0; } #endif diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c index bdd7b9b2546..8fc9d6df87f 100644 --- a/drivers/power/tps65090-charger.c +++ b/drivers/power/tps65090-charger.c @@ -15,15 +15,17 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/slab.h> -#include <linux/delay.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/power_supply.h> +#include <linux/slab.h> + #include <linux/mfd/tps65090.h> #define TPS65090_REG_INTR_STS 0x00 @@ -185,10 +187,6 @@ static irqreturn_t tps65090_charger_isr(int irq, void *dev_id) return IRQ_HANDLED; } -#if defined(CONFIG_OF) - -#include <linux/of_device.h> - static struct tps65090_platform_data * tps65090_parse_dt_charger_data(struct platform_device *pdev) { @@ -210,13 +208,6 @@ static struct tps65090_platform_data * return pdata; } -#else -static struct tps65090_platform_data * - tps65090_parse_dt_charger_data(struct platform_device *pdev) -{ - return NULL; -} -#endif static int tps65090_charger_probe(struct platform_device *pdev) { @@ -228,7 +219,7 @@ static int tps65090_charger_probe(struct platform_device *pdev) pdata = dev_get_platdata(pdev->dev.parent); - if (!pdata && pdev->dev.of_node) + if (IS_ENABLED(CONFIG_OF) && !pdata && pdev->dev.of_node) pdata = tps65090_parse_dt_charger_data(pdev); if (!pdata) { @@ -277,13 +268,13 @@ static int tps65090_charger_probe(struct platform_device *pdev) if (ret) { dev_err(cdata->dev, "Unable to register irq %d err %d\n", irq, ret); - goto fail_free_irq; + goto fail_unregister_supply; } ret = tps65090_config_charger(cdata); if (ret < 0) { dev_err(&pdev->dev, "charger config failed, err %d\n", ret); - goto fail_free_irq; + goto fail_unregister_supply; } /* Check for charger presence */ @@ -292,14 +283,14 @@ static int tps65090_charger_probe(struct platform_device *pdev) if (ret < 0) { dev_err(cdata->dev, "%s(): Error in reading reg 0x%x", __func__, TPS65090_REG_CG_STATUS1); - goto fail_free_irq; + goto fail_unregister_supply; } if (status1 != 0) { ret = tps65090_enable_charging(cdata); if (ret < 0) { dev_err(cdata->dev, "error enabling charger\n"); - goto fail_free_irq; + goto fail_unregister_supply; } cdata->ac_online = 1; power_supply_changed(&cdata->ac); @@ -307,8 +298,6 @@ static int tps65090_charger_probe(struct platform_device *pdev) return 0; -fail_free_irq: - devm_free_irq(cdata->dev, irq, cdata); fail_unregister_supply: power_supply_unregister(&cdata->ac); @@ -319,7 +308,6 @@ 
static int tps65090_charger_remove(struct platform_device *pdev) { struct tps65090_charger *cdata = platform_get_drvdata(pdev); - devm_free_irq(cdata->dev, cdata->irq, cdata); power_supply_unregister(&cdata->ac); return 0; diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c index d98abe911e3..f14108844e1 100644 --- a/drivers/power/twl4030_charger.c +++ b/drivers/power/twl4030_charger.c @@ -495,10 +495,38 @@ static enum power_supply_property twl4030_charger_props[] = { POWER_SUPPLY_PROP_CURRENT_NOW, }; +#ifdef CONFIG_OF +static const struct twl4030_bci_platform_data * +twl4030_bci_parse_dt(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct twl4030_bci_platform_data *pdata; + u32 num; + + if (!np) + return NULL; + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return pdata; + + if (of_property_read_u32(np, "ti,bb-uvolt", &num) == 0) + pdata->bb_uvolt = num; + if (of_property_read_u32(np, "ti,bb-uamp", &num) == 0) + pdata->bb_uamp = num; + return pdata; +} +#else +static inline const struct twl4030_bci_platform_data * +twl4030_bci_parse_dt(struct device *dev) +{ + return NULL; +} +#endif + static int __init twl4030_bci_probe(struct platform_device *pdev) { struct twl4030_bci *bci; - struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data; + const struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data; int ret; u32 reg; @@ -506,6 +534,9 @@ static int __init twl4030_bci_probe(struct platform_device *pdev) if (bci == NULL) return -ENOMEM; + if (!pdata) + pdata = twl4030_bci_parse_dt(&pdev->dev); + bci->dev = &pdev->dev; bci->irq_chg = platform_get_irq(pdev, 0); bci->irq_bci = platform_get_irq(pdev, 1); @@ -581,8 +612,11 @@ static int __init twl4030_bci_probe(struct platform_device *pdev) twl4030_charger_enable_ac(true); twl4030_charger_enable_usb(bci, true); - twl4030_charger_enable_backup(pdata->bb_uvolt, - pdata->bb_uamp); + if (pdata) + twl4030_charger_enable_backup(pdata->bb_uvolt, + pdata->bb_uamp); + else + twl4030_charger_enable_backup(0, 0); return 0; @@ -631,10 +665,17 @@ static int __exit twl4030_bci_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id twl_bci_of_match[] = { + {.compatible = "ti,twl4030-bci", }, + { } +}; +MODULE_DEVICE_TABLE(of, twl_bci_of_match); + static struct platform_driver twl4030_bci_driver = { .driver = { .name = "twl4030_bci", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(twl_bci_of_match), }, .remove = __exit_p(twl4030_bci_remove), }; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 15f166a470a..00773022211 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -626,7 +626,7 @@ comment "Platform RTC drivers" config RTC_DRV_CMOS tristate "PC-style 'CMOS'" - depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS || SPARC64 + depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64 default y if X86 help Say "yes" here to get direct support for the real time clock @@ -643,6 +643,14 @@ config RTC_DRV_CMOS This driver can also be built as a module. If so, the module will be called rtc-cmos. +config RTC_DRV_ALPHA + bool "Alpha PC-style CMOS" + depends on ALPHA + default y + help + Direct support for the real-time clock found on every Alpha + system, specifically MC146818 compatibles. If in doubt, say Y. 
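The twl4030_charger hunk above adds an optional devicetree path: when no platform data is supplied, probe builds one from the "ti,bb-uvolt" and "ti,bb-uamp" properties and otherwise falls back to disabling backup charging. The snippet below is only an illustrative sketch of that optional-property pattern, not part of the patch; the bci_backup_cfg container and function name are hypothetical, while the two property names are taken from the hunk.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Hypothetical container mirroring the bb_uvolt/bb_uamp fields used by
 * twl4030_bci_parse_dt() in the hunk above. */
struct bci_backup_cfg {
	u32 bb_uvolt;
	u32 bb_uamp;
};

static struct bci_backup_cfg *bci_parse_backup_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct bci_backup_cfg *cfg;

	if (!np)
		return NULL;		/* no DT node: keep legacy behaviour */

	cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return NULL;

	/* of_property_read_u32() writes the output only on success, so a
	 * missing property simply leaves the zero default in place and the
	 * caller can treat "absent" and "disabled" identically. */
	of_property_read_u32(np, "ti,bb-uvolt", &cfg->bb_uvolt);
	of_property_read_u32(np, "ti,bb-uamp", &cfg->bb_uamp);

	return cfg;
}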
+ config RTC_DRV_VRTC tristate "Virtual RTC for Intel MID platforms" depends on X86_INTEL_MID diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 8b2cd8a5a2f..c0da95e9570 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -428,6 +428,14 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) return 0; } +static void at91_rtc_shutdown(struct platform_device *pdev) +{ + /* Disable all interrupts */ + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | + AT91_RTC_SECEV | AT91_RTC_TIMEV | + AT91_RTC_CALEV); +} + #ifdef CONFIG_PM_SLEEP /* AT91RM9200 RTC Power management control */ @@ -466,6 +474,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); static struct platform_driver at91_rtc_driver = { .remove = __exit_p(at91_rtc_remove), + .shutdown = at91_rtc_shutdown, .driver = { .name = "at91_rtc", .owner = THIS_MODULE, diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 548209a9c43..d0ab5019d88 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -118,22 +118,6 @@ static void scm_request_done(struct scm_request *scmrq) spin_unlock_irqrestore(&list_lock, flags); } -static int scm_open(struct block_device *blkdev, fmode_t mode) -{ - return scm_get_ref(); -} - -static void scm_release(struct gendisk *gendisk, fmode_t mode) -{ - scm_put_ref(); -} - -static const struct block_device_operations scm_blk_devops = { - .owner = THIS_MODULE, - .open = scm_open, - .release = scm_release, -}; - static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) { return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; @@ -256,7 +240,7 @@ static void scm_blk_request(struct request_queue *rq) atomic_inc(&bdev->queued_reqs); blk_start_request(req); - ret = scm_start_aob(scmrq->aob); + ret = eadm_start_aob(scmrq->aob); if (ret) { SCM_LOG(5, "no subchannel"); scm_request_requeue(scmrq); @@ -320,7 +304,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq) } restart: - if (!scm_start_aob(scmrq->aob)) + if (!eadm_start_aob(scmrq->aob)) return; requeue: @@ -363,6 +347,10 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) blk_run_queue(bdev->rq); } +static const struct block_device_operations scm_blk_devops = { + .owner = THIS_MODULE, +}; + int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) { struct request_queue *rq; diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index c0d102e3a48..27f930cd657 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -187,7 +187,7 @@ bool scm_need_cluster_request(struct scm_request *scmrq) void scm_initiate_cluster_request(struct scm_request *scmrq) { scm_prepare_cluster_request(scmrq); - if (scm_start_aob(scmrq->aob)) + if (eadm_start_aob(scmrq->aob)) scm_request_requeue(scmrq); } diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 17821a026c9..b69ab17f13f 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,8 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o + sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ + sclp_early.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index f93cc32eb81..71e97473801 100644 --- 
a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -564,6 +564,7 @@ static void __exit fs3270_exit(void) { raw3270_unregister_notifier(&fs3270_notifier); + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0)); __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); } diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 40d1406289e..6fbe09686d1 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -99,6 +99,7 @@ struct init_sccb { } __attribute__((packed)); extern u64 sclp_facilities; + #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) @@ -179,6 +180,10 @@ void sclp_sdias_exit(void); extern int sclp_console_pages; extern int sclp_console_drop; extern unsigned long sclp_console_full; +extern u8 sclp_fac84; +extern unsigned long long sclp_rzm; +extern unsigned long long sclp_rnmax; +extern __initdata int sclp_early_read_info_sccb_valid; /* useful inlines */ diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 77df9cb0068..eaa21d542c5 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -28,168 +28,6 @@ #include "sclp.h" -#define SCLP_CMDW_READ_SCP_INFO 0x00020001 -#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 - -struct read_info_sccb { - struct sccb_header header; /* 0-7 */ - u16 rnmax; /* 8-9 */ - u8 rnsize; /* 10 */ - u8 _reserved0[24 - 11]; /* 11-15 */ - u8 loadparm[8]; /* 24-31 */ - u8 _reserved1[48 - 32]; /* 32-47 */ - u64 facilities; /* 48-55 */ - u8 _reserved2[84 - 56]; /* 56-83 */ - u8 fac84; /* 84 */ - u8 fac85; /* 85 */ - u8 _reserved3[91 - 86]; /* 86-90 */ - u8 flags; /* 91 */ - u8 _reserved4[100 - 92]; /* 92-99 */ - u32 rnsize2; /* 100-103 */ - u64 rnmax2; /* 104-111 */ - u8 _reserved5[4096 - 112]; /* 112-4095 */ -} __attribute__((packed, aligned(PAGE_SIZE))); - -static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE); -static struct read_info_sccb __initdata early_read_info_sccb; -static int __initdata early_read_info_sccb_valid; - -u64 sclp_facilities; -static u8 sclp_fac84; -static unsigned long long rzm; -static unsigned long long rnmax; - -static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) -{ - int rc; - - __ctl_set_bit(0, 9); - rc = sclp_service_call(cmd, sccb); - if (rc) - goto out; - __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | - PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); - local_irq_disable(); -out: - /* Contents of the sccb might have changed. 
*/ - barrier(); - __ctl_clear_bit(0, 9); - return rc; -} - -static void __init sclp_read_info_early(void) -{ - int rc; - int i; - struct read_info_sccb *sccb; - sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, - SCLP_CMDW_READ_SCP_INFO}; - - sccb = &early_read_info_sccb; - for (i = 0; i < ARRAY_SIZE(commands); i++) { - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->header.function_code = 0x80; - sccb->header.control_mask[2] = 0x80; - rc = sclp_cmd_sync_early(commands[i], sccb); - } while (rc == -EBUSY); - - if (rc) - break; - if (sccb->header.response_code == 0x10) { - early_read_info_sccb_valid = 1; - break; - } - if (sccb->header.response_code != 0x1f0) - break; - } -} - -static void __init sclp_event_mask_early(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - int rc; - - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->mask_length = sizeof(sccb_mask_t); - rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); - } while (rc == -EBUSY); -} - -void __init sclp_facilities_detect(void) -{ - struct read_info_sccb *sccb; - - sclp_read_info_early(); - if (!early_read_info_sccb_valid) - return; - - sccb = &early_read_info_sccb; - sclp_facilities = sccb->facilities; - sclp_fac84 = sccb->fac84; - if (sccb->fac85 & 0x02) - S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; - rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; - rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; - rzm <<= 20; - - sclp_event_mask_early(); -} - -bool __init sclp_has_linemode(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - return 1; -} - -bool __init sclp_has_vt220(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) - return 1; - return 0; -} - -unsigned long long sclp_get_rnmax(void) -{ - return rnmax; -} - -unsigned long long sclp_get_rzm(void) -{ - return rzm; -} - -/* - * This function will be called after sclp_facilities_detect(), which gets - * called from early.c code. Therefore the sccb should have valid contents. 
- */ -void __init sclp_get_ipl_info(struct sclp_ipl_info *info) -{ - struct read_info_sccb *sccb; - - if (!early_read_info_sccb_valid) - return; - sccb = &early_read_info_sccb; - info->is_valid = 1; - if (sccb->flags & 0x2) - info->has_dump = 1; - memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); -} - static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; @@ -356,14 +194,14 @@ struct assign_storage_sccb { int arch_get_memory_phys_device(unsigned long start_pfn) { - if (!rzm) + if (!sclp_rzm) return 0; - return PFN_PHYS(start_pfn) >> ilog2(rzm); + return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm); } static unsigned long long rn2addr(u16 rn) { - return (unsigned long long) (rn - 1) * rzm; + return (unsigned long long) (rn - 1) * sclp_rzm; } static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) @@ -404,7 +242,7 @@ static int sclp_assign_storage(u16 rn) if (rc) return rc; start = rn2addr(rn); - storage_key_init_range(start, start + rzm); + storage_key_init_range(start, start + sclp_rzm); return 0; } @@ -462,7 +300,7 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size, istart = rn2addr(incr->rn); if (start + size - 1 < istart) break; - if (start > istart + rzm - 1) + if (start > istart + sclp_rzm - 1) continue; if (online) rc |= sclp_assign_storage(incr->rn); @@ -526,7 +364,7 @@ static void __init add_memory_merged(u16 rn) if (!first_rn) goto skip_add; start = rn2addr(first_rn); - size = (unsigned long long ) num * rzm; + size = (unsigned long long) num * sclp_rzm; if (start >= VMEM_MAX_PHYS) goto skip_add; if (start + size > VMEM_MAX_PHYS) @@ -574,7 +412,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) } if (!assigned) new_incr->rn = last_rn + 1; - if (new_incr->rn > rnmax) { + if (new_incr->rn > sclp_rnmax) { kfree(new_incr); return; } @@ -617,7 +455,7 @@ static int __init sclp_detect_standby_memory(void) if (OLDMEM_BASE) /* No standby memory in kdump mode */ return 0; - if (!early_read_info_sccb_valid) + if (!sclp_early_read_info_sccb_valid) return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; @@ -661,7 +499,7 @@ static int __init sclp_detect_standby_memory(void) } if (rc || list_empty(&sclp_mem_list)) goto out; - for (i = 1; i <= rnmax - assigned; i++) + for (i = 1; i <= sclp_rnmax - assigned; i++) insert_increment(0, 1, 0); rc = register_memory_notifier(&sclp_mem_nb); if (rc) diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c new file mode 100644 index 00000000000..f7aa080e9b2 --- /dev/null +++ b/drivers/s390/char/sclp_early.c @@ -0,0 +1,264 @@ +/* + * SCLP early driver + * + * Copyright IBM Corp. 
2013 + */ + +#define KMSG_COMPONENT "sclp_early" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <asm/ctl_reg.h> +#include <asm/sclp.h> +#include <asm/ipl.h> +#include "sclp_sdias.h" +#include "sclp.h" + +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 + +struct read_info_sccb { + struct sccb_header header; /* 0-7 */ + u16 rnmax; /* 8-9 */ + u8 rnsize; /* 10 */ + u8 _reserved0[24 - 11]; /* 11-15 */ + u8 loadparm[8]; /* 24-31 */ + u8 _reserved1[48 - 32]; /* 32-47 */ + u64 facilities; /* 48-55 */ + u8 _reserved2[84 - 56]; /* 56-83 */ + u8 fac84; /* 84 */ + u8 fac85; /* 85 */ + u8 _reserved3[91 - 86]; /* 86-90 */ + u8 flags; /* 91 */ + u8 _reserved4[100 - 92]; /* 92-99 */ + u32 rnsize2; /* 100-103 */ + u64 rnmax2; /* 104-111 */ + u8 _reserved5[4096 - 112]; /* 112-4095 */ +} __packed __aligned(PAGE_SIZE); + +static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE); +static __initdata struct read_info_sccb early_read_info_sccb; +static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE); +static unsigned long sclp_hsa_size; + +__initdata int sclp_early_read_info_sccb_valid; +u64 sclp_facilities; +u8 sclp_fac84; +unsigned long long sclp_rzm; +unsigned long long sclp_rnmax; + +static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + __ctl_set_bit(0, 9); + rc = sclp_service_call(cmd, sccb); + if (rc) + goto out; + __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | + PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); + local_irq_disable(); +out: + /* Contents of the sccb might have changed. */ + barrier(); + __ctl_clear_bit(0, 9); + return rc; +} + +static void __init sclp_read_info_early(void) +{ + int rc; + int i; + struct read_info_sccb *sccb; + sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, + SCLP_CMDW_READ_SCP_INFO}; + + sccb = &early_read_info_sccb; + for (i = 0; i < ARRAY_SIZE(commands); i++) { + do { + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->header.function_code = 0x80; + sccb->header.control_mask[2] = 0x80; + rc = sclp_cmd_sync_early(commands[i], sccb); + } while (rc == -EBUSY); + + if (rc) + break; + if (sccb->header.response_code == 0x10) { + sclp_early_read_info_sccb_valid = 1; + break; + } + if (sccb->header.response_code != 0x1f0) + break; + } +} + +static void __init sclp_facilities_detect(void) +{ + struct read_info_sccb *sccb; + + sclp_read_info_early(); + if (!sclp_early_read_info_sccb_valid) + return; + + sccb = &early_read_info_sccb; + sclp_facilities = sccb->facilities; + sclp_fac84 = sccb->fac84; + if (sccb->fac85 & 0x02) + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; + sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; + sclp_rzm = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; + sclp_rzm <<= 20; +} + +bool __init sclp_has_linemode(void) +{ + struct init_sccb *sccb = &early_event_mask_sccb; + + if (sccb->header.response_code != 0x20) + return 0; + if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + return 1; +} + +bool __init sclp_has_vt220(void) +{ + struct init_sccb *sccb = &early_event_mask_sccb; + + if (sccb->header.response_code != 0x20) + return 0; + if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + return 1; + return 0; +} + +unsigned long long sclp_get_rnmax(void) +{ + return sclp_rnmax; +} + +unsigned long long sclp_get_rzm(void) +{ + return sclp_rzm; +} + +/* + * This function will be called after sclp_facilities_detect(), which gets + * called from early.c code. Therefore the sccb should have valid contents. + */ +void __init sclp_get_ipl_info(struct sclp_ipl_info *info) +{ + struct read_info_sccb *sccb; + + if (!sclp_early_read_info_sccb_valid) + return; + sccb = &early_read_info_sccb; + info->is_valid = 1; + if (sccb->flags & 0x2) + info->has_dump = 1; + memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); +} + +static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + do { + rc = sclp_cmd_sync_early(cmd, sccb); + } while (rc == -EBUSY); + + if (rc) + return -EIO; + if (((struct sccb_header *) sccb)->response_code != 0x0020) + return -EIO; + return 0; +} + +static void __init sccb_init_eq_size(struct sdias_sccb *sccb) +{ + memset(sccb, 0, sizeof(*sccb)); + + sccb->hdr.length = sizeof(*sccb); + sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb->evbuf.hdr.type = EVTYP_SDIAS; + sccb->evbuf.event_qual = SDIAS_EQ_SIZE; + sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; + sccb->evbuf.event_id = 4712; + sccb->evbuf.dbs = 1; +} + +static int __init sclp_set_event_mask(unsigned long receive_mask, + unsigned long send_mask) +{ + struct init_sccb *sccb = (void *) &sccb_early; + + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->mask_length = sizeof(sccb_mask_t); + sccb->receive_mask = receive_mask; + sccb->send_mask = send_mask; + return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); +} + +static long __init sclp_hsa_size_init(void) +{ + struct sdias_sccb *sccb = (void *) &sccb_early; + + sccb_init_eq_size(sccb); + if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) + return -EIO; + if (sccb->evbuf.blk_cnt != 0) + return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE; + return 0; +} + +static long __init sclp_hsa_copy_wait(void) +{ + struct sccb_header *sccb = (void *) &sccb_early; + + memset(sccb, 0, PAGE_SIZE); + sccb->length = PAGE_SIZE; + if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb)) + return -EIO; + return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE; +} + +unsigned long sclp_get_hsa_size(void) +{ + return sclp_hsa_size; +} + +static void __init sclp_hsa_size_detect(void) +{ + long size; + + /* First try synchronous interface (LPAR) */ + if (sclp_set_event_mask(0, 0x40000010)) + return; + size = sclp_hsa_size_init(); + if (size < 0) + return; + if (size != 0) + goto out; + /* Then try asynchronous interface (z/VM) */ + if (sclp_set_event_mask(0x00000010, 0x40000010)) + return; + size = sclp_hsa_size_init(); + if (size < 0) + return; + size = sclp_hsa_copy_wait(); + if (size < 0) + return; +out: + sclp_hsa_size = size; +} + +void __init sclp_early_detect(void) +{ + sclp_facilities_detect(); + sclp_hsa_size_detect(); + 
sclp_set_event_mask(0, 0); +} diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index b1032931a1c..561a0414b35 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -1,7 +1,7 @@ /* - * Sclp "store data in absolut storage" + * SCLP "store data in absolute storage" * - * Copyright IBM Corp. 2003, 2007 + * Copyright IBM Corp. 2003, 2013 * Author(s): Michael Holzheu */ @@ -14,6 +14,7 @@ #include <asm/debug.h> #include <asm/ipl.h> +#include "sclp_sdias.h" #include "sclp.h" #include "sclp_rw.h" @@ -22,46 +23,12 @@ #define SDIAS_RETRIES 300 #define SDIAS_SLEEP_TICKS 50 -#define EQ_STORE_DATA 0x0 -#define EQ_SIZE 0x1 -#define DI_FCP_DUMP 0x0 -#define ASA_SIZE_32 0x0 -#define ASA_SIZE_64 0x1 -#define EVSTATE_ALL_STORED 0x0 -#define EVSTATE_NO_DATA 0x3 -#define EVSTATE_PART_STORED 0x10 - static struct debug_info *sdias_dbf; static struct sclp_register sclp_sdias_register = { .send_mask = EVTYP_SDIAS_MASK, }; -struct sdias_evbuf { - struct evbuf_header hdr; - u8 event_qual; - u8 data_id; - u64 reserved2; - u32 event_id; - u16 reserved3; - u8 asa_size; - u8 event_status; - u32 reserved4; - u32 blk_cnt; - u64 asa; - u32 reserved5; - u32 fbn; - u32 reserved6; - u32 lbn; - u16 reserved7; - u16 dbs; -} __attribute__((packed)); - -struct sdias_sccb { - struct sccb_header hdr; - struct sdias_evbuf evbuf; -} __attribute__((packed)); - static struct sdias_sccb sccb __attribute__((aligned(4096))); static struct sdias_evbuf sdias_evbuf; @@ -148,8 +115,8 @@ int sclp_sdias_blk_count(void) sccb.hdr.length = sizeof(sccb); sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; - sccb.evbuf.event_qual = EQ_SIZE; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_SIZE; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; sccb.evbuf.dbs = 1; @@ -208,13 +175,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; sccb.evbuf.hdr.flags = 0; - sccb.evbuf.event_qual = EQ_STORE_DATA; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; #ifdef CONFIG_64BIT - sccb.evbuf.asa_size = ASA_SIZE_64; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; #else - sccb.evbuf.asa_size = ASA_SIZE_32; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32; #endif sccb.evbuf.event_status = 0; sccb.evbuf.blk_cnt = nr_blks; @@ -240,20 +207,19 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) } switch (sdias_evbuf.event_status) { - case EVSTATE_ALL_STORED: - TRACE("all stored\n"); - break; - case EVSTATE_PART_STORED: - TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); - break; - case EVSTATE_NO_DATA: - TRACE("no data\n"); - /* fall through */ - default: - pr_err("Error from SCLP while copying hsa. " - "Event status = %x\n", - sdias_evbuf.event_status); - rc = -EIO; + case SDIAS_EVSTATE_ALL_STORED: + TRACE("all stored\n"); + break; + case SDIAS_EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); + break; + case SDIAS_EVSTATE_NO_DATA: + TRACE("no data\n"); + /* fall through */ + default: + pr_err("Error from SCLP while copying hsa. 
Event status = %x\n", + sdias_evbuf.event_status); + rc = -EIO; } out: mutex_unlock(&sdias_mutex); diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h new file mode 100644 index 00000000000..f2431c41415 --- /dev/null +++ b/drivers/s390/char/sclp_sdias.h @@ -0,0 +1,46 @@ +/* + * SCLP "store data in absolute storage" + * + * Copyright IBM Corp. 2003, 2013 + */ + +#ifndef SCLP_SDIAS_H +#define SCLP_SDIAS_H + +#include "sclp.h" + +#define SDIAS_EQ_STORE_DATA 0x0 +#define SDIAS_EQ_SIZE 0x1 +#define SDIAS_DI_FCP_DUMP 0x0 +#define SDIAS_ASA_SIZE_32 0x0 +#define SDIAS_ASA_SIZE_64 0x1 +#define SDIAS_EVSTATE_ALL_STORED 0x0 +#define SDIAS_EVSTATE_NO_DATA 0x3 +#define SDIAS_EVSTATE_PART_STORED 0x10 + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __packed; + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __packed; + +#endif /* SCLP_SDIAS_H */ diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index ffb1fcf0bf5..3d8e4d63f51 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -328,9 +328,9 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, mem_offs = 0; /* Copy from HSA data */ - if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { - size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE - - mem_start)); + if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) { + size = min((count - hdr_count), + (size_t) (sclp_get_hsa_size() - mem_start)); rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); if (rc) goto fail; @@ -490,7 +490,7 @@ static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, static char str[18]; if (hsa_available) - snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE); + snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size()); else snprintf(str, sizeof(str), "0\n"); return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); @@ -584,17 +584,9 @@ static int __init sys_info_init(enum arch_id arch, unsigned long mem_end) static int __init check_sdias(void) { - int rc, act_hsa_size; - - rc = sclp_sdias_blk_count(); - if (rc < 0) { + if (!sclp_get_hsa_size()) { TRACE("Could not determine HSA size\n"); - return rc; - } - act_hsa_size = (rc - 1) * PAGE_SIZE; - if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { - TRACE("HSA size too small: %i\n", act_hsa_size); - return -EINVAL; + return -ENODEV; } return 0; } @@ -662,7 +654,7 @@ static int __init zcore_reipl_init(void) ipl_block = (void *) __get_free_page(GFP_KERNEL); if (!ipl_block) return -ENOMEM; - if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) + if (ipib_info.ipib < sclp_get_hsa_size()) rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); else rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index aca7bfc113a..3a2ee4a740b 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -190,7 +190,7 @@ static struct subchannel *eadm_get_idle_sch(void) return NULL; } -static int eadm_start_aob(struct aob *aob) +int eadm_start_aob(struct aob *aob) { struct eadm_private *private; struct subchannel *sch; @@ -218,6 +218,7 @@ out_unlock: return ret; } +EXPORT_SYMBOL_GPL(eadm_start_aob); static int eadm_subchannel_probe(struct subchannel *sch) { @@ -380,11 +381,6 @@ static struct 
css_driver eadm_subchannel_driver = { .restore = eadm_subchannel_restore, }; -static struct eadm_ops eadm_ops = { - .eadm_start = eadm_start_aob, - .owner = THIS_MODULE, -}; - static int __init eadm_sch_init(void) { int ret; @@ -404,7 +400,6 @@ static int __init eadm_sch_init(void) if (ret) goto cleanup; - register_eadm_ops(&eadm_ops); return ret; cleanup: @@ -415,7 +410,6 @@ cleanup: static void __exit eadm_sch_exit(void) { - unregister_eadm_ops(&eadm_ops); css_driver_unregister(&eadm_subchannel_driver); isc_unregister(EADM_SCH_ISC); debug_unregister(eadm_debug); diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index 46ec25632e8..15268edc54a 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c @@ -15,8 +15,6 @@ #include "chsc.h" static struct device *scm_root; -static struct eadm_ops *eadm_ops; -static DEFINE_MUTEX(eadm_ops_mutex); #define to_scm_dev(n) container_of(n, struct scm_device, dev) #define to_scm_drv(d) container_of(d, struct scm_driver, drv) @@ -73,49 +71,6 @@ void scm_driver_unregister(struct scm_driver *scmdrv) } EXPORT_SYMBOL_GPL(scm_driver_unregister); -int scm_get_ref(void) -{ - int ret = 0; - - mutex_lock(&eadm_ops_mutex); - if (!eadm_ops || !try_module_get(eadm_ops->owner)) - ret = -ENOENT; - mutex_unlock(&eadm_ops_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(scm_get_ref); - -void scm_put_ref(void) -{ - mutex_lock(&eadm_ops_mutex); - module_put(eadm_ops->owner); - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(scm_put_ref); - -void register_eadm_ops(struct eadm_ops *ops) -{ - mutex_lock(&eadm_ops_mutex); - eadm_ops = ops; - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(register_eadm_ops); - -void unregister_eadm_ops(struct eadm_ops *ops) -{ - mutex_lock(&eadm_ops_mutex); - eadm_ops = NULL; - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(unregister_eadm_ops); - -int scm_start_aob(struct aob *aob) -{ - return eadm_ops->eadm_start(aob); -} -EXPORT_SYMBOL_GPL(scm_start_aob); - void scm_irq_handler(struct aob *aob, int error) { struct aob_rq_header *aobrq = (void *) aob->request.data; diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index d85ac1a9d2c..fbcd48d0bfc 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) { + if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || + (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index e43db774204..bd6f743d87a 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1512,7 +1512,8 @@ static int pmcraid_notify_aen( } result = - genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); + genlmsg_multicast(&pmcraid_event_family, skb, 0, + pmcraid_event_family.id, GFP_ATOMIC); /* If there are no listeners, genlmsg_multicast may return non-zero * value. 
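The aacraid hunk above tightens validation of the user-supplied fibsize: it now rejects values smaller than a struct user_aac_srb with no scatter-gather entry as well as values that would overflow the adapter FIB. A minimal standalone sketch of that two-sided bounds check follows; the size constants are placeholders standing in for the driver's real structure sizes, not the actual values.

#include <stdbool.h>
#include <stddef.h>

/* Placeholders for sizeof(struct user_aac_srb) - sizeof(struct user_sgentry)
 * and for dev->max_fib_size - sizeof(struct aac_fibhdr) in the hunk above. */
#define SRB_MIN_SIZE	112u
#define FIB_MAX_SIZE	480u

/* Accept a user-supplied length only if it can hold at least a minimal
 * request and still fits inside one FIB, the same shape as the new check
 * in aac_send_raw_srb(). */
static bool fibsize_valid(size_t fibsize)
{
	return fibsize >= SRB_MIN_SIZE && fibsize <= FIB_MAX_SIZE;
}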
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index f379c7f3034..2700a5a09bd 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -24,12 +24,15 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/delay.h> #include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_srp.h> +#include "scsi_priv.h" #include "scsi_transport_srp_internal.h" struct srp_host_attrs { @@ -38,7 +41,7 @@ struct srp_host_attrs { #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) #define SRP_HOST_ATTRS 0 -#define SRP_RPORT_ATTRS 3 +#define SRP_RPORT_ATTRS 8 struct srp_internal { struct scsi_transport_template t; @@ -54,6 +57,36 @@ struct srp_internal { #define dev_to_rport(d) container_of(d, struct srp_rport, dev) #define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent) +static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) +{ + return dev_to_shost(r->dev.parent); +} + +/** + * srp_tmo_valid() - check timeout combination validity + * + * The combination of the timeout parameters must be such that SCSI commands + * are finished in a reasonable time. Hence do not allow the fast I/O fail + * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these + * parameters must be such that multipath can detect failed paths timely. + * Hence do not allow all three parameters to be disabled simultaneously. + */ +int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo) +{ + if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) + return -EINVAL; + if (reconnect_delay == 0) + return -EINVAL; + if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + if (dev_loss_tmo >= LONG_MAX / HZ) + return -EINVAL; + if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 && + fast_io_fail_tmo >= dev_loss_tmo) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(srp_tmo_valid); static int srp_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) @@ -134,10 +167,465 @@ static ssize_t store_srp_rport_delete(struct device *dev, static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete); +static ssize_t show_srp_rport_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + static const char *const state_name[] = { + [SRP_RPORT_RUNNING] = "running", + [SRP_RPORT_BLOCKED] = "blocked", + [SRP_RPORT_FAIL_FAST] = "fail-fast", + [SRP_RPORT_LOST] = "lost", + }; + struct srp_rport *rport = transport_class_to_srp_rport(dev); + enum srp_rport_state state = rport->state; + + return sprintf(buf, "%s\n", + (unsigned)state < ARRAY_SIZE(state_name) ? + state_name[state] : "???"); +} + +static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL); + +static ssize_t srp_show_tmo(char *buf, int tmo) +{ + return tmo >= 0 ? 
sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); +} + +static int srp_parse_tmo(int *tmo, const char *buf) +{ + int res = 0; + + if (strncmp(buf, "off", 3) != 0) + res = kstrtoint(buf, 0, tmo); + else + *tmo = -1; + + return res; +} + +static ssize_t show_reconnect_delay(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->reconnect_delay); +} + +static ssize_t store_reconnect_delay(struct device *dev, + struct device_attribute *attr, + const char *buf, const size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res, delay; + + res = srp_parse_tmo(&delay, buf); + if (res) + goto out; + res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + + if (rport->reconnect_delay <= 0 && delay > 0 && + rport->state != SRP_RPORT_RUNNING) { + queue_delayed_work(system_long_wq, &rport->reconnect_work, + delay * HZ); + } else if (delay <= 0) { + cancel_delayed_work(&rport->reconnect_work); + } + rport->reconnect_delay = delay; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay, + store_reconnect_delay); + +static ssize_t show_failed_reconnects(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return sprintf(buf, "%d\n", rport->failed_reconnects); +} + +static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL); + +static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->fast_io_fail_tmo); +} + +static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int fast_io_fail_tmo; + + res = srp_parse_tmo(&fast_io_fail_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + rport->fast_io_fail_tmo = fast_io_fail_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_fast_io_fail_tmo, + store_srp_rport_fast_io_fail_tmo); + +static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->dev_loss_tmo); +} + +static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int dev_loss_tmo; + + res = srp_parse_tmo(&dev_loss_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, + dev_loss_tmo); + if (res) + goto out; + rport->dev_loss_tmo = dev_loss_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_dev_loss_tmo, + store_srp_rport_dev_loss_tmo); + +static int srp_rport_set_state(struct srp_rport *rport, + enum srp_rport_state new_state) +{ + enum srp_rport_state old_state = rport->state; + + lockdep_assert_held(&rport->mutex); + + switch (new_state) { + case SRP_RPORT_RUNNING: + switch (old_state) { + 
case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_BLOCKED: + switch (old_state) { + case SRP_RPORT_RUNNING: + break; + default: + goto invalid; + } + break; + case SRP_RPORT_FAIL_FAST: + switch (old_state) { + case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_LOST: + break; + } + rport->state = new_state; + return 0; + +invalid: + return -EINVAL; +} + +/** + * srp_reconnect_work() - reconnect and schedule a new attempt if necessary + */ +static void srp_reconnect_work(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, reconnect_work); + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, res; + + res = srp_reconnect_rport(rport); + if (res != 0) { + shost_printk(KERN_ERR, shost, + "reconnect attempt %d failed (%d)\n", + ++rport->failed_reconnects, res); + delay = rport->reconnect_delay * + min(100, max(1, rport->failed_reconnects - 10)); + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, delay * HZ); + } +} + +static void __rport_fail_io_fast(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i; + + lockdep_assert_held(&rport->mutex); + + if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) + return; + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + + /* Involve the LLD if possible to terminate all I/O on the rport. */ + i = to_srp_internal(shost->transportt); + if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); +} + +/** + * rport_fast_io_fail_timedout() - fast I/O failure timeout handler + */ +static void rport_fast_io_fail_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, fast_io_fail_work); + struct Scsi_Host *shost = rport_to_shost(rport); + + pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + mutex_unlock(&rport->mutex); +} + +/** + * rport_dev_loss_timedout() - device loss timeout handler + */ +static void rport_dev_loss_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, dev_loss_work); + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_info("dev_loss_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + mutex_unlock(&rport->mutex); + + i->f->rport_delete(rport); +} + +static void __srp_start_tl_fail_timers(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, fast_io_fail_tmo, dev_loss_tmo; + + lockdep_assert_held(&rport->mutex); + + if (!rport->deleted) { + delay = rport->reconnect_delay; + fast_io_fail_tmo = rport->fast_io_fail_tmo; + dev_loss_tmo = rport->dev_loss_tmo; + pr_debug("%s current state: %d\n", + dev_name(&shost->shost_gendev), rport->state); + + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, + 1UL * delay * HZ); + if (fast_io_fail_tmo >= 0 && + srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { + pr_debug("%s new state: %d\n", + dev_name(&shost->shost_gendev), + rport->state); + 
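srp_reconnect_work() above applies a linear back-off: the first ten or so failed attempts are retried every reconnect_delay seconds, after which the interval grows by one reconnect_delay per failure up to a cap of 100 * reconnect_delay. A stand-alone sketch of that arithmetic; the 10-second reconnect_delay is only an example value (it matches the fallback assigned further down in srp_rport_add()):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
        const int reconnect_delay = 10;         /* seconds */
        int failed;

        for (failed = 1; failed <= 115; failed++) {
                /* Same expression as in srp_reconnect_work(). */
                int delay = reconnect_delay *
                            min_int(100, max_int(1, failed - 10));

                if (failed <= 12 || failed % 25 == 0 || failed == 115)
                        printf("failure %3d: next attempt in %4d s\n",
                               failed, delay);
        }
        return 0;
}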
scsi_target_block(&shost->shost_gendev); + queue_delayed_work(system_long_wq, + &rport->fast_io_fail_work, + 1UL * fast_io_fail_tmo * HZ); + } + if (dev_loss_tmo >= 0) + queue_delayed_work(system_long_wq, + &rport->dev_loss_work, + 1UL * dev_loss_tmo * HZ); + } else { + pr_debug("%s has already been deleted\n", + dev_name(&shost->shost_gendev)); + srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST); + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } +} + +/** + * srp_start_tl_fail_timers() - start the transport layer failure timers + * + * Start the transport layer fast I/O failure and device loss timers. Do not + * modify a timer that was already started. + */ +void srp_start_tl_fail_timers(struct srp_rport *rport) +{ + mutex_lock(&rport->mutex); + __srp_start_tl_fail_timers(rport); + mutex_unlock(&rport->mutex); +} +EXPORT_SYMBOL(srp_start_tl_fail_timers); + +/** + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() + */ +static int scsi_request_fn_active(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + struct request_queue *q; + int request_fn_active = 0; + + shost_for_each_device(sdev, shost) { + q = sdev->request_queue; + + spin_lock_irq(q->queue_lock); + request_fn_active += q->request_fn_active; + spin_unlock_irq(q->queue_lock); + } + + return request_fn_active; +} + +/** + * srp_reconnect_rport() - reconnect to an SRP target port + * + * Blocks SCSI command queueing before invoking reconnect() such that + * queuecommand() won't be invoked concurrently with reconnect() from outside + * the SCSI EH. This is important since a reconnect() implementation may + * reallocate resources needed by queuecommand(). + * + * Notes: + * - This function neither waits until outstanding requests have finished nor + * tries to abort these. It is the responsibility of the reconnect() + * function to finish outstanding commands before reconnecting to the target + * port. + * - It is the responsibility of the caller to ensure that the resources + * reallocated by the reconnect() function won't be used while this function + * is in progress. One possible strategy is to invoke this function from + * the context of the SCSI EH thread only. Another possible strategy is to + * lock the rport mutex inside each SCSI LLD callback that can be invoked by + * the SCSI EH (the scsi_host_template.eh_*() functions and also the + * scsi_host_template.queuecommand() function). + */ +int srp_reconnect_rport(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + struct scsi_device *sdev; + int res; + + pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev)); + + res = mutex_lock_interruptible(&rport->mutex); + if (res) + goto out; + scsi_target_block(&shost->shost_gendev); + while (scsi_request_fn_active(shost)) + msleep(20); + res = i->f->reconnect(rport); + pr_debug("%s (state %d): transport.reconnect() returned %d\n", + dev_name(&shost->shost_gendev), rport->state, res); + if (res == 0) { + cancel_delayed_work(&rport->fast_io_fail_work); + cancel_delayed_work(&rport->dev_loss_work); + + rport->failed_reconnects = 0; + srp_rport_set_state(rport, SRP_RPORT_RUNNING); + scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); + /* + * If the SCSI error handler has offlined one or more devices, + * invoking scsi_target_unblock() won't change the state of + * these devices into running so do that explicitly. 
+ */ + spin_lock_irq(shost->host_lock); + __shost_for_each_device(sdev, shost) + if (sdev->sdev_state == SDEV_OFFLINE) + sdev->sdev_state = SDEV_RUNNING; + spin_unlock_irq(shost->host_lock); + } else if (rport->state == SRP_RPORT_RUNNING) { + /* + * srp_reconnect_rport() was invoked with fast_io_fail + * off. Mark the port as failed and start the TL failure + * timers if these had not yet been started. + */ + __rport_fail_io_fast(rport); + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + __srp_start_tl_fail_timers(rport); + } else if (rport->state != SRP_RPORT_BLOCKED) { + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } + mutex_unlock(&rport->mutex); + +out: + return res; +} +EXPORT_SYMBOL(srp_reconnect_rport); + +/** + * srp_timed_out() - SRP transport intercept of the SCSI timeout EH + * + * If a timeout occurs while an rport is in the blocked state, ask the SCSI + * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (BLK_EH_NOT_HANDLED). + * + * Note: This function is called from soft-IRQ context and with the request + * queue lock held. + */ +static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); + return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? + BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; +} + static void srp_rport_release(struct device *dev) { struct srp_rport *rport = dev_to_rport(dev); + cancel_delayed_work_sync(&rport->reconnect_work); + cancel_delayed_work_sync(&rport->fast_io_fail_work); + cancel_delayed_work_sync(&rport->dev_loss_work); + put_device(dev->parent); kfree(rport); } @@ -185,6 +673,24 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev) } /** + * srp_rport_get() - increment rport reference count + */ +void srp_rport_get(struct srp_rport *rport) +{ + get_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_get); + +/** + * srp_rport_put() - decrement rport reference count + */ +void srp_rport_put(struct srp_rport *rport) +{ + put_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_put); + +/** * srp_rport_add - add a SRP remote port to the device hierarchy * @shost: scsi host the remote port is connected to. * @ids: The port id for the remote port. @@ -196,12 +702,15 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, { struct srp_rport *rport; struct device *parent = &shost->shost_gendev; + struct srp_internal *i = to_srp_internal(shost->transportt); int id, ret; rport = kzalloc(sizeof(*rport), GFP_KERNEL); if (!rport) return ERR_PTR(-ENOMEM); + mutex_init(&rport->mutex); + device_initialize(&rport->dev); rport->dev.parent = get_device(parent); @@ -210,6 +719,17 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); rport->roles = ids->roles; + if (i->f->reconnect) + rport->reconnect_delay = i->f->reconnect_delay ? + *i->f->reconnect_delay : 10; + INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); + rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? + *i->f->fast_io_fail_tmo : 15; + rport->dev_loss_tmo = i->f->dev_loss_tmo ? 
*i->f->dev_loss_tmo : 60; + INIT_DELAYED_WORK(&rport->fast_io_fail_work, + rport_fast_io_fail_timedout); + INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); + id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); @@ -259,6 +779,13 @@ void srp_rport_del(struct srp_rport *rport) transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + rport->deleted = true; + mutex_unlock(&rport->mutex); + put_device(dev); } EXPORT_SYMBOL_GPL(srp_rport_del); @@ -310,6 +837,8 @@ srp_attach_transport(struct srp_function_template *ft) if (!i) return NULL; + i->t.eh_timed_out = srp_timed_out; + i->t.tsk_mgmt_response = srp_tsk_mgmt_response; i->t.it_nexus_response = srp_it_nexus_response; @@ -327,6 +856,15 @@ srp_attach_transport(struct srp_function_template *ft) count = 0; i->rport_attrs[count++] = &dev_attr_port_id; i->rport_attrs[count++] = &dev_attr_roles; + if (ft->has_rport_state) { + i->rport_attrs[count++] = &dev_attr_state; + i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo; + i->rport_attrs[count++] = &dev_attr_dev_loss_tmo; + } + if (ft->reconnect) { + i->rport_attrs[count++] = &dev_attr_reconnect_delay; + i->rport_attrs[count++] = &dev_attr_failed_reconnects; + } if (ft->rport_delete) i->rport_attrs[count++] = &dev_attr_delete; i->rport_attrs[count++] = NULL; diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index b9f0192758d..6d207afec8c 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->tx_sgl, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); txdesc->callback = dw_spi_dma_done; txdesc->callback_param = dws; @@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->rx_sgl, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); rxdesc->callback = dw_spi_dma_done; rxdesc->callback_param = dws; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8d85ddc4601..18cc625d887 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -357,6 +357,19 @@ struct spi_device *spi_alloc_device(struct spi_master *master) } EXPORT_SYMBOL_GPL(spi_alloc_device); +static void spi_dev_set_name(struct spi_device *spi) +{ + struct acpi_device *adev = ACPI_COMPANION(&spi->dev); + + if (adev) { + dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); + return; + } + + dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), + spi->chip_select); +} + /** * spi_add_device - Add spi_device allocated with spi_alloc_device * @spi: spi_device to register @@ -383,9 +396,7 @@ int spi_add_device(struct spi_device *spi) } /* Set the bus ID string */ - dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), - spi->chip_select); - + spi_dev_set_name(spi); /* We need to make sure there's no other device with this * chipselect **BEFORE** we call setup(), else we'll trash @@ -1144,7 +1155,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, return AE_NO_MEMORY; } - ACPI_HANDLE_SET(&spi->dev, handle); + ACPI_COMPANION_SET(&spi->dev, adev); spi->irq = -1; INIT_LIST_HEAD(&resource_list); diff --git a/drivers/staging/media/lirc/TODO b/drivers/staging/media/lirc/TODO index b6cb593f55c..cbea5d84fed 100644 --- a/drivers/staging/media/lirc/TODO +++ 
b/drivers/staging/media/lirc/TODO @@ -2,6 +2,11 @@ (see drivers/media/IR/mceusb.c vs. lirc_mceusb.c in lirc cvs for an example of a previously completed port). +- lirc_bt829 uses registers on a Mach64 VT, which has a separate kernel + framebuffer driver (atyfb) and userland X driver (mach64). It can't + simply be converted to a normal PCI driver, but ideally it should be + coordinated with the other drivers. + Please send patches to: Jarod Wilson <jarod@wilsonet.com> Greg Kroah-Hartman <greg@kroah.com> diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c index fbbdce4c119..30edc740ac2 100644 --- a/drivers/staging/media/lirc/lirc_bt829.c +++ b/drivers/staging/media/lirc/lirc_bt829.c @@ -63,7 +63,7 @@ static bool debug; } while (0) static int atir_minor; -static unsigned long pci_addr_phys; +static phys_addr_t pci_addr_phys; static unsigned char *pci_addr_lin; static struct lirc_driver atir_driver; @@ -78,11 +78,11 @@ static struct pci_dev *do_pci_probe(void) pci_addr_phys = 0; if (my_dev->resource[0].flags & IORESOURCE_MEM) { pci_addr_phys = my_dev->resource[0].start; - pr_info("memory at 0x%08X\n", - (unsigned int)pci_addr_phys); + pr_info("memory at %pa\n", &pci_addr_phys); } if (pci_addr_phys == 0) { pr_err("no memory resource ?\n"); + pci_dev_put(my_dev); return NULL; } } else { @@ -120,13 +120,20 @@ static void atir_set_use_dec(void *data) int init_module(void) { struct pci_dev *pdev; + int rc; pdev = do_pci_probe(); if (pdev == NULL) return -ENODEV; - if (!atir_init_start()) - return -ENODEV; + rc = pci_enable_device(pdev); + if (rc) + goto err_put_dev; + + if (!atir_init_start()) { + rc = -ENODEV; + goto err_disable; + } strcpy(atir_driver.name, "ATIR"); atir_driver.minor = -1; @@ -142,17 +149,31 @@ int init_module(void) atir_minor = lirc_register_driver(&atir_driver); if (atir_minor < 0) { pr_err("failed to register driver!\n"); - return atir_minor; + rc = atir_minor; + goto err_unmap; } dprintk("driver is registered on minor %d\n", atir_minor); return 0; + +err_unmap: + iounmap(pci_addr_lin); +err_disable: + pci_disable_device(pdev); +err_put_dev: + pci_dev_put(pdev); + return rc; } void cleanup_module(void) { + struct pci_dev *pdev = to_pci_dev(atir_driver.dev); + lirc_unregister_driver(atir_minor); + iounmap(pci_addr_lin); + pci_disable_device(pdev); + pci_dev_put(pdev); } diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c index f6bc4c91ab3..2e3a98575d4 100644 --- a/drivers/staging/media/lirc/lirc_serial.c +++ b/drivers/staging/media/lirc/lirc_serial.c @@ -707,7 +707,8 @@ static irqreturn_t irq_handler(int i, void *blah) pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n", dcd, sense, tv.tv_sec, lasttv.tv_sec, - tv.tv_usec, lasttv.tv_usec); + (unsigned long)tv.tv_usec, + (unsigned long)lasttv.tv_usec); continue; } @@ -719,7 +720,8 @@ static irqreturn_t irq_handler(int i, void *blah) pr_warn("%d %d %lx %lx %lx %lx\n", dcd, sense, tv.tv_sec, lasttv.tv_sec, - tv.tv_usec, lasttv.tv_usec); + (unsigned long)tv.tv_usec, + (unsigned long)lasttv.tv_usec); data = PULSE_MASK; } else if (deltv > 15) { data = PULSE_MASK; /* really long time */ @@ -728,7 +730,8 @@ static irqreturn_t irq_handler(int i, void *blah) pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n", dcd, sense, tv.tv_sec, lasttv.tv_sec, - tv.tv_usec, lasttv.tv_usec); + (unsigned long)tv.tv_usec, + (unsigned long)lasttv.tv_usec); /* * detecting pulse while this * MUST be a space! 
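The reworked lirc_bt829 init path above follows the usual goto-based unwind: resources are acquired in order and released in reverse order on failure, so a late error (for example from lirc_register_driver()) no longer leaks the I/O mapping, the enabled device, or the PCI reference taken by do_pci_probe(). A stand-alone sketch of that shape; acquire() and release() are stand-ins, not kernel functions:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
        if (fail) {
                printf("acquire %s: failed\n", what);
                return -1;
        }
        printf("acquire %s\n", what);
        return 0;
}

static void release(const char *what)
{
        printf("release %s\n", what);
}

static int init_sketch(void)
{
        int rc;

        rc = acquire("pci device reference", 0);        /* do_pci_probe() */
        if (rc)
                return rc;
        rc = acquire("enabled pci device", 0);          /* pci_enable_device() */
        if (rc)
                goto err_put_dev;
        rc = acquire("i/o mapping", 0);                 /* atir_init_start() */
        if (rc)
                goto err_disable;
        rc = acquire("lirc registration", 1);           /* lirc_register_driver() */
        if (rc)
                goto err_unmap;
        return 0;

err_unmap:
        release("i/o mapping");                         /* iounmap() */
err_disable:
        release("enabled pci device");                  /* pci_disable_device() */
err_put_dev:
        release("pci device reference");                /* pci_dev_put() */
        return rc;
}

int main(void)
{
        return init_sketch() ? 1 : 0;
}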
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 11d5338b4f2..0feeaadf29d 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -61,6 +61,9 @@ #include <media/lirc_dev.h> #include <media/lirc.h> +/* Max transfer size done by I2C transfer functions */ +#define MAX_XFER_SIZE 64 + struct IR; struct IR_rx { @@ -941,7 +944,14 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos) schedule(); set_current_state(TASK_INTERRUPTIBLE); } else { - unsigned char buf[rbuf->chunk_size]; + unsigned char buf[MAX_XFER_SIZE]; + + if (rbuf->chunk_size > sizeof(buf)) { + zilog_error("chunk_size is too big (%d)!\n", + rbuf->chunk_size); + ret = -EINVAL; + break; + } m = lirc_buffer_read(rbuf, buf); if (m == rbuf->chunk_size) { ret = copy_to_user((void *)outbuf+written, buf, diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig index 76d5bbd4d93..0c349c8595e 100644 --- a/drivers/staging/media/msi3101/Kconfig +++ b/drivers/staging/media/msi3101/Kconfig @@ -1,4 +1,5 @@ config USB_MSI3101 tristate "Mirics MSi3101 SDR Dongle" depends on USB && VIDEO_DEV && VIDEO_V4L2 - select VIDEOBUF2_VMALLOC + select VIDEOBUF2_CORE + select VIDEOBUF2_VMALLOC diff --git a/drivers/staging/media/solo6x10/solo6x10-disp.c b/drivers/staging/media/solo6x10/solo6x10-disp.c index 32d9953bc36..145295a5db7 100644 --- a/drivers/staging/media/solo6x10/solo6x10-disp.c +++ b/drivers/staging/media/solo6x10/solo6x10-disp.c @@ -176,18 +176,27 @@ static void solo_vout_config(struct solo_dev *solo_dev) static int solo_dma_vin_region(struct solo_dev *solo_dev, u32 off, u16 val, int reg_size) { - u16 buf[64]; - int i; - int ret = 0; + u16 *buf; + const int n = 64, size = n * sizeof(*buf); + int i, ret = 0; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; - for (i = 0; i < sizeof(buf) >> 1; i++) + for (i = 0; i < n; i++) buf[i] = cpu_to_le16(val); - for (i = 0; i < reg_size; i += sizeof(buf)) - ret |= solo_p2m_dma(solo_dev, 1, buf, - SOLO_MOTION_EXT_ADDR(solo_dev) + off + i, - sizeof(buf), 0, 0); + for (i = 0; i < reg_size; i += size) { + ret = solo_p2m_dma(solo_dev, 1, buf, + SOLO_MOTION_EXT_ADDR(solo_dev) + off + i, + size, 0, 0); + + if (ret) + break; + } + kfree(buf); return ret; } diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c index a4c589604b0..d582c5b84c1 100644 --- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c +++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c @@ -95,38 +95,11 @@ static unsigned char vop_6110_pal_cif[] = { 0x01, 0x68, 0xce, 0x32, 0x28, 0x00, 0x00, 0x00, }; -struct vop_header { - /* VE_STATUS0 */ - u32 mpeg_size:20, sad_motion_flag:1, video_motion_flag:1, vop_type:2, - channel:5, source_fl:1, interlace:1, progressive:1; - - /* VE_STATUS1 */ - u32 vsize:8, hsize:8, last_queue:4, nop0:8, scale:4; - - /* VE_STATUS2 */ - u32 mpeg_off; - - /* VE_STATUS3 */ - u32 jpeg_off; - - /* VE_STATUS4 */ - u32 jpeg_size:20, interval:10, nop1:2; - - /* VE_STATUS5/6 */ - u32 sec, usec; - - /* VE_STATUS7/8/9 */ - u32 nop2[3]; - - /* VE_STATUS10 */ - u32 mpeg_size_alt:20, nop3:12; - - u32 end_nops[5]; -} __packed; +typedef __le32 vop_header[16]; struct solo_enc_buf { enum solo_enc_types type; - struct vop_header *vh; + const vop_header *vh; int motion; }; @@ -346,7 +319,7 @@ static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma, /* Build a descriptor queue out of an SG list 
and send it to the P2M for * processing. */ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, - struct vb2_dma_sg_desc *vbuf, int off, int size, + struct sg_table *vbuf, int off, int size, unsigned int base, unsigned int base_size) { struct solo_dev *solo_dev = solo_enc->solo_dev; @@ -359,7 +332,7 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, solo_enc->desc_count = 1; - for_each_sg(vbuf->sglist, sg, vbuf->num_pages, i) { + for_each_sg(vbuf->sgl, sg, vbuf->nents, i) { struct solo_p2m_desc *desc; dma_addr_t dma; int len; @@ -430,84 +403,145 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, solo_enc->desc_count - 1); } +/* Extract values from VOP header - VE_STATUSxx */ +static inline int vop_interlaced(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[0]) >> 30) & 1; +} + +static inline u8 vop_channel(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[0]) >> 24) & 0x1F; +} + +static inline u8 vop_type(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[0]) >> 22) & 3; +} + +static inline u32 vop_mpeg_size(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[0]) & 0xFFFFF; +} + +static inline u8 vop_hsize(const vop_header *vh) +{ + return (__le32_to_cpu((*vh)[1]) >> 8) & 0xFF; +} + +static inline u8 vop_vsize(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[1]) & 0xFF; +} + +static inline u32 vop_mpeg_offset(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[2]); +} + +static inline u32 vop_jpeg_offset(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[3]); +} + +static inline u32 vop_jpeg_size(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[4]) & 0xFFFFF; +} + +static inline u32 vop_sec(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[5]); +} + +static inline u32 vop_usec(const vop_header *vh) +{ + return __le32_to_cpu((*vh)[6]); +} + static int solo_fill_jpeg(struct solo_enc_dev *solo_enc, - struct vb2_buffer *vb, struct vop_header *vh) + struct vb2_buffer *vb, const vop_header *vh) { struct solo_dev *solo_dev = solo_enc->solo_dev; - struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); int frame_size; int ret; vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; - if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len) + if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len) return -EIO; - sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages, - solo_enc->jpeg_header, - solo_enc->jpeg_len); - - frame_size = (vh->jpeg_size + solo_enc->jpeg_len + (DMA_ALIGN - 1)) + frame_size = (vop_jpeg_size(vh) + solo_enc->jpeg_len + (DMA_ALIGN - 1)) & ~(DMA_ALIGN - 1); - vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len); + vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len); - dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + /* may discard all previous data in vbuf->sgl */ + dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); - ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off, - frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), - SOLO_JPEG_EXT_SIZE(solo_dev)); - dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, + vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev), + frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), + SOLO_JPEG_EXT_SIZE(solo_dev)); + dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); + + /* add the header only after dma_unmap_sg() */ + 
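The vop_*() helpers shown earlier replace the packed vop_header bitfield struct with explicit little-endian loads plus shift/mask extraction, which is endian-safe and lets the header stay const. A small user-space illustration of the same extraction for a few VE_STATUS0 fields (the example word and field values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit load, standing in for __le32_to_cpu(). */
static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        /* Example VE_STATUS0 word: channel 3, vop type 0, mpeg size 0x12345. */
        uint32_t status0 = (3u << 24) | (0u << 22) | 0x12345;
        uint8_t raw[4] = {
                (uint8_t)(status0 & 0xff), (uint8_t)((status0 >> 8) & 0xff),
                (uint8_t)((status0 >> 16) & 0xff), (uint8_t)((status0 >> 24) & 0xff),
        };
        uint32_t w = get_le32(raw);

        printf("channel   = %u\n", (w >> 24) & 0x1F);    /* vop_channel() */
        printf("vop type  = %u\n", (w >> 22) & 3);       /* vop_type() */
        printf("mpeg size = 0x%X\n", w & 0xFFFFF);       /* vop_mpeg_size() */
        return 0;
}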
sg_copy_from_buffer(vbuf->sgl, vbuf->nents, + solo_enc->jpeg_header, solo_enc->jpeg_len); + return ret; } static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, - struct vb2_buffer *vb, struct vop_header *vh) + struct vb2_buffer *vb, const vop_header *vh) { struct solo_dev *solo_dev = solo_enc->solo_dev; - struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0); + struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); int frame_off, frame_size; int skip = 0; int ret; - if (vb2_plane_size(vb, 0) < vh->mpeg_size) + if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh)) return -EIO; /* If this is a key frame, add extra header */ - if (!vh->vop_type) { - sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages, - solo_enc->vop, - solo_enc->vop_len); - + vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME); + if (!vop_type(vh)) { skip = solo_enc->vop_len; - vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; - vb2_set_plane_payload(vb, 0, vh->mpeg_size + solo_enc->vop_len); + vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) + solo_enc->vop_len); } else { vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME; - vb2_set_plane_payload(vb, 0, vh->mpeg_size); + vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh)); } /* Now get the actual mpeg payload */ - frame_off = (vh->mpeg_off + sizeof(*vh)) + frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) + sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev); - frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1)) + frame_size = (vop_mpeg_size(vh) + skip + (DMA_ALIGN - 1)) & ~(DMA_ALIGN - 1); - dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + /* may discard all previous data in vbuf->sgl */ + dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size, SOLO_MP4E_EXT_ADDR(solo_dev), SOLO_MP4E_EXT_SIZE(solo_dev)); - dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, + dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, DMA_FROM_DEVICE); + + /* add the header only after dma_unmap_sg() */ + if (!vop_type(vh)) + sg_copy_from_buffer(vbuf->sgl, vbuf->nents, + solo_enc->vop, solo_enc->vop_len); return ret; } static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, struct vb2_buffer *vb, struct solo_enc_buf *enc_buf) { - struct vop_header *vh = enc_buf->vh; + const vop_header *vh = enc_buf->vh; int ret; /* Check for motion flags */ @@ -531,8 +565,8 @@ static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, if (!ret) { vb->v4l2_buf.sequence = solo_enc->sequence++; - vb->v4l2_buf.timestamp.tv_sec = vh->sec; - vb->v4l2_buf.timestamp.tv_usec = vh->usec; + vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh); + vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh); } vb2_buffer_done(vb, ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); @@ -605,15 +639,13 @@ static void solo_handle_ring(struct solo_dev *solo_dev) /* FAIL... 
*/ if (enc_get_mpeg_dma(solo_dev, solo_dev->vh_dma, off, - sizeof(struct vop_header))) + sizeof(vop_header))) continue; - enc_buf.vh = (struct vop_header *)solo_dev->vh_buf; - enc_buf.vh->mpeg_off -= SOLO_MP4E_EXT_ADDR(solo_dev); - enc_buf.vh->jpeg_off -= SOLO_JPEG_EXT_ADDR(solo_dev); + enc_buf.vh = solo_dev->vh_buf; /* Sanity check */ - if (enc_buf.vh->mpeg_off != off) + if (vop_mpeg_offset(enc_buf.vh) != SOLO_MP4E_EXT_ADDR(solo_dev) + off) continue; if (solo_motion_detected(solo_enc)) @@ -1329,7 +1361,7 @@ int solo_enc_v4l2_init(struct solo_dev *solo_dev, unsigned nr) init_waitqueue_head(&solo_dev->ring_thread_wait); - solo_dev->vh_size = sizeof(struct vop_header); + solo_dev->vh_size = sizeof(vop_header); solo_dev->vh_buf = pci_alloc_consistent(solo_dev->pdev, solo_dev->vh_size, &solo_dev->vh_dma); diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h index 6f91d2e34b2..f1bbb8cb74e 100644 --- a/drivers/staging/media/solo6x10/solo6x10.h +++ b/drivers/staging/media/solo6x10/solo6x10.h @@ -94,7 +94,6 @@ #define SOLO_ENC_MODE_HD1 1 #define SOLO_ENC_MODE_D1 9 -#define SOLO_DEFAULT_GOP 30 #define SOLO_DEFAULT_QP 3 #ifndef V4L2_BUF_FLAG_MOTION_ON diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 03a567199bb..f1d511a9475 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1608,15 +1608,17 @@ exit: EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); #ifdef CONFIG_NET +static const struct genl_multicast_group thermal_event_mcgrps[] = { + { .name = THERMAL_GENL_MCAST_GROUP_NAME, }, +}; + static struct genl_family thermal_event_genl_family = { .id = GENL_ID_GENERATE, .name = THERMAL_GENL_FAMILY_NAME, .version = THERMAL_GENL_VERSION, .maxattr = THERMAL_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group thermal_event_mcgrp = { - .name = THERMAL_GENL_MCAST_GROUP_NAME, + .mcgrps = thermal_event_mcgrps, + .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps), }; int thermal_generate_netlink_event(struct thermal_zone_device *tz, @@ -1677,7 +1679,8 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz, return result; } - result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); + result = genlmsg_multicast(&thermal_event_genl_family, skb, 0, + 0, GFP_ATOMIC); if (result) dev_err(&tz->device, "Failed to send netlink event:%d", result); @@ -1687,17 +1690,7 @@ EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); static int genetlink_init(void) { - int result; - - result = genl_register_family(&thermal_event_genl_family); - if (result) - return result; - - result = genl_register_mc_group(&thermal_event_genl_family, - &thermal_event_mcgrp); - if (result) - genl_unregister_family(&thermal_event_genl_family); - return result; + return genl_register_family(&thermal_event_genl_family); } static void genetlink_exit(void) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 537750261aa..7d8103cd3e2 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work) desc = s->desc_rx[new]; if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { /* Handle incomplete DMA receive */ struct dma_chan *chan = s->chan_rx; struct shdma_desc *sh_desc = container_of(desc, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 06cec635e70..a7c04e24ca4 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5501,6 +5501,6 @@ 
acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, if (!hub) return NULL; - return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev); + return ACPI_HANDLE(&hub->ports[port1 - 1]->dev); } #endif diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 255c14464bf..4e243c37f17 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -173,7 +173,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) } /* root hub's parent is the usb hcd. */ - parent_handle = DEVICE_ACPI_HANDLE(dev->parent); + parent_handle = ACPI_HANDLE(dev->parent); *handle = acpi_get_child(parent_handle, udev->portnum); if (!*handle) return -ENODEV; @@ -194,7 +194,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) raw_port_num = usb_hcd_find_raw_port_number(hcd, port_num); - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), + *handle = acpi_get_child(ACPI_HANDLE(&udev->dev), raw_port_num); if (!*handle) return -ENODEV; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 6df632e0bb5..5be6e919f78 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -392,6 +392,25 @@ config RETU_WATCHDOG To compile this driver as a module, choose M here: the module will be called retu_wdt. +config MOXART_WDT + tristate "MOXART watchdog" + depends on ARCH_MOXART + help + Say Y here to include Watchdog timer support for the watchdog + existing on the MOXA ART SoC series platforms. + + To compile this driver as a module, choose M here: the + module will be called moxart_wdt. + +config SIRFSOC_WATCHDOG + tristate "SiRFSOC watchdog" + depends on ARCH_SIRF + select WATCHDOG_CORE + default y + help + Support for CSR SiRFprimaII and SiRFatlasVI watchdog. When + the watchdog triggers the system will be reset. + # AVR32 Architecture config AT32AP700X_WDT @@ -866,6 +885,7 @@ config VIA_WDT config W83627HF_WDT tristate "W83627HF/W83627DHG Watchdog Timer" depends on X86 + select WATCHDOG_CORE ---help--- This is the driver for the hardware watchdog on the W83627HF chipset as used in Advantech PC-9578 and Tyan S2721-533 motherboards @@ -1125,6 +1145,13 @@ config LANTIQ_WDT help Hardware driver for the Lantiq SoC Watchdog Timer. +config RALINK_WDT + tristate "Ralink SoC watchdog" + select WATCHDOG_CORE + depends on RALINK + help + Hardware driver for the Ralink SoC Watchdog Timer. + # PARISC Architecture # POWERPC Architecture diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 8c7b8bcbbdc..91bd95a64ba 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -55,6 +55,8 @@ obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o +obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o +obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o @@ -134,6 +136,7 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o +obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o # PARISC Architecture diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c index 24a517777fa..5cf1621def9 100644 --- a/drivers/watchdog/acquirewdt.c +++ b/drivers/watchdog/acquirewdt.c @@ -60,8 +60,7 @@ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... 
values */ #include <linux/kernel.h> /* For printk/panic/... */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/fs.h> /* For file operations */ #include <linux/ioport.h> /* For io-port access */ @@ -337,4 +336,3 @@ module_exit(acq_exit); MODULE_AUTHOR("David Woodhouse"); MODULE_DESCRIPTION("Acquire Inc. Single Board Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c index cc6702fc526..a8961addc59 100644 --- a/drivers/watchdog/advantechwdt.c +++ b/drivers/watchdog/advantechwdt.c @@ -345,4 +345,3 @@ module_exit(advwdt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marek Michalkiewicz <marekm@linux.org.pl>"); MODULE_DESCRIPTION("Advantech Single Board Computer WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c index 41b84936a52..fbb7b94cabf 100644 --- a/drivers/watchdog/alim1535_wdt.c +++ b/drivers/watchdog/alim1535_wdt.c @@ -452,4 +452,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("ALi M1535 PMU Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c index 5eee55012e3..12f0b762b52 100644 --- a/drivers/watchdog/alim7101_wdt.c +++ b/drivers/watchdog/alim7101_wdt.c @@ -425,4 +425,3 @@ MODULE_DEVICE_TABLE(pci, alim7101_pci_tbl); MODULE_AUTHOR("Steve Hill"); MODULE_DESCRIPTION("ALi M7101 PMU Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index b3709f9cf5b..3a996576343 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c @@ -46,7 +46,6 @@ MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>"); MODULE_DESCRIPTION(LONGNAME); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static int margin = 60; module_param(margin, int, 0); diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c index b178e717ef0..afe7d17e677 100644 --- a/drivers/watchdog/at32ap700x_wdt.c +++ b/drivers/watchdog/at32ap700x_wdt.c @@ -434,4 +434,3 @@ module_platform_driver_probe(at32_wdt_driver, at32_wdt_probe); MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>"); MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c index 1c75260b987..dee6cc21d27 100644 --- a/drivers/watchdog/at91rm9200_wdt.c +++ b/drivers/watchdog/at91rm9200_wdt.c @@ -269,7 +269,7 @@ static struct platform_driver at91wdt_driver = { .driver = { .name = "at91_wdt", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(at91_wdt_dt_ids), + .of_match_table = at91_wdt_dt_ids, }, }; @@ -297,5 +297,4 @@ module_exit(at91_wdt_exit); MODULE_AUTHOR("Andrew Victor"); MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:at91_wdt"); diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c index 37cb09b27b6..9fa1f69dac1 100644 --- a/drivers/watchdog/ath79_wdt.c +++ b/drivers/watchdog/ath79_wdt.c @@ -329,4 +329,3 @@ MODULE_AUTHOR("Gabor 
Juhos <juhosg@openwrt.org"); MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 61566fc47f8..a6a2cebb258 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -186,4 +186,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>"); MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c index a14a58d9d11..4eb188b87f8 100644 --- a/drivers/watchdog/bcm63xx_wdt.c +++ b/drivers/watchdog/bcm63xx_wdt.c @@ -317,5 +317,4 @@ MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:bcm63xx-wdt"); diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index 5d36d6fb496..a3b6a5b30f9 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c @@ -465,7 +465,6 @@ module_exit(bfin_wdt_exit); MODULE_AUTHOR("Michele d'Amico, Mike Frysinger <vapier@gentoo.org>"); MODULE_DESCRIPTION("Blackfin Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(timeout, uint, 0); MODULE_PARM_DESC(timeout, diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c index f270bb7bc45..f7ae49edb51 100644 --- a/drivers/watchdog/cpu5wdt.c +++ b/drivers/watchdog/cpu5wdt.c @@ -289,7 +289,6 @@ MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>"); MODULE_DESCRIPTION("sma cpu5 watchdog driver"); MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(port, int, 0); MODULE_PARM_DESC(port, "base address of watchdog card, default is 0x91"); diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index bead7740c86..dd625cca1ae 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -267,5 +267,4 @@ MODULE_PARM_DESC(heartbeat, __MODULE_STRING(DEFAULT_HEARTBEAT)); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:watchdog"); diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index e621098bf66..a46f5c7ee7f 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -29,6 +29,7 @@ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/of.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/spinlock.h> @@ -203,12 +204,12 @@ static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case WDIOC_GETSUPPORT: - return copy_to_user((struct watchdog_info *)arg, &dw_wdt_ident, + return copy_to_user((void __user *)arg, &dw_wdt_ident, sizeof(dw_wdt_ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: - return put_user(0, (int *)arg); + return put_user(0, (int __user *)arg); case WDIOC_KEEPALIVE: dw_wdt_set_next_heartbeat(); @@ -252,17 +253,17 @@ static int dw_wdt_release(struct inode *inode, struct file *filp) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int dw_wdt_suspend(struct device *dev) { - clk_disable(dw_wdt.clk); + clk_disable_unprepare(dw_wdt.clk); return 0; } static int dw_wdt_resume(struct device *dev) { - int err = clk_enable(dw_wdt.clk); + int err = clk_prepare_enable(dw_wdt.clk); if (err) return err; @@ -271,12 +272,9 @@ static int dw_wdt_resume(struct device *dev) return 0; } +#endif /* CONFIG_PM_SLEEP */ -static const struct dev_pm_ops dw_wdt_pm_ops = { - .suspend = dw_wdt_suspend, - .resume = dw_wdt_resume, -}; -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume); static const struct file_operations wdt_fops = { .owner = THIS_MODULE, @@ -309,7 +307,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) if (IS_ERR(dw_wdt.clk)) return PTR_ERR(dw_wdt.clk); - ret = clk_enable(dw_wdt.clk); + ret = clk_prepare_enable(dw_wdt.clk); if (ret) return ret; @@ -326,7 +324,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) return 0; out_disable_clk: - clk_disable(dw_wdt.clk); + clk_disable_unprepare(dw_wdt.clk); return ret; } @@ -335,20 +333,27 @@ static int dw_wdt_drv_remove(struct platform_device *pdev) { misc_deregister(&dw_wdt_miscdev); - clk_disable(dw_wdt.clk); + clk_disable_unprepare(dw_wdt.clk); return 0; } +#ifdef CONFIG_OF +static const struct of_device_id dw_wdt_of_match[] = { + { .compatible = "snps,dw-wdt", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, dw_wdt_of_match); +#endif + static struct platform_driver dw_wdt_driver = { .probe = dw_wdt_drv_probe, .remove = dw_wdt_drv_remove, .driver = { .name = "dw_wdt", .owner = THIS_MODULE, -#ifdef CONFIG_PM + .of_match_table = of_match_ptr(dw_wdt_of_match), .pm = &dw_wdt_pm_ops, -#endif /* CONFIG_PM */ }, }; @@ -357,4 +362,3 @@ module_platform_driver(dw_wdt_driver); MODULE_AUTHOR("Jamie Iles"); MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c index e0574844c31..833e8131184 100644 --- a/drivers/watchdog/ep93xx_wdt.c +++ b/drivers/watchdog/ep93xx_wdt.c @@ -179,4 +179,3 @@ MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>," MODULE_DESCRIPTION("EP93xx Watchdog"); MODULE_LICENSE("GPL"); MODULE_VERSION(WDT_VERSION); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c index cd31b8a2a72..23ee53240c4 100644 --- a/drivers/watchdog/eurotechwdt.c +++ b/drivers/watchdog/eurotechwdt.c @@ -477,4 +477,3 @@ module_exit(eurwdt_exit); MODULE_AUTHOR("Rodolfo Giometti"); MODULE_DESCRIPTION("Driver for Eurotech CPU-1220/1410 on board watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index 3755833430d..25beb30878d 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -331,5 +331,4 @@ module_exit(gef_wdt_exit); MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>"); MODULE_DESCRIPTION("GE watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:gef_wdt"); diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c index 
fcd599d4e22..4a6ae84b42b 100644 --- a/drivers/watchdog/geodewdt.c +++ b/drivers/watchdog/geodewdt.c @@ -297,4 +297,3 @@ module_exit(geodewdt_exit); MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 19f3c3fc65f..45b979d9dd1 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -881,7 +881,6 @@ MODULE_AUTHOR("Tom Mingarelli"); MODULE_DESCRIPTION("hp watchdog driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(HPWDT_VERSION); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(soft_margin, int, 0); MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds"); diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c index 2b2ea13d03e..a72fe9361dd 100644 --- a/drivers/watchdog/i6300esb.c +++ b/drivers/watchdog/i6300esb.c @@ -497,4 +497,3 @@ module_pci_driver(esb_driver); MODULE_AUTHOR("Ross Biro and David Härdeman"); MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 6130321da38..04f8af65acf 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -56,8 +56,6 @@ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/fs.h> /* For file operations */ @@ -394,7 +392,7 @@ static int iTCO_wdt_probe(struct platform_device *dev) { int ret = -ENODEV; unsigned long val32; - struct lpc_ich_info *ich_info = dev->dev.platform_data; + struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev); if (!ich_info) goto out; @@ -582,5 +580,4 @@ MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c index eb6b5cc98ec..7ae36690c44 100644 --- a/drivers/watchdog/ib700wdt.c +++ b/drivers/watchdog/ib700wdt.c @@ -382,6 +382,5 @@ module_exit(ibwdt_exit); MODULE_AUTHOR("Charles Howes <chowes@vsol.net>"); MODULE_DESCRIPTION("IB700 SBC watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* end of ib700wdt.c */ diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c index bc3fb8fe89a..db0a34460e5 100644 --- a/drivers/watchdog/ibmasr.c +++ b/drivers/watchdog/ibmasr.c @@ -419,4 +419,3 @@ MODULE_PARM_DESC(nowayout, MODULE_DESCRIPTION("IBM Automatic Server Restart driver"); MODULE_AUTHOR("Andrey Panin"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c index e24ef6a6e06..70a240297c6 100644 --- a/drivers/watchdog/ie6xx_wdt.c +++ b/drivers/watchdog/ie6xx_wdt.c @@ -344,5 +344,4 @@ module_exit(ie6xx_wdt_exit); MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>"); MODULE_DESCRIPTION("Intel Atom E6xx Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git 
a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 693ac3f4de5..b4786bccc42 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -322,6 +322,7 @@ static const struct of_device_id imx2_wdt_dt_ids[] = { { .compatible = "fsl,imx21-wdt", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids); static struct platform_driver imx2_wdt_driver = { .remove = __exit_p(imx2_wdt_remove), @@ -338,5 +339,4 @@ module_platform_driver_probe(imx2_wdt_driver, imx2_wdt_probe); MODULE_AUTHOR("Wolfram Sang"); MODULE_DESCRIPTION("Watchdog driver for IMX2 and later"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c index 6d90f7a2ce2..1b5c25a47b8 100644 --- a/drivers/watchdog/indydog.c +++ b/drivers/watchdog/indydog.c @@ -214,4 +214,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>"); MODULE_DESCRIPTION("Hardware Watchdog Device for SGI IP22"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c index 8ced2561395..e13e65e996a 100644 --- a/drivers/watchdog/intel_scu_watchdog.c +++ b/drivers/watchdog/intel_scu_watchdog.c @@ -564,5 +564,4 @@ module_exit(intel_scu_watchdog_exit); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_VERSION(WDT_VER); diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c index d964faf1a25..b16013ffacc 100644 --- a/drivers/watchdog/iop_wdt.c +++ b/drivers/watchdog/iop_wdt.c @@ -259,4 +259,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_AUTHOR("Curt E Bruns <curt.e.bruns@intel.com>"); MODULE_DESCRIPTION("iop watchdog timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c index f4cce6d66a5..41b3979a9d8 100644 --- a/drivers/watchdog/it8712f_wdt.c +++ b/drivers/watchdog/it8712f_wdt.c @@ -41,7 +41,6 @@ MODULE_AUTHOR("Jorge Boncompte - DTI2 <jorge@dti2.net>"); MODULE_DESCRIPTION("IT8712F Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static int max_units = 255; static int margin = 60; /* in seconds */ diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index d3dcc6988b5..e2bba68ae71 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -772,4 +772,3 @@ module_exit(it87_wdt_exit); MODULE_AUTHOR("Oliver Schuster"); MODULE_DESCRIPTION("Hardware Watchdog Device Driver for IT87xx EC-LPC I/O"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c index 5580b4fff7f..f20cc53ff71 100644 --- a/drivers/watchdog/ixp4xx_wdt.c +++ b/drivers/watchdog/ixp4xx_wdt.c @@ -208,5 +208,3 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c index d1afdf684c1..2de486a7eea 100644 --- a/drivers/watchdog/jz4740_wdt.c +++ b/drivers/watchdog/jz4740_wdt.c @@ -222,5 +222,4 @@ module_platform_driver(jz4740_wdt_driver); MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); MODULE_DESCRIPTION("jz4740 Watchdog Driver"); 
MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:jz4740-wdt"); diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index 5c3d4df63e6..a1a3638c579 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c @@ -67,7 +67,7 @@ enum { PRESCALER_12, }; -const u32 kempld_prescaler[] = { +static const u32 kempld_prescaler[] = { [PRESCALER_21] = (1 << 21) - 1, [PRESCALER_17] = (1 << 17) - 1, [PRESCALER_12] = (1 << 12) - 1, @@ -361,7 +361,7 @@ static long kempld_wdt_ioctl(struct watchdog_device *wdd, unsigned int cmd, ret = kempld_wdt_keepalive(wdd); break; case WDIOC_GETPRETIMEOUT: - ret = put_user(wdt_data->pretimeout, (int *)arg); + ret = put_user(wdt_data->pretimeout, (int __user *)arg); break; } @@ -578,4 +578,3 @@ module_platform_driver(kempld_wdt_driver); MODULE_DESCRIPTION("KEM PLD Watchdog Driver"); MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index dce9ecffd44..40ca5594a33 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -323,5 +323,4 @@ module_exit(ks8695_wdt_exit); MODULE_AUTHOR("Andrew Victor"); MODULE_DESCRIPTION("Watchdog driver for KS8695"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:ks8695_wdt"); diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c index 088fd0c9d88..3b3148c764a 100644 --- a/drivers/watchdog/lantiq_wdt.c +++ b/drivers/watchdog/lantiq_wdt.c @@ -249,4 +249,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); MODULE_DESCRIPTION("Lantiq SoC Watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c index 173494a681e..da6fa2b6807 100644 --- a/drivers/watchdog/m54xx_wdt.c +++ b/drivers/watchdog/m54xx_wdt.c @@ -223,4 +223,3 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index bf84f788e59..9826b59ef73 100644 --- a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c @@ -92,7 +92,6 @@ static unsigned short zf_readw(unsigned char port) MODULE_AUTHOR("Fernando Fuganti <fuganti@conectiva.com.br>"); MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index cc9d328086e..6d4f3998e1f 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -258,4 +258,3 @@ MODULE_PARM_DESC(nodelay, "(max6373/74 only, default=0)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c index 97d62ee5034..be86ea359ee 100644 --- a/drivers/watchdog/mixcomwd.c +++ b/drivers/watchdog/mixcomwd.c @@ -315,4 +315,3 @@ MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>"); MODULE_DESCRIPTION("MixCom Watchdog driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c new file mode 100644 index 
00000000000..4166e4d116a --- /dev/null +++ b/drivers/watchdog/moxart_wdt.c @@ -0,0 +1,165 @@ +/* + * MOXA ART SoCs watchdog driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> +#include <linux/moduleparam.h> + +#define REG_COUNT 0x4 +#define REG_MODE 0x8 +#define REG_ENABLE 0xC + +struct moxart_wdt_dev { + struct watchdog_device dev; + void __iomem *base; + unsigned int clock_frequency; +}; + +static int heartbeat; + +static int moxart_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev); + + writel(0, moxart_wdt->base + REG_ENABLE); + + return 0; +} + +static int moxart_wdt_start(struct watchdog_device *wdt_dev) +{ + struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev); + + writel(moxart_wdt->clock_frequency * wdt_dev->timeout, + moxart_wdt->base + REG_COUNT); + writel(0x5ab9, moxart_wdt->base + REG_MODE); + writel(0x03, moxart_wdt->base + REG_ENABLE); + + return 0; +} + +static int moxart_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int timeout) +{ + wdt_dev->timeout = timeout; + + return 0; +} + +static const struct watchdog_info moxart_wdt_info = { + .identity = "moxart-wdt", + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, +}; + +static const struct watchdog_ops moxart_wdt_ops = { + .owner = THIS_MODULE, + .start = moxart_wdt_start, + .stop = moxart_wdt_stop, + .set_timeout = moxart_wdt_set_timeout, +}; + +static int moxart_wdt_probe(struct platform_device *pdev) +{ + struct moxart_wdt_dev *moxart_wdt; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + struct clk *clk; + int err; + unsigned int max_timeout; + bool nowayout = WATCHDOG_NOWAYOUT; + + moxart_wdt = devm_kzalloc(dev, sizeof(*moxart_wdt), GFP_KERNEL); + if (!moxart_wdt) + return -ENOMEM; + + platform_set_drvdata(pdev, moxart_wdt); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + moxart_wdt->base = devm_ioremap_resource(dev, res); + if (IS_ERR(moxart_wdt->base)) + return PTR_ERR(moxart_wdt->base); + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + pr_err("%s: of_clk_get failed\n", __func__); + return PTR_ERR(clk); + } + + moxart_wdt->clock_frequency = clk_get_rate(clk); + if (moxart_wdt->clock_frequency == 0) { + pr_err("%s: incorrect clock frequency\n", __func__); + return -EINVAL; + } + + max_timeout = UINT_MAX / moxart_wdt->clock_frequency; + + moxart_wdt->dev.info = &moxart_wdt_info; + moxart_wdt->dev.ops = &moxart_wdt_ops; + moxart_wdt->dev.timeout = max_timeout; + moxart_wdt->dev.min_timeout = 1; + moxart_wdt->dev.max_timeout = max_timeout; + moxart_wdt->dev.parent = dev; + + watchdog_init_timeout(&moxart_wdt->dev, heartbeat, dev); + watchdog_set_nowayout(&moxart_wdt->dev, nowayout); + + watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt); + + err = watchdog_register_device(&moxart_wdt->dev); + if (err) + return err; + + dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n", + moxart_wdt->dev.timeout, nowayout); + + return 0; +} + +static int moxart_wdt_remove(struct platform_device *pdev) +{ + struct 
moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev); + + moxart_wdt_stop(&moxart_wdt->dev); + watchdog_unregister_device(&moxart_wdt->dev); + + return 0; +} + +static const struct of_device_id moxart_watchdog_match[] = { + { .compatible = "moxa,moxart-watchdog" }, + { }, +}; + +static struct platform_driver moxart_wdt_driver = { + .probe = moxart_wdt_probe, + .remove = moxart_wdt_remove, + .driver = { + .name = "moxart-watchdog", + .owner = THIS_MODULE, + .of_match_table = moxart_watchdog_match, + }, +}; +module_platform_driver(moxart_wdt_driver); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds"); + +MODULE_DESCRIPTION("MOXART watchdog driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>"); diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index d0ebebae607..d82152077fd 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -330,4 +330,3 @@ MODULE_AUTHOR("Dave Updegraff, Kumar Gala"); MODULE_DESCRIPTION("Driver for watchdog timer in MPC8xx/MPC83xx/MPC86xx " "uProcessors"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index b4341110ad4..edb31ffd792 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c @@ -257,5 +257,4 @@ module_platform_driver(mtx1_wdt_driver); MODULE_AUTHOR("Michael Stickel, Florian Fainelli"); MODULE_DESCRIPTION("Driver for the MTX-1 watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:mtx1-wdt"); diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c index e4cf9801926..f9fa5840939 100644 --- a/drivers/watchdog/mv64x60_wdt.c +++ b/drivers/watchdog/mv64x60_wdt.c @@ -255,7 +255,7 @@ static struct miscdevice mv64x60_wdt_miscdev = { static int mv64x60_wdt_probe(struct platform_device *dev) { - struct mv64x60_wdt_pdata *pdata = dev->dev.platform_data; + struct mv64x60_wdt_pdata *pdata = dev_get_platdata(&dev->dev); struct resource *r; int timeout = 10; @@ -323,5 +323,4 @@ module_exit(mv64x60_wdt_exit); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("MV64x60 watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:" MV64x60_WDT_NAME); diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c index b15b6efd91a..a0d893b0930 100644 --- a/drivers/watchdog/nuc900_wdt.c +++ b/drivers/watchdog/nuc900_wdt.c @@ -307,5 +307,4 @@ module_platform_driver(nuc900wdt_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("Watchdog driver for NUC900"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:nuc900-wdt"); diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index 59cf19eeea0..231e5b9d5c8 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c @@ -513,4 +513,3 @@ module_exit(nv_tco_cleanup_module); MODULE_AUTHOR("Mike Waychison"); MODULE_DESCRIPTION("TCO timer driver for NV chipsets"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c index 4dd281f2c33..fb57103c8eb 100644 --- a/drivers/watchdog/of_xilinx_wdt.c +++ b/drivers/watchdog/of_xilinx_wdt.c @@ -405,4 +405,3 @@ module_platform_driver(xwdt_driver); MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>"); MODULE_DESCRIPTION("Xilinx Watchdog 
driver"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index af88ffd1068..09cf0135e8a 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -68,14 +68,14 @@ static void omap_wdt_reload(struct omap_wdt_dev *wdev) void __iomem *base = wdev->base; /* wait for posted write to complete */ - while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08) + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08) cpu_relax(); wdev->wdt_trgr_pattern = ~wdev->wdt_trgr_pattern; - __raw_writel(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR)); + writel_relaxed(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR)); /* wait for posted write to complete */ - while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08) + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08) cpu_relax(); /* reloaded WCRR from WLDR */ } @@ -85,12 +85,12 @@ static void omap_wdt_enable(struct omap_wdt_dev *wdev) void __iomem *base = wdev->base; /* Sequence to enable the watchdog */ - __raw_writel(0xBBBB, base + OMAP_WATCHDOG_SPR); - while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x10) + writel_relaxed(0xBBBB, base + OMAP_WATCHDOG_SPR); + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10) cpu_relax(); - __raw_writel(0x4444, base + OMAP_WATCHDOG_SPR); - while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x10) + writel_relaxed(0x4444, base + OMAP_WATCHDOG_SPR); + while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10) cpu_relax(); } @@ -99,12 +99,12 @@ static void omap_wdt_disable(struct omap_wdt_dev *wdev) void __iomem *base = wdev->base; /* sequence required to disable watchdog */ - __raw_writel(0xAAAA, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x10) + writel_relaxed(0xAAAA, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10) cpu_relax(); - __raw_writel(0x5555, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x10) + writel_relaxed(0x5555, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */ + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10) cpu_relax(); } @@ -115,11 +115,11 @@ static void omap_wdt_set_timer(struct omap_wdt_dev *wdev, void __iomem *base = wdev->base; /* just count up at 32 KHz */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04) cpu_relax(); - __raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR); - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) + writel_relaxed(pre_margin, base + OMAP_WATCHDOG_LDR); + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04) cpu_relax(); } @@ -135,11 +135,11 @@ static int omap_wdt_start(struct watchdog_device *wdog) pm_runtime_get_sync(wdev->dev); /* initialize prescaler */ - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01) cpu_relax(); - __raw_writel((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL); - while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) + writel_relaxed((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL); + while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01) cpu_relax(); omap_wdt_set_timer(wdev, wdog->timeout); @@ -205,7 +205,7 @@ static const struct watchdog_ops omap_wdt_ops = { static int omap_wdt_probe(struct platform_device *pdev) { - struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data; + struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev); 
struct watchdog_device *omap_wdt; struct resource *res, *mem; struct omap_wdt_dev *wdev; @@ -275,7 +275,7 @@ static int omap_wdt_probe(struct platform_device *pdev) } pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n", - __raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF, + readl_relaxed(wdev->base + OMAP_WATCHDOG_REV) & 0xFF, omap_wdt->timeout); pm_runtime_put_sync(wdev->dev); diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 4ea5fcccac0..44edca66d56 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -207,7 +207,7 @@ static struct platform_driver orion_wdt_driver = { .driver = { .owner = THIS_MODULE, .name = "orion_wdt", - .of_match_table = of_match_ptr(orion_wdt_of_match_table), + .of_match_table = orion_wdt_of_match_table, }, }; @@ -225,4 +225,3 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:orion_wdt"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c index 5afb89b4865..5211d56b368 100644 --- a/drivers/watchdog/pc87413_wdt.c +++ b/drivers/watchdog/pc87413_wdt.c @@ -580,8 +580,6 @@ MODULE_AUTHOR("Sven Anders <anders@anduras.de>, " MODULE_DESCRIPTION("PC87413 WDT driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - module_param(io, int, 0); MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(IO_DEFAULT) ")."); diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c index 33e49a7f889..e936f15dc7c 100644 --- a/drivers/watchdog/pcwd.c +++ b/drivers/watchdog/pcwd.c @@ -61,7 +61,7 @@ #include <linux/delay.h> /* For mdelay function */ #include <linux/timer.h> /* For timer related operations */ #include <linux/jiffies.h> /* For jiffies stuff */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/reboot.h> /* For kernel_power_off() */ #include <linux/init.h> /* For __init/__exit/... */ @@ -1011,5 +1011,3 @@ MODULE_AUTHOR("Ken Hollis <kenji@bitgate.com>, " MODULE_DESCRIPTION("Berkshire ISA-PC Watchdog driver"); MODULE_VERSION(WATCHDOG_VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c index 7890f84edf7..b4864f254b4 100644 --- a/drivers/watchdog/pcwd_pci.c +++ b/drivers/watchdog/pcwd_pci.c @@ -40,7 +40,7 @@ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... 
*/ #include <linux/delay.h> /* For mdelay function */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ @@ -820,5 +820,3 @@ module_pci_driver(pcipcwd_driver); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 7b14d184792..b731b5d129b 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -32,7 +32,7 @@ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/delay.h> /* For mdelay function */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ @@ -72,8 +72,6 @@ do { \ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); /* Module Parameters */ module_param(debug, int, 0); @@ -235,13 +233,17 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, unsigned char cmd, unsigned char *msb, unsigned char *lsb) { int got_response, count; - unsigned char buf[6]; + unsigned char *buf; /* We will not send any commands if the USB PCWD device does * not exist */ if ((!usb_pcwd) || (!usb_pcwd->exists)) return -1; + buf = kmalloc(6, GFP_KERNEL); + if (buf == NULL) + return 0; + /* The USB PC Watchdog uses a 6 byte report format. * The board currently uses only 3 of the six bytes of the report. 
*/ buf[0] = cmd; /* Byte 0 = CMD */ @@ -256,8 +258,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0), HID_REQ_SET_REPORT, HID_DT_REPORT, - 0x0200, usb_pcwd->interface_number, buf, sizeof(buf), - USB_COMMAND_TIMEOUT) != sizeof(buf)) { + 0x0200, usb_pcwd->interface_number, buf, 6, + USB_COMMAND_TIMEOUT) != 6) { dbg("usb_pcwd_send_command: error in usb_control_msg for " "cmd 0x%x 0x%x 0x%x\n", cmd, *msb, *lsb); } @@ -277,6 +279,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, *lsb = usb_pcwd->cmd_data_lsb; } + kfree(buf); + return got_response; } diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c index 329bc60ad7a..0cdfee26669 100644 --- a/drivers/watchdog/pika_wdt.c +++ b/drivers/watchdog/pika_wdt.c @@ -299,5 +299,3 @@ module_exit(pikawdt_exit); MODULE_AUTHOR("Sean MacLennan <smaclennan@pikatech.com>"); MODULE_DESCRIPTION("PIKA FPGA based Watchdog Timer"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index b30bd430f59..1bdcc313e1d 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -233,5 +233,4 @@ MODULE_PARM_DESC(nowayout, "Set to 1 to keep watchdog running after device release"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:pnx4008-watchdog"); diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c index 1b62a7dfcc9..882fdcb46ad 100644 --- a/drivers/watchdog/pnx833x_wdt.c +++ b/drivers/watchdog/pnx833x_wdt.c @@ -278,4 +278,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Daniel Laird/Andre McCurdy"); MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index 9cf6bc7a234..71e78ef4b73 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c @@ -25,8 +25,7 @@ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/fs.h> /* For file operations */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/init.h> /* For __init/__exit/... 
*/ #include <linux/platform_device.h> /* For platform_driver framework */ @@ -329,4 +328,3 @@ MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>," "Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index b0f116c2fd5..082d0626295 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -231,7 +231,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) struct resource *r; struct rdc321x_wdt_pdata *pdata; - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "no platform data supplied\n"); return -ENODEV; @@ -298,4 +298,3 @@ module_platform_driver(rdc321x_wdt_driver); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("RDC321x watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c new file mode 100644 index 00000000000..53d37fea183 --- /dev/null +++ b/drivers/watchdog/rt2880_wdt.c @@ -0,0 +1,207 @@ +/* + * Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer + * + * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + * + * This driver was based on: drivers/watchdog/softdog.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/reset.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/watchdog.h> +#include <linux/miscdevice.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#define SYSC_RSTSTAT 0x38 +#define WDT_RST_CAUSE BIT(1) + +#define RALINK_WDT_TIMEOUT 30 +#define RALINK_WDT_PRESCALE 65536 + +#define TIMER_REG_TMR1LOAD 0x00 +#define TIMER_REG_TMR1CTL 0x08 + +#define TMRSTAT_TMR1RST BIT(5) + +#define TMR1CTL_ENABLE BIT(7) +#define TMR1CTL_MODE_SHIFT 4 +#define TMR1CTL_MODE_MASK 0x3 +#define TMR1CTL_MODE_FREE_RUNNING 0x0 +#define TMR1CTL_MODE_PERIODIC 0x1 +#define TMR1CTL_MODE_TIMEOUT 0x2 +#define TMR1CTL_MODE_WDT 0x3 +#define TMR1CTL_PRESCALE_MASK 0xf +#define TMR1CTL_PRESCALE_65536 0xf + +static struct clk *rt288x_wdt_clk; +static unsigned long rt288x_wdt_freq; +static void __iomem *rt288x_wdt_base; + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static inline void rt_wdt_w32(unsigned reg, u32 val) +{ + iowrite32(val, rt288x_wdt_base + reg); +} + +static inline u32 rt_wdt_r32(unsigned reg) +{ + return ioread32(rt288x_wdt_base + reg); +} + +static int rt288x_wdt_ping(struct watchdog_device *w) +{ + rt_wdt_w32(TIMER_REG_TMR1LOAD, w->timeout * rt288x_wdt_freq); + + return 0; +} + +static int rt288x_wdt_start(struct watchdog_device *w) +{ + u32 t; + + t = rt_wdt_r32(TIMER_REG_TMR1CTL); + t &= ~(TMR1CTL_MODE_MASK << TMR1CTL_MODE_SHIFT | + TMR1CTL_PRESCALE_MASK); + t |= (TMR1CTL_MODE_WDT << TMR1CTL_MODE_SHIFT | + TMR1CTL_PRESCALE_65536); + rt_wdt_w32(TIMER_REG_TMR1CTL, t); + + rt288x_wdt_ping(w); + + t = rt_wdt_r32(TIMER_REG_TMR1CTL); + t |= TMR1CTL_ENABLE; + rt_wdt_w32(TIMER_REG_TMR1CTL, t); 
+ + return 0; +} + +static int rt288x_wdt_stop(struct watchdog_device *w) +{ + u32 t; + + rt288x_wdt_ping(w); + + t = rt_wdt_r32(TIMER_REG_TMR1CTL); + t &= ~TMR1CTL_ENABLE; + rt_wdt_w32(TIMER_REG_TMR1CTL, t); + + return 0; +} + +static int rt288x_wdt_set_timeout(struct watchdog_device *w, unsigned int t) +{ + w->timeout = t; + rt288x_wdt_ping(w); + + return 0; +} + +static int rt288x_wdt_bootcause(void) +{ + if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE) + return WDIOF_CARDRESET; + + return 0; +} + +static struct watchdog_info rt288x_wdt_info = { + .identity = "Ralink Watchdog", + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, +}; + +static struct watchdog_ops rt288x_wdt_ops = { + .owner = THIS_MODULE, + .start = rt288x_wdt_start, + .stop = rt288x_wdt_stop, + .ping = rt288x_wdt_ping, + .set_timeout = rt288x_wdt_set_timeout, +}; + +static struct watchdog_device rt288x_wdt_dev = { + .info = &rt288x_wdt_info, + .ops = &rt288x_wdt_ops, + .min_timeout = 1, +}; + +static int rt288x_wdt_probe(struct platform_device *pdev) +{ + struct resource *res; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rt288x_wdt_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rt288x_wdt_base)) + return PTR_ERR(rt288x_wdt_base); + + rt288x_wdt_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(rt288x_wdt_clk)) + return PTR_ERR(rt288x_wdt_clk); + + device_reset(&pdev->dev); + + rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE; + + rt288x_wdt_dev.dev = &pdev->dev; + rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); + + rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); + rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout; + + watchdog_set_nowayout(&rt288x_wdt_dev, nowayout); + + ret = watchdog_register_device(&rt288x_wdt_dev); + if (!ret) + dev_info(&pdev->dev, "Initialized\n"); + + return 0; +} + +static int rt288x_wdt_remove(struct platform_device *pdev) +{ + watchdog_unregister_device(&rt288x_wdt_dev); + + return 0; +} + +static void rt288x_wdt_shutdown(struct platform_device *pdev) +{ + rt288x_wdt_stop(&rt288x_wdt_dev); +} + +static const struct of_device_id rt288x_wdt_match[] = { + { .compatible = "ralink,rt2880-wdt" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rt288x_wdt_match); + +static struct platform_driver rt288x_wdt_driver = { + .probe = rt288x_wdt_probe, + .remove = rt288x_wdt_remove, + .shutdown = rt288x_wdt_shutdown, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .of_match_table = rt288x_wdt_match, + }, +}; + +module_platform_driver(rt288x_wdt_driver); + +MODULE_DESCRIPTION("MediaTek/Ralink RT288x/RT3xxx hardware watchdog driver"); +MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 23aad7c6bf5..7d8fd041ee2 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -29,7 +29,6 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> -#include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */ #include <linux/watchdog.h> #include <linux/init.h> #include <linux/platform_device.h> @@ -539,5 +538,4 @@ MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, " "Dimitry Andric <dimitry.andric@tomtom.com>"); MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:s3c2410-wdt"); diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index ccd6b29e21b..e1d39a1e962 
100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c @@ -193,4 +193,3 @@ module_param(margin, int, 0); MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index ea5d84a1fda..3abae50773b 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c @@ -341,7 +341,6 @@ MODULE_PARM_DESC(timeout, "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* * example code that can be put in a platform code area to utilize the diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c index 63632ec87c7..2eef58a0cf0 100644 --- a/drivers/watchdog/sbc60xxwdt.c +++ b/drivers/watchdog/sbc60xxwdt.c @@ -387,4 +387,3 @@ module_exit(sbc60xxwdt_unload); MODULE_AUTHOR("Jakob Oestergaard <jakob@unthought.net>"); MODULE_DESCRIPTION("60xx Single Board Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c index 719edc8fdeb..5f268add17c 100644 --- a/drivers/watchdog/sbc7240_wdt.c +++ b/drivers/watchdog/sbc7240_wdt.c @@ -309,5 +309,3 @@ MODULE_AUTHOR("Gilles Gigan"); MODULE_DESCRIPTION("Watchdog device driver for single board" " computers EPIC Nano 7240 from iEi"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c index d4781e05f01..da60560ca44 100644 --- a/drivers/watchdog/sbc8360.c +++ b/drivers/watchdog/sbc8360.c @@ -404,6 +404,5 @@ MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>"); MODULE_DESCRIPTION("SBC8360 watchdog driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.01"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* end of sbc8360.c */ diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c index 0c3e9f66ef7..a1c502e0d8e 100644 --- a/drivers/watchdog/sbc_epx_c3.c +++ b/drivers/watchdog/sbc_epx_c3.c @@ -220,4 +220,3 @@ MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. " "so only use it if you are *sure* you are running on this specific " "SBC system from Winsystems! 
It writes to IO ports 0x1ee and 0x1ef!"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 90d5527ca88..a517d8bae75 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c @@ -263,5 +263,3 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c index 3fb83b0c28c..3b9fff9dcf6 100644 --- a/drivers/watchdog/sc1200wdt.c +++ b/drivers/watchdog/sc1200wdt.c @@ -476,4 +476,3 @@ MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>"); MODULE_DESCRIPTION( "Driver for National Semiconductor PC87307/PC97307 watchdog component"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c index 707e027e500..f353e18b1a8 100644 --- a/drivers/watchdog/sc520_wdt.c +++ b/drivers/watchdog/sc520_wdt.c @@ -433,4 +433,3 @@ MODULE_AUTHOR("Scott and Bill Jennings"); MODULE_DESCRIPTION( "Driver for watchdog timer in AMD \"Elan\" SC520 uProcessor"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index af7b136b187..b96127ea3de 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -26,8 +26,7 @@ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/... */ -#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV - (WATCHDOG_MINOR) */ +#include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/fs.h> /* For file operations */ @@ -545,5 +544,3 @@ module_exit(sch311x_wdt_exit); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("SMSC SCH311x WatchDog Timer Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c index 8ae7c282d46..836377cf927 100644 --- a/drivers/watchdog/scx200_wdt.c +++ b/drivers/watchdog/scx200_wdt.c @@ -37,7 +37,6 @@ MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static int margin = 60; /* in seconds */ module_param(margin, int, 0); diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index 5bca7945776..f9b8e06f355 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c @@ -343,7 +343,6 @@ MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); MODULE_DESCRIPTION("SuperH watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(clock_division_ratio, int, 0); MODULE_PARM_DESC(clock_division_ratio, diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c new file mode 100644 index 00000000000..ced3edc9595 --- /dev/null +++ b/drivers/watchdog/sirfsoc_wdt.c @@ -0,0 +1,226 @@ +/* + * Watchdog driver for CSR SiRFprimaII and SiRFatlasVI + * + * Copyright (c) 2013 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
+ */ + +#include <linux/module.h> +#include <linux/watchdog.h> +#include <linux/platform_device.h> +#include <linux/moduleparam.h> +#include <linux/of.h> +#include <linux/io.h> +#include <linux/uaccess.h> + +#define CLOCK_FREQ 1000000 + +#define SIRFSOC_TIMER_COUNTER_LO 0x0000 +#define SIRFSOC_TIMER_MATCH_0 0x0008 +#define SIRFSOC_TIMER_INT_EN 0x0024 +#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028 +#define SIRFSOC_TIMER_LATCH 0x0030 +#define SIRFSOC_TIMER_LATCHED_LO 0x0034 + +#define SIRFSOC_TIMER_WDT_INDEX 5 + +#define SIRFSOC_WDT_MIN_TIMEOUT 30 /* 30 secs */ +#define SIRFSOC_WDT_MAX_TIMEOUT (10 * 60) /* 10 mins */ +#define SIRFSOC_WDT_DEFAULT_TIMEOUT 30 /* 30 secs */ + +static unsigned int timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT; +static bool nowayout = WATCHDOG_NOWAYOUT; + +module_param(timeout, uint, 0); +module_param(nowayout, bool, 0); + +MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)"); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd) +{ + u32 counter, match; + void __iomem *wdt_base; + int time_left; + + wdt_base = watchdog_get_drvdata(wdd); + counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO); + match = readl(wdt_base + + SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2)); + + time_left = match - counter; + + return time_left / CLOCK_FREQ; +} + +static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd) +{ + u32 counter, timeout_ticks; + void __iomem *wdt_base; + + timeout_ticks = wdd->timeout * CLOCK_FREQ; + wdt_base = watchdog_get_drvdata(wdd); + + /* Enable the latch before reading the LATCH_LO register */ + writel(1, wdt_base + SIRFSOC_TIMER_LATCH); + + /* Set the TO value */ + counter = readl(wdt_base + SIRFSOC_TIMER_LATCHED_LO); + + counter += timeout_ticks; + + writel(counter, wdt_base + + SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2)); + + return 0; +} + +static int sirfsoc_wdt_enable(struct watchdog_device *wdd) +{ + void __iomem *wdt_base = watchdog_get_drvdata(wdd); + sirfsoc_wdt_updatetimeout(wdd); + + /* + * NOTE: If interrupt is not enabled + * then WD-Reset doesn't get generated at all. 
+ */ + writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN) + | (1 << SIRFSOC_TIMER_WDT_INDEX), + wdt_base + SIRFSOC_TIMER_INT_EN); + writel(1, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN); + + return 0; +} + +static int sirfsoc_wdt_disable(struct watchdog_device *wdd) +{ + void __iomem *wdt_base = watchdog_get_drvdata(wdd); + + writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN); + writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN) + & (~(1 << SIRFSOC_TIMER_WDT_INDEX)), + wdt_base + SIRFSOC_TIMER_INT_EN); + + return 0; +} + +static int sirfsoc_wdt_settimeout(struct watchdog_device *wdd, unsigned int to) +{ + wdd->timeout = to; + sirfsoc_wdt_updatetimeout(wdd); + + return 0; +} + +#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE) + +static const struct watchdog_info sirfsoc_wdt_ident = { + .options = OPTIONS, + .firmware_version = 0, + .identity = "SiRFSOC Watchdog", +}; + +static struct watchdog_ops sirfsoc_wdt_ops = { + .owner = THIS_MODULE, + .start = sirfsoc_wdt_enable, + .stop = sirfsoc_wdt_disable, + .get_timeleft = sirfsoc_wdt_gettimeleft, + .ping = sirfsoc_wdt_updatetimeout, + .set_timeout = sirfsoc_wdt_settimeout, +}; + +static struct watchdog_device sirfsoc_wdd = { + .info = &sirfsoc_wdt_ident, + .ops = &sirfsoc_wdt_ops, + .timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT, + .min_timeout = SIRFSOC_WDT_MIN_TIMEOUT, + .max_timeout = SIRFSOC_WDT_MAX_TIMEOUT, +}; + +static int sirfsoc_wdt_probe(struct platform_device *pdev) +{ + struct resource *res; + int ret; + void __iomem *base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + watchdog_set_drvdata(&sirfsoc_wdd, base); + + watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev); + watchdog_set_nowayout(&sirfsoc_wdd, nowayout); + + ret = watchdog_register_device(&sirfsoc_wdd); + if (ret) + return ret; + + platform_set_drvdata(pdev, &sirfsoc_wdd); + + return 0; +} + +static void sirfsoc_wdt_shutdown(struct platform_device *pdev) +{ + struct watchdog_device *wdd = platform_get_drvdata(pdev); + + sirfsoc_wdt_disable(wdd); +} + +static int sirfsoc_wdt_remove(struct platform_device *pdev) +{ + sirfsoc_wdt_shutdown(pdev); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sirfsoc_wdt_suspend(struct device *dev) +{ + return 0; +} + +static int sirfsoc_wdt_resume(struct device *dev) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + /* + * NOTE: Since timer controller registers settings are saved + * and restored back by the timer-prima2.c, so we need not + * update WD settings except refreshing timeout. 
+ */ + sirfsoc_wdt_updatetimeout(wdd); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sirfsoc_wdt_pm_ops, + sirfsoc_wdt_suspend, sirfsoc_wdt_resume); + +static const struct of_device_id sirfsoc_wdt_of_match[] = { + { .compatible = "sirf,prima2-tick"}, + {}, +}; +MODULE_DEVICE_TABLE(of, sirfsoc_wdt_of_match); + +static struct platform_driver sirfsoc_wdt_driver = { + .driver = { + .name = "sirfsoc-wdt", + .owner = THIS_MODULE, + .pm = &sirfsoc_wdt_pm_ops, + .of_match_table = of_match_ptr(sirfsoc_wdt_of_match), + }, + .probe = sirfsoc_wdt_probe, + .remove = sirfsoc_wdt_remove, + .shutdown = sirfsoc_wdt_shutdown, +}; +module_platform_driver(sirfsoc_wdt_driver); + +MODULE_DESCRIPTION("SiRF SoC watchdog driver"); +MODULE_AUTHOR("Xianglong Du <Xianglong.Du@csr.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sirfsoc-wdt"); diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c index 6d665f9c1d5..445ea1ad1fa 100644 --- a/drivers/watchdog/smsc37b787_wdt.c +++ b/drivers/watchdog/smsc37b787_wdt.c @@ -603,8 +603,6 @@ MODULE_DESCRIPTION("Driver for SMsC 37B787 watchdog component (Version " VERSION ")"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - #ifdef SMSC_SUPPORT_MINUTES module_param(unit, int, 0); MODULE_PARM_DESC(unit, diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index b68b1e519d5..ef2638fee4a 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -207,4 +207,3 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Software Watchdog Device Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 0e9d8c479c3..ce63a1bbf39 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -580,4 +580,3 @@ module_exit(sp5100_tco_cleanup_module); MODULE_AUTHOR("Priyanka Gupta"); MODULE_DESCRIPTION("TCO timer driver for SP5100/SB800 chipset"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 58df98aec12..3f786ce0a6f 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -268,7 +268,6 @@ static int sp805_wdt_remove(struct amba_device *adev) struct sp805_wdt *wdt = amba_get_drvdata(adev); watchdog_unregister_device(&wdt->wdd); - amba_set_drvdata(adev, NULL); watchdog_set_drvdata(&wdt->wdd, NULL); return 0; diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c index c97e98dcde6..d667f6b51d3 100644 --- a/drivers/watchdog/stmp3xxx_rtc_wdt.c +++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c @@ -30,7 +30,7 @@ MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to " static int wdt_start(struct watchdog_device *wdd) { struct device *dev = watchdog_get_drvdata(wdd); - struct stmp3xxx_wdt_pdata *pdata = dev->platform_data; + struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev); pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE); return 0; @@ -39,7 +39,7 @@ static int wdt_start(struct watchdog_device *wdd) static int wdt_stop(struct watchdog_device *wdd) { struct device *dev = watchdog_get_drvdata(wdd); - struct stmp3xxx_wdt_pdata *pdata = dev->platform_data; + struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev); pdata->wdt_set_timeout(dev->parent, 0); return 0; @@ -108,4 +108,3 @@ module_platform_driver(stmp3xxx_wdt_driver); MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver"); 
MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c index f6caa77151c..76332d893e1 100644 --- a/drivers/watchdog/sunxi_wdt.c +++ b/drivers/watchdog/sunxi_wdt.c @@ -217,7 +217,7 @@ static struct platform_driver sunxi_wdt_driver = { .driver = { .owner = THIS_MODULE, .name = DRV_NAME, - .of_match_table = of_match_ptr(sunxi_wdt_dt_ids) + .of_match_table = sunxi_wdt_dt_ids, }, }; diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index c9b0c627fe7..09d4831aa61 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -192,7 +192,7 @@ static int ts72xx_wdt_open(struct inode *inode, struct file *file) dev_err(&wdt->pdev->dev, "failed to convert timeout (%d) to register value\n", timeout); - return -EINVAL; + return regval; } if (mutex_lock_interruptible(&wdt->lock)) @@ -305,7 +305,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case WDIOC_GETSUPPORT: - error = copy_to_user(argp, &winfo, sizeof(winfo)); + if (copy_to_user(argp, &winfo, sizeof(winfo))) + error = -EFAULT; break; case WDIOC_GETSTATUS: @@ -320,10 +321,9 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_SETOPTIONS: { int options; - if (get_user(options, p)) { - error = -EFAULT; + error = get_user(options, p); + if (error) break; - } error = -EINVAL; @@ -341,30 +341,26 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_SETTIMEOUT: { int new_timeout; + int regval; - if (get_user(new_timeout, p)) { - error = -EFAULT; - } else { - int regval; - - regval = timeout_to_regval(new_timeout); - if (regval < 0) { - error = -EINVAL; - } else { - ts72xx_wdt_stop(wdt); - wdt->regval = regval; - ts72xx_wdt_start(wdt); - } - } + error = get_user(new_timeout, p); if (error) break; + regval = timeout_to_regval(new_timeout); + if (regval < 0) { + error = regval; + break; + } + ts72xx_wdt_stop(wdt); + wdt->regval = regval; + ts72xx_wdt_start(wdt); + /*FALLTHROUGH*/ } case WDIOC_GETTIMEOUT: - if (put_user(regval_to_timeout(wdt->regval), p)) - error = -EFAULT; + error = put_user(regval_to_timeout(wdt->regval), p); break; default: diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c index 88f23c5cfdd..0fd0e8ae62a 100644 --- a/drivers/watchdog/txx9wdt.c +++ b/drivers/watchdog/txx9wdt.c @@ -176,5 +176,4 @@ module_platform_driver_probe(txx9wdt_driver, txx9wdt_probe); MODULE_DESCRIPTION("TXx9 Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:txx9wdt"); diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c index a614d84121c..e029b5768f2 100644 --- a/drivers/watchdog/ux500_wdt.c +++ b/drivers/watchdog/ux500_wdt.c @@ -88,7 +88,7 @@ static struct watchdog_device ux500_wdt = { static int ux500_wdt_probe(struct platform_device *pdev) { int ret; - struct ux500_wdt_data *pdata = pdev->dev.platform_data; + struct ux500_wdt_data *pdata = dev_get_platdata(&pdev->dev); if (pdata) { if (pdata->timeout > 0) @@ -167,5 +167,4 @@ module_platform_driver(ux500_wdt_driver); MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>"); MODULE_DESCRIPTION("Ux500 Watchdog Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:ux500_wdt"); diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index 92f1326f0cf..e24b2108287 100644 --- 
a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -1,6 +1,9 @@ /* * w83627hf/thf WDT driver * + * (c) Copyright 2013 Guenter Roeck + * converted to watchdog infrastructure + * * (c) Copyright 2007 Vlad Drukker <vlad@storewiz.com> * added support for W83627THF. * @@ -31,31 +34,22 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> -#include <linux/fs.h> #include <linux/ioport.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> -#include <linux/spinlock.h> #include <linux/io.h> -#include <linux/uaccess.h> - #define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT" #define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */ -static unsigned long wdt_is_open; -static char expect_close; -static DEFINE_SPINLOCK(io_lock); - /* You must set this - there is no sane way to probe for this board. */ static int wdt_io = 0x2E; module_param(wdt_io, int, 0); MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)"); -static int timeout = WATCHDOG_TIMEOUT; /* in seconds */ +static int timeout; /* in seconds */ module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1 <= timeout <= 255, default=" @@ -76,236 +70,147 @@ MODULE_PARM_DESC(nowayout, (same as EFER) */ #define WDT_EFDR (WDT_EFIR+1) /* Extended Function Data Register */ -static void w83627hf_select_wd_register(void) +#define W83627HF_LD_WDT 0x08 + +static void superio_outb(int reg, int val) { - unsigned char c; + outb(reg, WDT_EFER); + outb(val, WDT_EFDR); +} + +static inline int superio_inb(int reg) +{ + outb(reg, WDT_EFER); + return inb(WDT_EFDR); +} + +static int superio_enter(void) +{ + if (!request_muxed_region(wdt_io, 2, WATCHDOG_NAME)) + return -EBUSY; + outb_p(0x87, WDT_EFER); /* Enter extended function mode */ outb_p(0x87, WDT_EFER); /* Again according to manual */ - outb(0x20, WDT_EFER); /* check chip version */ - c = inb(WDT_EFDR); - if (c == 0x82) { /* W83627THF */ - outb_p(0x2b, WDT_EFER); /* select GPIO3 */ - c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */ - outb_p(0x2b, WDT_EFER); - outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */ - } else if (c == 0x88 || c == 0xa0) { /* W83627EHF / W83627DHG */ - outb_p(0x2d, WDT_EFER); /* select GPIO5 */ - c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */ - outb_p(0x2d, WDT_EFER); - outb_p(c, WDT_EFDR); /* set GPIO5 to WDT0 */ - } + return 0; +} - outb_p(0x07, WDT_EFER); /* point to logical device number reg */ - outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ - outb_p(0x30, WDT_EFER); /* select CR30 */ - outb_p(0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ +static void superio_select(int ld) +{ + superio_outb(0x07, ld); } -static void w83627hf_unselect_wd_register(void) +static void superio_exit(void) { outb_p(0xAA, WDT_EFER); /* Leave extended function mode */ + release_region(wdt_io, 2); } /* tyan motherboards seem to set F5 to 0x4C ? * So explicitly init to appropriate value. 
*/ -static void w83627hf_init(void) +static int w83627hf_init(struct watchdog_device *wdog) { + int ret; unsigned char t; - w83627hf_select_wd_register(); + ret = superio_enter(); + if (ret) + return ret; + + superio_select(W83627HF_LD_WDT); + t = superio_inb(0x20); /* check chip version */ + if (t == 0x82) { /* W83627THF */ + t = (superio_inb(0x2b) & 0xf7); + superio_outb(0x2b, t | 0x04); /* set GPIO3 to WDT0 */ + } else if (t == 0x88 || t == 0xa0) { /* W83627EHF / W83627DHG */ + t = superio_inb(0x2d); + superio_outb(0x2d, t & ~0x01); /* set GPIO5 to WDT0 */ + } + + /* set CR30 bit 0 to activate GPIO2 */ + t = superio_inb(0x30); + if (!(t & 0x01)) + superio_outb(0x30, t | 0x01); - outb_p(0xF6, WDT_EFER); /* Select CRF6 */ - t = inb_p(WDT_EFDR); /* read CRF6 */ + t = superio_inb(0xF6); if (t != 0) { pr_info("Watchdog already running. Resetting timeout to %d sec\n", - timeout); - outb_p(timeout, WDT_EFDR); /* Write back to CRF6 */ + wdog->timeout); + superio_outb(0xF6, wdog->timeout); } - outb_p(0xF5, WDT_EFER); /* Select CRF5 */ - t = inb_p(WDT_EFDR); /* read CRF5 */ - t &= ~0x0C; /* set second mode & disable keyboard - turning off watchdog */ - t |= 0x02; /* enable the WDTO# output low pulse - to the KBRST# pin (PIN60) */ - outb_p(t, WDT_EFDR); /* Write back to CRF5 */ - - outb_p(0xF7, WDT_EFER); /* Select CRF7 */ - t = inb_p(WDT_EFDR); /* read CRF7 */ - t &= ~0xC0; /* disable keyboard & mouse turning off - watchdog */ - outb_p(t, WDT_EFDR); /* Write back to CRF7 */ - - w83627hf_unselect_wd_register(); -} - -static void wdt_set_time(int timeout) -{ - spin_lock(&io_lock); - - w83627hf_select_wd_register(); + /* set second mode & disable keyboard turning off watchdog */ + t = superio_inb(0xF5) & ~0x0C; + /* enable the WDTO# output low pulse to the KBRST# pin */ + t |= 0x02; + superio_outb(0xF5, t); - outb_p(0xF6, WDT_EFER); /* Select CRF6 */ - outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF6 */ + /* disable keyboard & mouse turning off watchdog */ + t = superio_inb(0xF7) & ~0xC0; + superio_outb(0xF7, t); - w83627hf_unselect_wd_register(); + superio_exit(); - spin_unlock(&io_lock); -} - -static int wdt_ping(void) -{ - wdt_set_time(timeout); return 0; } -static int wdt_disable(void) +static int wdt_set_time(unsigned int timeout) { - wdt_set_time(0); - return 0; -} + int ret; + + ret = superio_enter(); + if (ret) + return ret; + + superio_select(W83627HF_LD_WDT); + superio_outb(0xF6, timeout); + superio_exit(); -static int wdt_set_heartbeat(int t) -{ - if (t < 1 || t > 255) - return -EINVAL; - timeout = t; return 0; } -static int wdt_get_time(void) +static int wdt_start(struct watchdog_device *wdog) { - int timeleft; - - spin_lock(&io_lock); - - w83627hf_select_wd_register(); - - outb_p(0xF6, WDT_EFER); /* Select CRF6 */ - timeleft = inb_p(WDT_EFDR); /* Read Timeout counter to CRF6 */ - - w83627hf_unselect_wd_register(); - - spin_unlock(&io_lock); - - return timeleft; + return wdt_set_time(wdog->timeout); } -static ssize_t wdt_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static int wdt_stop(struct watchdog_device *wdog) { - if (count) { - if (!nowayout) { - size_t i; - - expect_close = 0; - - for (i = 0; i != count; i++) { - char c; - if (get_user(c, buf + i)) - return -EFAULT; - if (c == 'V') - expect_close = 42; - } - } - wdt_ping(); - } - return count; + return wdt_set_time(0); } -static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static int wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout) { - void 
__user *argp = (void __user *)arg; - int __user *p = argp; - int timeval; - static const struct watchdog_info ident = { - .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | - WDIOF_MAGICCLOSE, - .firmware_version = 1, - .identity = "W83627HF WDT", - }; - - switch (cmd) { - case WDIOC_GETSUPPORT: - if (copy_to_user(argp, &ident, sizeof(ident))) - return -EFAULT; - break; - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - return put_user(0, p); - case WDIOC_SETOPTIONS: - { - int options, retval = -EINVAL; - - if (get_user(options, p)) - return -EFAULT; - if (options & WDIOS_DISABLECARD) { - wdt_disable(); - retval = 0; - } - if (options & WDIOS_ENABLECARD) { - wdt_ping(); - retval = 0; - } - return retval; - } - case WDIOC_KEEPALIVE: - wdt_ping(); - break; - case WDIOC_SETTIMEOUT: - if (get_user(timeval, p)) - return -EFAULT; - if (wdt_set_heartbeat(timeval)) - return -EINVAL; - wdt_ping(); - /* Fall */ - case WDIOC_GETTIMEOUT: - return put_user(timeout, p); - case WDIOC_GETTIMELEFT: - timeval = wdt_get_time(); - return put_user(timeval, p); - default: - return -ENOTTY; - } + wdog->timeout = timeout; + return 0; } -static int wdt_open(struct inode *inode, struct file *file) +static unsigned int wdt_get_time(struct watchdog_device *wdog) { - if (test_and_set_bit(0, &wdt_is_open)) - return -EBUSY; - /* - * Activate - */ + unsigned int timeleft; + int ret; - wdt_ping(); - return nonseekable_open(inode, file); -} + ret = superio_enter(); + if (ret) + return 0; -static int wdt_close(struct inode *inode, struct file *file) -{ - if (expect_close == 42) - wdt_disable(); - else { - pr_crit("Unexpected close, not stopping watchdog!\n"); - wdt_ping(); - } - expect_close = 0; - clear_bit(0, &wdt_is_open); - return 0; + superio_select(W83627HF_LD_WDT); + timeleft = superio_inb(0xF6); + superio_exit(); + + return timeleft; } /* * Notifier for system down */ - static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) - wdt_disable(); /* Turn the WDT off */ + wdt_set_time(0); /* Turn the WDT off */ return NOTIFY_DONE; } @@ -314,19 +219,25 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, * Kernel Interfaces */ -static const struct file_operations wdt_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .write = wdt_write, - .unlocked_ioctl = wdt_ioctl, - .open = wdt_open, - .release = wdt_close, +static struct watchdog_info wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "W83627HF Watchdog", }; -static struct miscdevice wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &wdt_fops, +static struct watchdog_ops wdt_ops = { + .owner = THIS_MODULE, + .start = wdt_start, + .stop = wdt_stop, + .set_timeout = wdt_set_timeout, + .get_timeleft = wdt_get_time, +}; + +static struct watchdog_device wdt_dev = { + .info = &wdt_info, + .ops = &wdt_ops, + .timeout = WATCHDOG_TIMEOUT, + .min_timeout = 1, + .max_timeout = 255, }; /* @@ -344,50 +255,39 @@ static int __init wdt_init(void) pr_info("WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising\n"); - if (wdt_set_heartbeat(timeout)) { - wdt_set_heartbeat(WATCHDOG_TIMEOUT); - pr_info("timeout value must be 1 <= timeout <= 255, using %d\n", - WATCHDOG_TIMEOUT); - } + watchdog_init_timeout(&wdt_dev, timeout, NULL); + watchdog_set_nowayout(&wdt_dev, nowayout); - if (!request_region(wdt_io, 1, WATCHDOG_NAME)) { - pr_err("I/O address 0x%04x already in use\n", wdt_io); - 
ret = -EIO; - goto out; + ret = w83627hf_init(&wdt_dev); + if (ret) { + pr_err("failed to initialize watchdog (err=%d)\n", ret); + return ret; } - w83627hf_init(); - ret = register_reboot_notifier(&wdt_notifier); if (ret != 0) { pr_err("cannot register reboot notifier (err=%d)\n", ret); - goto unreg_regions; + return ret; } - ret = misc_register(&wdt_miscdev); - if (ret != 0) { - pr_err("cannot register miscdev on minor=%d (err=%d)\n", - WATCHDOG_MINOR, ret); + ret = watchdog_register_device(&wdt_dev); + if (ret) goto unreg_reboot; - } pr_info("initialized. timeout=%d sec (nowayout=%d)\n", - timeout, nowayout); + wdt_dev.timeout, nowayout); -out: return ret; + unreg_reboot: unregister_reboot_notifier(&wdt_notifier); -unreg_regions: - release_region(wdt_io, 1); - goto out; + return ret; } static void __exit wdt_exit(void) { - misc_deregister(&wdt_miscdev); + watchdog_unregister_device(&wdt_dev); unregister_reboot_notifier(&wdt_notifier); - release_region(wdt_io, 1); } module_init(wdt_init); @@ -396,4 +296,3 @@ module_exit(wdt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>"); MODULE_DESCRIPTION("w83627hf/thf WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c index cd9f3c1e1af..aaf2995d37f 100644 --- a/drivers/watchdog/w83697hf_wdt.c +++ b/drivers/watchdog/w83697hf_wdt.c @@ -458,4 +458,3 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marcus Junker <junker@anduras.de>, " "Samuel Tardieu <sam@rfc1149.net>"); MODULE_DESCRIPTION("w83697hf/hg WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index 274be0bfaf2..ff58cb74671 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c @@ -395,4 +395,3 @@ module_exit(wdt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Flemming Frandsen <ff@nrvissing.net>"); MODULE_DESCRIPTION("w83697ug/uf WDT driver"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c index 7874ae06232..f0483c75ed3 100644 --- a/drivers/watchdog/w83877f_wdt.c +++ b/drivers/watchdog/w83877f_wdt.c @@ -406,4 +406,3 @@ module_exit(w83877f_wdt_unload); MODULE_AUTHOR("Scott and Bill Jennings"); MODULE_DESCRIPTION("Driver for watchdog timer in w83877f chip"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 5d2c902825c..91bf55a2002 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -527,4 +527,3 @@ module_exit(w83977f_wdt_exit); MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>"); MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index 25aba6e00a2..db0da7ea4fd 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -322,6 +322,5 @@ module_exit(wafwdt_exit); MODULE_AUTHOR("Justin Cormack"); MODULE_DESCRIPTION("ICP Wafer 5823 Single Board Computer WDT driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* end of wafer5823wdt.c */ diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 05d18b4c661..461336c4519 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -77,7 +77,7 @@ int watchdog_init_timeout(struct watchdog_device 
*wdd, watchdog_check_min_max_timeout(wdd); - /* try to get the tiemout module parameter first */ + /* try to get the timeout module parameter first */ if (!watchdog_timeout_invalid(wdd, timeout_parm)) { wdd->timeout = timeout_parm; return ret; diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c index 3045debd541..0240c60d14e 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c @@ -48,8 +48,6 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); MODULE_DESCRIPTION("RTAS watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); static bool wdrtas_nowayout = WATCHDOG_NOWAYOUT; static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0); diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c index ee4333c0110..e0206b5b7d8 100644 --- a/drivers/watchdog/wdt.c +++ b/drivers/watchdog/wdt.c @@ -664,6 +664,4 @@ module_exit(wdt_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Driver for ISA ICS watchdog cards (WDT500/501)"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c index 5eec7405388..7355ddd0b20 100644 --- a/drivers/watchdog/wdt285.c +++ b/drivers/watchdog/wdt285.c @@ -224,7 +224,6 @@ static void __exit footbridge_watchdog_exit(void) MODULE_AUTHOR("Phil Blundell <pb@nexus.co.uk>"); MODULE_DESCRIPTION("Footbridge watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(soft_margin, int, 0); MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds"); diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c index 65a40234493..a8e6f87f60c 100644 --- a/drivers/watchdog/wdt977.c +++ b/drivers/watchdog/wdt977.c @@ -507,4 +507,3 @@ module_exit(wd977_exit); MODULE_AUTHOR("Woody Suwalski <woodys@xandros.com>"); MODULE_DESCRIPTION("W83977AF Watchdog driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index 36a54c0e32d..ee89ba4dea6 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c @@ -744,5 +744,3 @@ module_pci_driver(wdtpci_driver); MODULE_AUTHOR("JP Nollmann, Alan Cox"); MODULE_DESCRIPTION("Driver for the ICS PCI-WDT500/501 watchdog cards"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -MODULE_ALIAS_MISCDEV(TEMP_MINOR); diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c index d4e47eda418..e243bd01c77 100644 --- a/drivers/watchdog/wm831x_wdt.c +++ b/drivers/watchdog/wm831x_wdt.c @@ -184,7 +184,7 @@ static const struct watchdog_ops wm831x_wdt_ops = { static int wm831x_wdt_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); - struct wm831x_pdata *chip_pdata; + struct wm831x_pdata *chip_pdata = dev_get_platdata(pdev->dev.parent); struct wm831x_watchdog_pdata *pdata; struct wm831x_wdt_drvdata *driver_data; struct watchdog_device *wm831x_wdt; @@ -231,12 +231,10 @@ static int wm831x_wdt_probe(struct platform_device *pdev) wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time; /* Apply any configuration */ - if (pdev->dev.parent->platform_data) { - chip_pdata = pdev->dev.parent->platform_data; + if (chip_pdata) pdata = chip_pdata->watchdog; - } else { + else pdata = NULL; - } if (pdata) { reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK | diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c index 92ad33d0cb7..7a42dffd39e 100644 --- 
a/drivers/watchdog/xen_wdt.c +++ b/drivers/watchdog/xen_wdt.c @@ -362,4 +362,3 @@ MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>"); MODULE_DESCRIPTION("Xen WatchDog Timer Driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index d15f6e80479..188825122aa 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -59,12 +59,12 @@ static int xen_add_device(struct device *dev) add.flags = XEN_PCI_DEV_EXTFN; #ifdef CONFIG_ACPI - handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); + handle = ACPI_HANDLE(&pci_dev->dev); if (!handle && pci_dev->bus->bridge) - handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); + handle = ACPI_HANDLE(pci_dev->bus->bridge); #ifdef CONFIG_PCI_IOV if (!handle && pci_dev->is_virtfn) - handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge); + handle = ACPI_HANDLE(physfn->bus->bridge); #endif if (handle) { acpi_status status; diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index f039b104a98..b03dd23feda 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -43,23 +43,6 @@ #include "fid.h" /** - * v9fs_dentry_delete - called when dentry refcount equals 0 - * @dentry: dentry in question - * - * By returning 1 here we should remove cacheing of unused - * dentry components. - * - */ - -static int v9fs_dentry_delete(const struct dentry *dentry) -{ - p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n", - dentry->d_name.name, dentry); - - return 1; -} - -/** * v9fs_cached_dentry_delete - called when dentry refcount equals 0 * @dentry: dentry in question * @@ -134,6 +117,6 @@ const struct dentry_operations v9fs_cached_dentry_operations = { }; const struct dentry_operations v9fs_dentry_operations = { - .d_delete = v9fs_dentry_delete, + .d_delete = always_delete_dentry, .d_release = v9fs_dentry_release, }; @@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs); static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset, - unsigned short max_sectors) + unsigned int max_sectors) { int retried_segments = 0; struct bio_vec *bvec; diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index f9d5094e102..aa976eced2d 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -9,12 +9,17 @@ config BTRFS_FS select XOR_BLOCKS help - Btrfs is a new filesystem with extents, writable snapshotting, - support for multiple devices and many more features. + Btrfs is a general purpose copy-on-write filesystem with extents, + writable snapshotting, support for multiple devices and many more + features focused on fault tolerance, repair and easy administration. - Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET - FINALIZED. You should say N here unless you are interested in - testing Btrfs with non-critical data. + The filesystem disk format is no longer unstable, and it's not + expected to change unless there are strong reasons to do so. If there + is a format change, file systems with a unchanged format will + continue to be mountable and usable by newer kernels. + + For more information, please see the web pages at + http://btrfs.wiki.kernel.org. To compile this file system support as a module, choose M here. The module will be called btrfs. 
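The fs/9p change above drops the filesystem-local v9fs_dentry_delete() callback, which unconditionally returned 1, and points .d_delete at the generic VFS helper always_delete_dentry() instead, so unused 9p dentries are freed as soon as their refcount reaches zero rather than being kept on the dentry cache. A minimal sketch of the resulting pattern follows; the ops-table name and the comment wording are illustrative placeholders and not part of the patch itself.

	#include <linux/dcache.h>

	/*
	 * Hypothetical example: a filesystem that does not want unused
	 * dentries cached can reuse the VFS helper instead of open-coding
	 * a d_delete callback that always returns 1.
	 */
	static const struct dentry_operations examplefs_dentry_operations = {
		.d_delete = always_delete_dentry,	/* drop dentry on final dput() */
	};

	/*
	 * always_delete_dentry() is provided by fs/dcache.c and is equivalent
	 * to the removed per-filesystem callback:
	 *	static int always_delete_dentry(const struct dentry *dentry)
	 *	{
	 *		return 1;
	 *	}
	 */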
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 8aec751fa46..c1e0b0caf9c 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -495,6 +495,7 @@ static int __btrfs_start_workers(struct btrfs_workers *workers) spin_lock_irq(&workers->lock); if (workers->stopping) { spin_unlock_irq(&workers->lock); + ret = -EINVAL; goto fail_kthread; } list_add_tail(&worker->worker_list, &workers->idle_list); diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index e0aab445697..b50764bef14 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -77,6 +77,15 @@ * the integrity of (super)-block write requests, do not * enable the config option BTRFS_FS_CHECK_INTEGRITY to * include and compile the integrity check tool. + * + * Expect millions of lines of information in the kernel log with an + * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the + * kernel config to at least 26 (which is 64MB). Usually the value is + * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be + * changed like this before LOG_BUF_SHIFT can be set to a high value: + * config LOG_BUF_SHIFT + * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" + * range 12 30 */ #include <linux/sched.h> @@ -124,6 +133,7 @@ #define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400 #define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800 #define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000 +#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000 struct btrfsic_dev_state; struct btrfsic_state; @@ -3015,6 +3025,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio) (rw & WRITE) && NULL != bio->bi_io_vec) { unsigned int i; u64 dev_bytenr; + u64 cur_bytenr; int bio_is_patched; char **mapped_datav; @@ -3033,6 +3044,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio) GFP_NOFS); if (!mapped_datav) goto leave; + cur_bytenr = dev_bytenr; for (i = 0; i < bio->bi_vcnt; i++) { BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); @@ -3044,16 +3056,13 @@ void btrfsic_submit_bio(int rw, struct bio *bio) kfree(mapped_datav); goto leave; } - if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | - BTRFSIC_PRINT_MASK_VERBOSE) == - (dev_state->state->print_mask & - (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | - BTRFSIC_PRINT_MASK_VERBOSE))) + if (dev_state->state->print_mask & + BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE) printk(KERN_INFO - "#%u: page=%p, len=%u, offset=%u\n", - i, bio->bi_io_vec[i].bv_page, - bio->bi_io_vec[i].bv_len, + "#%u: bytenr=%llu, len=%u, offset=%u\n", + i, cur_bytenr, bio->bi_io_vec[i].bv_len, bio->bi_io_vec[i].bv_offset); + cur_bytenr += bio->bi_io_vec[i].bv_len; } btrfsic_process_written_block(dev_state, dev_bytenr, mapped_datav, bio->bi_vcnt, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f9aeb2759a6..54ab86127f7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3613,9 +3613,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 file_start, int contig); -int btrfs_csum_truncate(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - u64 isize); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit); /* inode.c */ @@ -3744,9 +3741,6 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int 
datasync); void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); -int btrfs_replace_extent_cache(struct inode *inode, struct extent_map *replace, - u64 start, u64 end, int skip_pinned, - int modified); extern const struct file_operations btrfs_file_operations; int __btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 342f9fd411e..2cfc3dfff64 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -366,7 +366,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, dev_replace->tgtdev = tgt_device; printk_in_rcu(KERN_INFO - "btrfs: dev_replace from %s (devid %llu) to %s) started\n", + "btrfs: dev_replace from %s (devid %llu) to %s started\n", src_device->missing ? "<missing disk>" : rcu_str_deref(src_device->name), src_device->devid, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4c4ed0bb3da..8072cfa8a3b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3517,7 +3517,6 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) int btrfs_commit_super(struct btrfs_root *root) { struct btrfs_trans_handle *trans; - int ret; mutex_lock(&root->fs_info->cleaner_mutex); btrfs_run_delayed_iputs(root); @@ -3531,25 +3530,7 @@ int btrfs_commit_super(struct btrfs_root *root) trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); - ret = btrfs_commit_transaction(trans, root); - if (ret) - return ret; - /* run commit again to drop the original snapshot */ - trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) - return PTR_ERR(trans); - ret = btrfs_commit_transaction(trans, root); - if (ret) - return ret; - ret = btrfs_write_and_wait_transaction(NULL, root); - if (ret) { - btrfs_error(root->fs_info, ret, - "Failed to sync btree inode to disk."); - return ret; - } - - ret = write_ctree_super(NULL, root, 0); - return ret; + return btrfs_commit_transaction(trans, root); } int close_ctree(struct btrfs_root *root) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 856bc2b2192..8e457fca0a0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1980,6 +1980,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; int ret; + ASSERT(!(fs_info->sb->s_flags & MS_RDONLY)); BUG_ON(!mirror_num); /* we can't repair anything in raid56 yet */ @@ -2036,6 +2037,9 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb, unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); int ret = 0; + if (root->fs_info->sb->s_flags & MS_RDONLY) + return -EROFS; + for (i = 0; i < num_pages; i++) { struct page *p = extent_buffer_page(eb, i); ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE, @@ -2057,12 +2061,12 @@ static int clean_io_failure(u64 start, struct page *page) u64 private; u64 private_failure; struct io_failure_record *failrec; - struct btrfs_fs_info *fs_info; + struct inode *inode = page->mapping->host; + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; struct extent_state *state; int num_copies; int did_repair = 0; int ret; - struct inode *inode = page->mapping->host; private = 0; ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, @@ -2085,6 +2089,8 @@ static int clean_io_failure(u64 start, struct page *page) did_repair = 1; goto out; } + if (fs_info->sb->s_flags & MS_RDONLY) + goto out; spin_lock(&BTRFS_I(inode)->io_tree.lock); state = 
find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, @@ -2094,7 +2100,6 @@ static int clean_io_failure(u64 start, struct page *page) if (state && state->start <= failrec->start && state->end >= failrec->start + failrec->len - 1) { - fs_info = BTRFS_I(inode)->root->fs_info; num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); if (num_copies > 1) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index da8d2f696ac..f1a77449d03 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2129,7 +2129,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path, old->extent_offset, fs_info, path, record_one_backref, old); - BUG_ON(ret < 0 && ret != -ENOENT); + if (ret < 0 && ret != -ENOENT) + return false; /* no backref to be processed for this extent */ if (!old->count) { @@ -6186,8 +6187,7 @@ insert: write_unlock(&em_tree->lock); out: - if (em) - trace_btrfs_get_extent(root, em); + trace_btrfs_get_extent(root, em); if (path) btrfs_free_path(path); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 25a8f3812f1..69582d5b69d 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -638,6 +638,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr) WARN_ON(nr < 0); } } + list_splice_tail(&splice, &fs_info->ordered_roots); spin_unlock(&fs_info->ordered_root_lock); } @@ -803,7 +804,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) btrfs_put_ordered_extent(ordered); break; } - if (ordered->file_offset + ordered->len < start) { + if (ordered->file_offset + ordered->len <= start) { btrfs_put_ordered_extent(ordered); break; } diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2544805544f..561e2f16ba3 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -938,8 +938,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) BTRFS_DEV_STAT_CORRUPTION_ERRS); } - if (sctx->readonly && !sctx->is_dev_replace) - goto did_not_correct_error; + if (sctx->readonly) { + ASSERT(!sctx->is_dev_replace); + goto out; + } if (!is_metadata && !have_csum) { struct scrub_fixup_nodatasum *fixup_nodatasum; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 57c16b46afb..c6a872a8a46 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1480,7 +1480,7 @@ static void do_async_commit(struct work_struct *work) * We've got freeze protection passed with the transaction. * Tell lockdep about it. */ - if (ac->newtrans->type < TRANS_JOIN_NOLOCK) + if (ac->newtrans->type & __TRANS_FREEZABLE) rwsem_acquire_read( &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], 0, 1, _THIS_IP_); @@ -1521,7 +1521,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, * Tell lockdep we've released the freeze rwsem, since the * async commit thread will be the one to unlock it. 
*/ - if (trans->type < TRANS_JOIN_NOLOCK) + if (ac->newtrans->type & __TRANS_FREEZABLE) rwsem_release( &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], 1, _THIS_IP_); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 744553c83fe..9f7fc51ca33 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3697,7 +3697,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, - &BTRFS_I(inode)->runtime_flags)) { + &BTRFS_I(inode)->runtime_flags) || + inode_only == LOG_INODE_EXISTS) { if (inode_only == LOG_INODE_ALL) fast_search = true; max_key.type = BTRFS_XATTR_ITEM_KEY; @@ -3801,7 +3802,7 @@ log_extents: err = ret; goto out_unlock; } - } else { + } else if (inode_only == LOG_INODE_ALL) { struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em, *n; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0db63709786..92303f42baa 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5394,7 +5394,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio, { struct bio_vec *prev; struct request_queue *q = bdev_get_queue(bdev); - unsigned short max_sectors = queue_max_sectors(q); + unsigned int max_sectors = queue_max_sectors(q); struct bvec_merge_data bvm = { .bi_bdev = bdev, .bi_sector = sector, diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 277bd1be21f..e081acbac2e 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -56,29 +56,28 @@ static void configfs_d_iput(struct dentry * dentry, struct configfs_dirent *sd = dentry->d_fsdata; if (sd) { - BUG_ON(sd->s_dentry != dentry); /* Coordinate with configfs_readdir */ spin_lock(&configfs_dirent_lock); - sd->s_dentry = NULL; + /* Coordinate with configfs_attach_attr where will increase + * sd->s_count and update sd->s_dentry to new allocated one. + * Only set sd->dentry to null when this dentry is the only + * sd owner. + * If not do so, configfs_d_iput may run just after + * configfs_attach_attr and set sd->s_dentry to null + * even it's still in use. + */ + if (atomic_read(&sd->s_count) <= 2) + sd->s_dentry = NULL; + spin_unlock(&configfs_dirent_lock); configfs_put(sd); } iput(inode); } -/* - * We _must_ delete our dentries on last dput, as the chain-to-parent - * behavior is required to clear the parents of default_groups. 
- */ -static int configfs_d_delete(const struct dentry *dentry) -{ - return 1; -} - const struct dentry_operations configfs_dentry_ops = { .d_iput = configfs_d_iput, - /* simple_delete_dentry() isn't exported */ - .d_delete = configfs_d_delete, + .d_delete = always_delete_dentry, }; #ifdef CONFIG_LOCKDEP @@ -426,8 +425,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den struct configfs_attribute * attr = sd->s_element; int error; + spin_lock(&configfs_dirent_lock); dentry->d_fsdata = configfs_get(sd); sd->s_dentry = dentry; + spin_unlock(&configfs_dirent_lock); + error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG, configfs_init_file); if (error) { diff --git a/fs/coredump.c b/fs/coredump.c index 62406b6959b..bc3fbcd3255 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -695,7 +695,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr) while (nr) { if (dump_interrupted()) return 0; - n = vfs_write(file, addr, nr, &pos); + n = __kernel_write(file, addr, nr, &pos); if (n <= 0) return 0; file->f_pos = pos; @@ -733,7 +733,7 @@ int dump_align(struct coredump_params *cprm, int align) { unsigned mod = cprm->written & (align - 1); if (align & (align - 1)) - return -EINVAL; - return mod ? dump_skip(cprm, align - mod) : 0; + return 0; + return mod ? dump_skip(cprm, align - mod) : 1; } EXPORT_SYMBOL(dump_align); diff --git a/fs/dcache.c b/fs/dcache.c index 0a38ef8d7f0..4bdb300b16e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -88,35 +88,6 @@ EXPORT_SYMBOL(rename_lock); static struct kmem_cache *dentry_cache __read_mostly; -/** - * read_seqbegin_or_lock - begin a sequence number check or locking block - * @lock: sequence lock - * @seq : sequence number to be checked - * - * First try it once optimistically without taking the lock. If that fails, - * take the lock. The sequence number is also used as a marker for deciding - * whether to be a reader (even) or writer (odd). - * N.B. seq must be initialized to an even number to begin with. - */ -static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) -{ - if (!(*seq & 1)) /* Even */ - *seq = read_seqbegin(lock); - else /* Odd */ - read_seqlock_excl(lock); -} - -static inline int need_seqretry(seqlock_t *lock, int seq) -{ - return !(seq & 1) && read_seqretry(lock, seq); -} - -static inline void done_seqretry(seqlock_t *lock, int seq) -{ - if (seq & 1) - read_sequnlock_excl(lock); -} - /* * This is the single most critical data structure when it comes * to the dcache: the hashtable for lookups. Somebody should try @@ -125,8 +96,6 @@ static inline void done_seqretry(seqlock_t *lock, int seq) * This hash-function tries to avoid losing too many bits of hash * information, yet avoid using a prime hash-size or similar. */ -#define D_HASHBITS d_hash_shift -#define D_HASHMASK d_hash_mask static unsigned int d_hash_mask __read_mostly; static unsigned int d_hash_shift __read_mostly; @@ -137,8 +106,8 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent, unsigned int hash) { hash += (unsigned long) parent / L1_CACHE_BYTES; - hash = hash + (hash >> D_HASHBITS); - return dentry_hashtable + (hash & D_HASHMASK); + hash = hash + (hash >> d_hash_shift); + return dentry_hashtable + (hash & d_hash_mask); } /* Statistics gathering. 
*/ @@ -469,7 +438,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) { list_del(&dentry->d_u.d_child); /* - * Inform try_to_ascend() that we are no longer attached to the + * Inform d_walk() that we are no longer attached to the * dentry tree */ dentry->d_flags |= DCACHE_DENTRY_KILLED; @@ -1069,34 +1038,6 @@ void shrink_dcache_sb(struct super_block *sb) } EXPORT_SYMBOL(shrink_dcache_sb); -/* - * This tries to ascend one level of parenthood, but - * we can race with renaming, so we need to re-check - * the parenthood after dropping the lock and check - * that the sequence number still matches. - */ -static struct dentry *try_to_ascend(struct dentry *old, unsigned seq) -{ - struct dentry *new = old->d_parent; - - rcu_read_lock(); - spin_unlock(&old->d_lock); - spin_lock(&new->d_lock); - - /* - * might go back up the wrong parent if we have had a rename - * or deletion - */ - if (new != old->d_parent || - (old->d_flags & DCACHE_DENTRY_KILLED) || - need_seqretry(&rename_lock, seq)) { - spin_unlock(&new->d_lock); - new = NULL; - } - rcu_read_unlock(); - return new; -} - /** * enum d_walk_ret - action to talke during tree walk * @D_WALK_CONTINUE: contrinue walk @@ -1185,9 +1126,24 @@ resume: */ if (this_parent != parent) { struct dentry *child = this_parent; - this_parent = try_to_ascend(this_parent, seq); - if (!this_parent) + this_parent = child->d_parent; + + rcu_read_lock(); + spin_unlock(&child->d_lock); + spin_lock(&this_parent->d_lock); + + /* + * might go back up the wrong parent if we have had a rename + * or deletion + */ + if (this_parent != child->d_parent || + (child->d_flags & DCACHE_DENTRY_KILLED) || + need_seqretry(&rename_lock, seq)) { + spin_unlock(&this_parent->d_lock); + rcu_read_unlock(); goto rename_retry; + } + rcu_read_unlock(); next = child->d_u.d_child.next; goto resume; } diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c index 60a327863b1..e7cfbaf8d0e 100644 --- a/fs/dlm/netlink.c +++ b/fs/dlm/netlink.c @@ -74,14 +74,16 @@ static int user_cmd(struct sk_buff *skb, struct genl_info *info) return 0; } -static struct genl_ops dlm_nl_ops = { - .cmd = DLM_CMD_HELLO, - .doit = user_cmd, +static struct genl_ops dlm_nl_ops[] = { + { + .cmd = DLM_CMD_HELLO, + .doit = user_cmd, + }, }; int __init dlm_netlink_init(void) { - return genl_register_family_with_ops(&family, &dlm_nl_ops, 1); + return genl_register_family_with_ops(&family, dlm_nl_ops); } void dlm_netlink_exit(void) diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index a8766b880c0..becc725a195 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -83,19 +83,10 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr) return 0; } -/* - * Retaining negative dentries for an in-memory filesystem just wastes - * memory and lookup time: arrange for them to be deleted immediately. 
- */ -static int efivarfs_delete_dentry(const struct dentry *dentry) -{ - return 1; -} - static struct dentry_operations efivarfs_d_ops = { .d_compare = efivarfs_d_compare, .d_hash = efivarfs_d_hash, - .d_delete = efivarfs_delete_dentry, + .d_delete = always_delete_dentry, }; static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) diff --git a/fs/exec.c b/fs/exec.c index 977319fd77f..7ea097f6b34 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1380,10 +1380,6 @@ int search_binary_handler(struct linux_binprm *bprm) if (retval) return retval; - retval = audit_bprm(bprm); - if (retval) - return retval; - retval = -ENOENT; retry: read_lock(&binfmt_lock); @@ -1431,6 +1427,7 @@ static int exec_binprm(struct linux_binprm *bprm) ret = search_binary_handler(bprm); if (ret >= 0) { + audit_bprm(bprm); trace_sched_process_exec(current, old_pid, bprm); ptrace_event(PTRACE_EVENT_EXEC, old_vpid); current->did_exec = 1; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index e66a8009aff..c8420f7e4db 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1899,7 +1899,8 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) gi->nhash = 0; } /* Skip entries for other sb and dead entries */ - } while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gl->gl_lockref)); + } while (gi->sdp != gi->gl->gl_sbd || + __lockref_is_dead(&gi->gl->gl_lockref)); return 0; } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 1615df16cf4..7119504159f 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -1171,8 +1171,11 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry, if (d != NULL) dentry = d; if (dentry->d_inode) { - if (!(*opened & FILE_OPENED)) + if (!(*opened & FILE_OPENED)) { + if (d == NULL) + dget(dentry); return finish_no_open(file, dentry); + } dput(d); return 0; } diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index c8423d6de6c..2a6ba06bee6 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -466,19 +466,19 @@ static void gdlm_cancel(struct gfs2_glock *gl) static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen, char *lvb_bits) { - uint32_t gen; + __le32 gen; memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); - memcpy(&gen, lvb_bits, sizeof(uint32_t)); + memcpy(&gen, lvb_bits, sizeof(__le32)); *lvb_gen = le32_to_cpu(gen); } static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, char *lvb_bits) { - uint32_t gen; + __le32 gen; memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); gen = cpu_to_le32(lvb_gen); - memcpy(ls->ls_control_lvb, &gen, sizeof(uint32_t)); + memcpy(ls->ls_control_lvb, &gen, sizeof(__le32)); } static int all_jid_bits_clear(char *lvb) diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 453b50eadde..98236d0df3c 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -667,7 +667,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, struct buffer_head *bh; struct page *page; void *kaddr, *ptr; - struct gfs2_quota q, *qp; + struct gfs2_quota q; int err, nbytes; u64 size; @@ -683,28 +683,25 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, return err; err = -EIO; - qp = &q; - qp->qu_value = be64_to_cpu(qp->qu_value); - qp->qu_value += change; - qp->qu_value = cpu_to_be64(qp->qu_value); - qd->qd_qb.qb_value = qp->qu_value; + be64_add_cpu(&q.qu_value, change); + qd->qd_qb.qb_value = q.qu_value; if (fdq) { if (fdq->d_fieldmask & FS_DQ_BSOFT) { - qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); - qd->qd_qb.qb_warn = qp->qu_warn; + q.qu_warn = 
cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); + qd->qd_qb.qb_warn = q.qu_warn; } if (fdq->d_fieldmask & FS_DQ_BHARD) { - qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); - qd->qd_qb.qb_limit = qp->qu_limit; + q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); + qd->qd_qb.qb_limit = q.qu_limit; } if (fdq->d_fieldmask & FS_DQ_BCOUNT) { - qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); - qd->qd_qb.qb_value = qp->qu_value; + q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); + qd->qd_qb.qb_value = q.qu_value; } } /* Write the quota into the quota file on disk */ - ptr = qp; + ptr = &q; nbytes = sizeof(struct gfs2_quota); get_a_page: page = find_or_create_page(mapping, index, GFP_NOFS); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 4d83abdd563..c8d6161bd68 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1127,7 +1127,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd) rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK); rgd->rd_free_clone = rgd->rd_free; } - if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) { + if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) { rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd)); gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); @@ -1161,7 +1161,7 @@ int update_rgrp_lvb(struct gfs2_rgrpd *rgd) if (rgd->rd_flags & GFS2_RDF_UPTODATE) return 0; - if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) + if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) return gfs2_rgrp_bh_get(rgd); rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 25437280a20..db23ce1bd90 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -33,15 +33,6 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) #define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file)) -static int hostfs_d_delete(const struct dentry *dentry) -{ - return 1; -} - -static const struct dentry_operations hostfs_dentry_ops = { - .d_delete = hostfs_d_delete, -}; - /* Changed in hostfs_args before the kernel starts running */ static char *root_ino = ""; static int append = 0; @@ -925,7 +916,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) sb->s_blocksize_bits = 10; sb->s_magic = HOSTFS_SUPER_MAGIC; sb->s_op = &hostfs_sbops; - sb->s_d_op = &hostfs_dentry_ops; + sb->s_d_op = &simple_dentry_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; /* NULL is printed as <NULL> by sprintf: avoid that. */ diff --git a/fs/libfs.c b/fs/libfs.c index 5de06947ba5..a1844244246 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -47,10 +47,16 @@ EXPORT_SYMBOL(simple_statfs); * Retaining negative dentries for an in-memory filesystem just wastes * memory and lookup time: arrange for them to be deleted immediately. */ -static int simple_delete_dentry(const struct dentry *dentry) +int always_delete_dentry(const struct dentry *dentry) { return 1; } +EXPORT_SYMBOL(always_delete_dentry); + +const struct dentry_operations simple_dentry_operations = { + .d_delete = always_delete_dentry, +}; +EXPORT_SYMBOL(simple_dentry_operations); /* * Lookup the data. 
This is trivial - if the dentry didn't already @@ -58,10 +64,6 @@ static int simple_delete_dentry(const struct dentry *dentry) */ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { - static const struct dentry_operations simple_dentry_operations = { - .d_delete = simple_delete_dentry, - }; - if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); if (!dentry->d_sb->s_d_op) diff --git a/fs/namei.c b/fs/namei.c index e029a4cbff7..8f77a8cea28 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2435,6 +2435,7 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir) */ static inline int may_create(struct inode *dir, struct dentry *child) { + audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE); if (child->d_inode) return -EEXIST; if (IS_DEADDIR(dir)) diff --git a/fs/proc/base.c b/fs/proc/base.c index 1485e38daaa..03c8d747be4 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1151,10 +1151,16 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, goto out_free_page; } - kloginuid = make_kuid(file->f_cred->user_ns, loginuid); - if (!uid_valid(kloginuid)) { - length = -EINVAL; - goto out_free_page; + + /* is userspace tring to explicitly UNSET the loginuid? */ + if (loginuid == AUDIT_UID_UNSET) { + kloginuid = INVALID_UID; + } else { + kloginuid = make_kuid(file->f_cred->user_ns, loginuid); + if (!uid_valid(kloginuid)) { + length = -EINVAL; + goto out_free_page; + } } length = audit_set_loginuid(kloginuid); diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 737e15615b0..cca93b6fb9a 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -175,22 +175,6 @@ static const struct inode_operations proc_link_inode_operations = { }; /* - * As some entries in /proc are volatile, we want to - * get rid of unused dentries. This could be made - * smarter: we could keep a "volatile" flag in the - * inode to indicate which ones to keep. - */ -static int proc_delete_dentry(const struct dentry * dentry) -{ - return 1; -} - -static const struct dentry_operations proc_dentry_operations = -{ - .d_delete = proc_delete_dentry, -}; - -/* * Don't create negative dentries here, return -ENOENT by hand * instead. 
*/ @@ -209,7 +193,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, inode = proc_get_inode(dir->i_sb, de); if (!inode) return ERR_PTR(-ENOMEM); - d_set_d_op(dentry, &proc_dentry_operations); + d_set_d_op(dentry, &simple_dentry_operations); d_add(dentry, inode); return NULL; } diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 49a7fff2e83..9ae46b87470 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -42,12 +42,6 @@ static const struct inode_operations ns_inode_operations = { .setattr = proc_setattr, }; -static int ns_delete_dentry(const struct dentry *dentry) -{ - /* Don't cache namespace inodes when not in use */ - return 1; -} - static char *ns_dname(struct dentry *dentry, char *buffer, int buflen) { struct inode *inode = dentry->d_inode; @@ -59,7 +53,7 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen) const struct dentry_operations ns_dentry_operations = { - .d_delete = ns_delete_dentry, + .d_delete = always_delete_dentry, .d_dname = ns_dname, }; diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c index 16e8abb7709..72d29177998 100644 --- a/fs/quota/netlink.c +++ b/fs/quota/netlink.c @@ -9,13 +9,25 @@ #include <net/netlink.h> #include <net/genetlink.h> +static const struct genl_multicast_group quota_mcgrps[] = { + { .name = "events", }, +}; + /* Netlink family structure for quota */ static struct genl_family quota_genl_family = { - .id = GENL_ID_GENERATE, + /* + * Needed due to multicast group ID abuse - old code assumed + * the family ID was also a valid multicast group ID (which + * isn't true) and userspace might thus rely on it. Assign a + * static ID for this group to make dealing with that easier. + */ + .id = GENL_ID_VFS_DQUOT, .hdrsize = 0, .name = "VFS_DQUOT", .version = 1, .maxattr = QUOTA_NL_A_MAX, + .mcgrps = quota_mcgrps, + .n_mcgrps = ARRAY_SIZE(quota_mcgrps), }; /** @@ -78,7 +90,7 @@ void quota_send_warning(struct kqid qid, dev_t dev, goto attr_err_out; genlmsg_end(skb, msg_head); - genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); + genlmsg_multicast("a_genl_family, skb, 0, 0, GFP_NOFS); return; attr_err_out: printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); diff --git a/fs/seq_file.c b/fs/seq_file.c index 1cd2388ca5b..1d641bb108d 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -136,6 +136,7 @@ static int traverse(struct seq_file *m, loff_t offset) Eoverflow: m->op->stop(m, p); kfree(m->buf); + m->count = 0; m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); return !m->buf ? -ENOMEM : -EAGAIN; } @@ -232,10 +233,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) goto Fill; m->op->stop(m, p); kfree(m->buf); + m->count = 0; m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); if (!m->buf) goto Enomem; - m->count = 0; m->version = 0; pos = m->index; p = m->op->start(m, &pos); diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig index c70111ebefd..b6fa8657dcb 100644 --- a/fs/squashfs/Kconfig +++ b/fs/squashfs/Kconfig @@ -25,6 +25,78 @@ config SQUASHFS If unsure, say N. +choice + prompt "File decompression options" + depends on SQUASHFS + help + Squashfs now supports two options for decompressing file + data. Traditionally Squashfs has decompressed into an + intermediate buffer and then memcopied it into the page cache. + Squashfs now supports the ability to decompress directly into + the page cache. 
+ + If unsure, select "Decompress file data into an intermediate buffer" + +config SQUASHFS_FILE_CACHE + bool "Decompress file data into an intermediate buffer" + help + Decompress file data into an intermediate buffer and then + memcopy it into the page cache. + +config SQUASHFS_FILE_DIRECT + bool "Decompress files directly into the page cache" + help + Directly decompress file data into the page cache. + Doing so can significantly improve performance because + it eliminates a memcpy and it also removes the lock contention + on the single buffer. + +endchoice + +choice + prompt "Decompressor parallelisation options" + depends on SQUASHFS + help + Squashfs now supports three parallelisation options for + decompression. Each one exhibits various trade-offs between + decompression performance and CPU and memory usage. + + If in doubt, select "Single threaded compression" + +config SQUASHFS_DECOMP_SINGLE + bool "Single threaded compression" + help + Traditionally Squashfs has used single-threaded decompression. + Only one block (data or metadata) can be decompressed at any + one time. This limits CPU and memory usage to a minimum. + +config SQUASHFS_DECOMP_MULTI + bool "Use multiple decompressors for parallel I/O" + help + By default Squashfs uses a single decompressor but it gives + poor performance on parallel I/O workloads when using multiple CPU + machines due to waiting on decompressor availability. + + If you have a parallel I/O workload and your system has enough memory, + using this option may improve overall I/O performance. + + This decompressor implementation uses up to two parallel + decompressors per core. It dynamically allocates decompressors + on a demand basis. + +config SQUASHFS_DECOMP_MULTI_PERCPU + bool "Use percpu multiple decompressors for parallel I/O" + help + By default Squashfs uses a single decompressor but it gives + poor performance on parallel I/O workloads when using multiple CPU + machines due to waiting on decompressor availability. + + This decompressor implementation uses a maximum of one + decompressor per core. It uses percpu variables to ensure + decompression is load-balanced across the cores. 
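
Purely as an illustration, and not part of the patch, the two new choice blocks combine into six build variants; a kernel built for the direct page-cache path with per-CPU parallel decompression would end up with a .config fragment along these lines (option names as defined above):

CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_FILE_DIRECT=y
CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y

The intermediate-buffer and single-threaded entries remain the conservative choices recommended by the help text for memory-constrained systems.
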
+ +endchoice + config SQUASHFS_XATTR bool "Squashfs XATTR support" depends on SQUASHFS diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile index 110b0476f3b..4132520b4ff 100644 --- a/fs/squashfs/Makefile +++ b/fs/squashfs/Makefile @@ -5,6 +5,11 @@ obj-$(CONFIG_SQUASHFS) += squashfs.o squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o squashfs-y += namei.o super.o symlink.o decompressor.o +squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o +squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o +squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o +squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o +squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 41d108ecc9b..0cea9b9236d 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -36,6 +36,7 @@ #include "squashfs_fs_sb.h" #include "squashfs.h" #include "decompressor.h" +#include "page_actor.h" /* * Read the metadata block length, this is stored in the first two @@ -86,16 +87,16 @@ static struct buffer_head *get_block_length(struct super_block *sb, * generated a larger block - this does occasionally happen with compression * algorithms). */ -int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, - int length, u64 *next_index, int srclength, int pages) +int squashfs_read_data(struct super_block *sb, u64 index, int length, + u64 *next_index, struct squashfs_page_actor *output) { struct squashfs_sb_info *msblk = sb->s_fs_info; struct buffer_head **bh; int offset = index & ((1 << msblk->devblksize_log2) - 1); u64 cur_index = index >> msblk->devblksize_log2; - int bytes, compressed, b = 0, k = 0, page = 0, avail; + int bytes, compressed, b = 0, k = 0, avail, i; - bh = kcalloc(((srclength + msblk->devblksize - 1) + bh = kcalloc(((output->length + msblk->devblksize - 1) >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL); if (bh == NULL) return -ENOMEM; @@ -111,9 +112,9 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, *next_index = index + length; TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", - index, compressed ? "" : "un", length, srclength); + index, compressed ? "" : "un", length, output->length); - if (length < 0 || length > srclength || + if (length < 0 || length > output->length || (index + length) > msblk->bytes_used) goto read_failure; @@ -145,7 +146,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed ? "" : "un", length); - if (length < 0 || length > srclength || + if (length < 0 || length > output->length || (index + length) > msblk->bytes_used) goto block_release; @@ -158,9 +159,15 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, ll_rw_block(READ, b - 1, bh + 1); } + for (i = 0; i < b; i++) { + wait_on_buffer(bh[i]); + if (!buffer_uptodate(bh[i])) + goto block_release; + } + if (compressed) { - length = squashfs_decompress(msblk, buffer, bh, b, offset, - length, srclength, pages); + length = squashfs_decompress(msblk, bh, b, offset, length, + output); if (length < 0) goto read_failure; } else { @@ -168,22 +175,20 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, * Block is uncompressed. 
*/ int in, pg_offset = 0; + void *data = squashfs_first_page(output); for (bytes = length; k < b; k++) { in = min(bytes, msblk->devblksize - offset); bytes -= in; - wait_on_buffer(bh[k]); - if (!buffer_uptodate(bh[k])) - goto block_release; while (in) { if (pg_offset == PAGE_CACHE_SIZE) { - page++; + data = squashfs_next_page(output); pg_offset = 0; } avail = min_t(int, in, PAGE_CACHE_SIZE - pg_offset); - memcpy(buffer[page] + pg_offset, - bh[k]->b_data + offset, avail); + memcpy(data + pg_offset, bh[k]->b_data + offset, + avail); in -= avail; pg_offset += avail; offset += avail; @@ -191,6 +196,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, offset = 0; put_bh(bh[k]); } + squashfs_finish_page(output); } kfree(bh); diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index af0b7380259..1cb70a0b216 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -56,6 +56,7 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs.h" +#include "page_actor.h" /* * Look-up block in cache, and increment usage count. If not in cache, read @@ -119,9 +120,8 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb, entry->error = 0; spin_unlock(&cache->lock); - entry->length = squashfs_read_data(sb, entry->data, - block, length, &entry->next_index, - cache->block_size, cache->pages); + entry->length = squashfs_read_data(sb, block, length, + &entry->next_index, entry->actor); spin_lock(&cache->lock); @@ -220,6 +220,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache) kfree(cache->entry[i].data[j]); kfree(cache->entry[i].data); } + kfree(cache->entry[i].actor); } kfree(cache->entry); @@ -280,6 +281,13 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, goto cleanup; } } + + entry->actor = squashfs_page_actor_init(entry->data, + cache->pages, 0); + if (entry->actor == NULL) { + ERROR("Failed to allocate %s cache entry\n", name); + goto cleanup; + } } return cache; @@ -410,6 +418,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length) int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; int i, res; void *table, *buffer, **data; + struct squashfs_page_actor *actor; table = buffer = kmalloc(length, GFP_KERNEL); if (table == NULL) @@ -421,19 +430,28 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length) goto failed; } + actor = squashfs_page_actor_init(data, pages, length); + if (actor == NULL) { + res = -ENOMEM; + goto failed2; + } + for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) data[i] = buffer; - res = squashfs_read_data(sb, data, block, length | - SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); + res = squashfs_read_data(sb, block, length | + SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor); kfree(data); + kfree(actor); if (res < 0) goto failed; return table; +failed2: + kfree(data); failed: kfree(table); return ERR_PTR(res); diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c index 3f6271d86ab..ac22fe73b0a 100644 --- a/fs/squashfs/decompressor.c +++ b/fs/squashfs/decompressor.c @@ -30,6 +30,7 @@ #include "squashfs_fs_sb.h" #include "decompressor.h" #include "squashfs.h" +#include "page_actor.h" /* * This file (and decompressor.h) implements a decompressor framework for @@ -37,29 +38,29 @@ */ static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = { - NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0 + NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0 }; #ifndef CONFIG_SQUASHFS_LZO static const struct 
squashfs_decompressor squashfs_lzo_comp_ops = { - NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0 + NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0 }; #endif #ifndef CONFIG_SQUASHFS_XZ static const struct squashfs_decompressor squashfs_xz_comp_ops = { - NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0 + NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0 }; #endif #ifndef CONFIG_SQUASHFS_ZLIB static const struct squashfs_decompressor squashfs_zlib_comp_ops = { - NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0 + NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0 }; #endif static const struct squashfs_decompressor squashfs_unknown_comp_ops = { - NULL, NULL, NULL, 0, "unknown", 0 + NULL, NULL, NULL, NULL, 0, "unknown", 0 }; static const struct squashfs_decompressor *decompressor[] = { @@ -83,10 +84,11 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id) } -void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags) +static void *get_comp_opts(struct super_block *sb, unsigned short flags) { struct squashfs_sb_info *msblk = sb->s_fs_info; - void *strm, *buffer = NULL; + void *buffer = NULL, *comp_opts; + struct squashfs_page_actor *actor = NULL; int length = 0; /* @@ -94,23 +96,46 @@ void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags) */ if (SQUASHFS_COMP_OPTS(flags)) { buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); - if (buffer == NULL) - return ERR_PTR(-ENOMEM); + if (buffer == NULL) { + comp_opts = ERR_PTR(-ENOMEM); + goto out; + } + + actor = squashfs_page_actor_init(&buffer, 1, 0); + if (actor == NULL) { + comp_opts = ERR_PTR(-ENOMEM); + goto out; + } - length = squashfs_read_data(sb, &buffer, - sizeof(struct squashfs_super_block), 0, NULL, - PAGE_CACHE_SIZE, 1); + length = squashfs_read_data(sb, + sizeof(struct squashfs_super_block), 0, NULL, actor); if (length < 0) { - strm = ERR_PTR(length); - goto finished; + comp_opts = ERR_PTR(length); + goto out; } } - strm = msblk->decompressor->init(msblk, buffer, length); + comp_opts = squashfs_comp_opts(msblk, buffer, length); -finished: +out: + kfree(actor); kfree(buffer); + return comp_opts; +} + + +void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags) +{ + struct squashfs_sb_info *msblk = sb->s_fs_info; + void *stream, *comp_opts = get_comp_opts(sb, flags); + + if (IS_ERR(comp_opts)) + return comp_opts; + + stream = squashfs_decompressor_create(msblk, comp_opts); + if (IS_ERR(stream)) + kfree(comp_opts); - return strm; + return stream; } diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h index 330073e2902..af098532180 100644 --- a/fs/squashfs/decompressor.h +++ b/fs/squashfs/decompressor.h @@ -24,28 +24,22 @@ */ struct squashfs_decompressor { - void *(*init)(struct squashfs_sb_info *, void *, int); + void *(*init)(struct squashfs_sb_info *, void *); + void *(*comp_opts)(struct squashfs_sb_info *, void *, int); void (*free)(void *); - int (*decompress)(struct squashfs_sb_info *, void **, - struct buffer_head **, int, int, int, int, int); + int (*decompress)(struct squashfs_sb_info *, void *, + struct buffer_head **, int, int, int, + struct squashfs_page_actor *); int id; char *name; int supported; }; -static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk, - void *s) +static inline void *squashfs_comp_opts(struct squashfs_sb_info *msblk, + void *buff, int length) { - if (msblk->decompressor) - msblk->decompressor->free(s); -} - -static inline int squashfs_decompress(struct squashfs_sb_info *msblk, - void **buffer, struct 
buffer_head **bh, int b, int offset, int length, - int srclength, int pages) -{ - return msblk->decompressor->decompress(msblk, buffer, bh, b, offset, - length, srclength, pages); + return msblk->decompressor->comp_opts ? + msblk->decompressor->comp_opts(msblk, buff, length) : NULL; } #ifdef CONFIG_SQUASHFS_XZ diff --git a/fs/squashfs/decompressor_multi.c b/fs/squashfs/decompressor_multi.c new file mode 100644 index 00000000000..d6008a63647 --- /dev/null +++ b/fs/squashfs/decompressor_multi.c @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2013 + * Minchan Kim <minchan@kernel.org> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/buffer_head.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/cpumask.h> + +#include "squashfs_fs.h" +#include "squashfs_fs_sb.h" +#include "decompressor.h" +#include "squashfs.h" + +/* + * This file implements multi-threaded decompression in the + * decompressor framework + */ + + +/* + * The reason that multiply two is that a CPU can request new I/O + * while it is waiting previous request. + */ +#define MAX_DECOMPRESSOR (num_online_cpus() * 2) + + +int squashfs_max_decompressors(void) +{ + return MAX_DECOMPRESSOR; +} + + +struct squashfs_stream { + void *comp_opts; + struct list_head strm_list; + struct mutex mutex; + int avail_decomp; + wait_queue_head_t wait; +}; + + +struct decomp_stream { + void *stream; + struct list_head list; +}; + + +static void put_decomp_stream(struct decomp_stream *decomp_strm, + struct squashfs_stream *stream) +{ + mutex_lock(&stream->mutex); + list_add(&decomp_strm->list, &stream->strm_list); + mutex_unlock(&stream->mutex); + wake_up(&stream->wait); +} + +void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, + void *comp_opts) +{ + struct squashfs_stream *stream; + struct decomp_stream *decomp_strm = NULL; + int err = -ENOMEM; + + stream = kzalloc(sizeof(*stream), GFP_KERNEL); + if (!stream) + goto out; + + stream->comp_opts = comp_opts; + mutex_init(&stream->mutex); + INIT_LIST_HEAD(&stream->strm_list); + init_waitqueue_head(&stream->wait); + + /* + * We should have a decompressor at least as default + * so if we fail to allocate new decompressor dynamically, + * we could always fall back to default decompressor and + * file system works. 
+ */ + decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL); + if (!decomp_strm) + goto out; + + decomp_strm->stream = msblk->decompressor->init(msblk, + stream->comp_opts); + if (IS_ERR(decomp_strm->stream)) { + err = PTR_ERR(decomp_strm->stream); + goto out; + } + + list_add(&decomp_strm->list, &stream->strm_list); + stream->avail_decomp = 1; + return stream; + +out: + kfree(decomp_strm); + kfree(stream); + return ERR_PTR(err); +} + + +void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk) +{ + struct squashfs_stream *stream = msblk->stream; + if (stream) { + struct decomp_stream *decomp_strm; + + while (!list_empty(&stream->strm_list)) { + decomp_strm = list_entry(stream->strm_list.prev, + struct decomp_stream, list); + list_del(&decomp_strm->list); + msblk->decompressor->free(decomp_strm->stream); + kfree(decomp_strm); + stream->avail_decomp--; + } + WARN_ON(stream->avail_decomp); + kfree(stream->comp_opts); + kfree(stream); + } +} + + +static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk, + struct squashfs_stream *stream) +{ + struct decomp_stream *decomp_strm; + + while (1) { + mutex_lock(&stream->mutex); + + /* There is available decomp_stream */ + if (!list_empty(&stream->strm_list)) { + decomp_strm = list_entry(stream->strm_list.prev, + struct decomp_stream, list); + list_del(&decomp_strm->list); + mutex_unlock(&stream->mutex); + break; + } + + /* + * If there is no available decomp and already full, + * let's wait for releasing decomp from other users. + */ + if (stream->avail_decomp >= MAX_DECOMPRESSOR) + goto wait; + + /* Let's allocate new decomp */ + decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL); + if (!decomp_strm) + goto wait; + + decomp_strm->stream = msblk->decompressor->init(msblk, + stream->comp_opts); + if (IS_ERR(decomp_strm->stream)) { + kfree(decomp_strm); + goto wait; + } + + stream->avail_decomp++; + WARN_ON(stream->avail_decomp > MAX_DECOMPRESSOR); + + mutex_unlock(&stream->mutex); + break; +wait: + /* + * If system memory is tough, let's for other's + * releasing instead of hurting VM because it could + * make page cache thrashing. + */ + mutex_unlock(&stream->mutex); + wait_event(stream->wait, + !list_empty(&stream->strm_list)); + } + + return decomp_strm; +} + + +int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, + int b, int offset, int length, struct squashfs_page_actor *output) +{ + int res; + struct squashfs_stream *stream = msblk->stream; + struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream); + res = msblk->decompressor->decompress(msblk, decomp_stream->stream, + bh, b, offset, length, output); + put_decomp_stream(decomp_stream, stream); + if (res < 0) + ERROR("%s decompression failed, data probably corrupt\n", + msblk->decompressor->name); + return res; +} diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c new file mode 100644 index 00000000000..23a9c28ad8e --- /dev/null +++ b/fs/squashfs/decompressor_multi_percpu.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013 + * Phillip Lougher <phillip@squashfs.org.uk> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/percpu.h> +#include <linux/buffer_head.h> + +#include "squashfs_fs.h" +#include "squashfs_fs_sb.h" +#include "decompressor.h" +#include "squashfs.h" + +/* + * This file implements multi-threaded decompression using percpu + * variables, one thread per cpu core. + */ + +struct squashfs_stream { + void *stream; +}; + +void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, + void *comp_opts) +{ + struct squashfs_stream *stream; + struct squashfs_stream __percpu *percpu; + int err, cpu; + + percpu = alloc_percpu(struct squashfs_stream); + if (percpu == NULL) + return ERR_PTR(-ENOMEM); + + for_each_possible_cpu(cpu) { + stream = per_cpu_ptr(percpu, cpu); + stream->stream = msblk->decompressor->init(msblk, comp_opts); + if (IS_ERR(stream->stream)) { + err = PTR_ERR(stream->stream); + goto out; + } + } + + kfree(comp_opts); + return (__force void *) percpu; + +out: + for_each_possible_cpu(cpu) { + stream = per_cpu_ptr(percpu, cpu); + if (!IS_ERR_OR_NULL(stream->stream)) + msblk->decompressor->free(stream->stream); + } + free_percpu(percpu); + return ERR_PTR(err); +} + +void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk) +{ + struct squashfs_stream __percpu *percpu = + (struct squashfs_stream __percpu *) msblk->stream; + struct squashfs_stream *stream; + int cpu; + + if (msblk->stream) { + for_each_possible_cpu(cpu) { + stream = per_cpu_ptr(percpu, cpu); + msblk->decompressor->free(stream->stream); + } + free_percpu(percpu); + } +} + +int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, + int b, int offset, int length, struct squashfs_page_actor *output) +{ + struct squashfs_stream __percpu *percpu = + (struct squashfs_stream __percpu *) msblk->stream; + struct squashfs_stream *stream = get_cpu_ptr(percpu); + int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, + offset, length, output); + put_cpu_ptr(stream); + + if (res < 0) + ERROR("%s decompression failed, data probably corrupt\n", + msblk->decompressor->name); + + return res; +} + +int squashfs_max_decompressors(void) +{ + return num_possible_cpus(); +} diff --git a/fs/squashfs/decompressor_single.c b/fs/squashfs/decompressor_single.c new file mode 100644 index 00000000000..a6c75929a00 --- /dev/null +++ b/fs/squashfs/decompressor_single.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2013 + * Phillip Lougher <phillip@squashfs.org.uk> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/buffer_head.h> + +#include "squashfs_fs.h" +#include "squashfs_fs_sb.h" +#include "decompressor.h" +#include "squashfs.h" + +/* + * This file implements single-threaded decompression in the + * decompressor framework + */ + +struct squashfs_stream { + void *stream; + struct mutex mutex; +}; + +void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, + void *comp_opts) +{ + struct squashfs_stream *stream; + int err = -ENOMEM; + + stream = kmalloc(sizeof(*stream), GFP_KERNEL); + if (stream == NULL) + goto out; + + stream->stream = msblk->decompressor->init(msblk, comp_opts); + if (IS_ERR(stream->stream)) { + err = PTR_ERR(stream->stream); + goto out; + } + + kfree(comp_opts); + mutex_init(&stream->mutex); + return stream; + +out: + kfree(stream); + return ERR_PTR(err); +} + +void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk) +{ + struct squashfs_stream *stream = msblk->stream; + + if (stream) { + msblk->decompressor->free(stream->stream); + kfree(stream); + } +} + +int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, + int b, int offset, int length, struct squashfs_page_actor *output) +{ + int res; + struct squashfs_stream *stream = msblk->stream; + + mutex_lock(&stream->mutex); + res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, + offset, length, output); + mutex_unlock(&stream->mutex); + + if (res < 0) + ERROR("%s decompression failed, data probably corrupt\n", + msblk->decompressor->name); + + return res; +} + +int squashfs_max_decompressors(void) +{ + return 1; +} diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 8ca62c28fe1..e5c9689062b 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -370,77 +370,15 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) return le32_to_cpu(size); } - -static int squashfs_readpage(struct file *file, struct page *page) +/* Copy data into page cache */ +void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, + int bytes, int offset) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int bytes, i, offset = 0, sparse = 0; - struct squashfs_cache_entry *buffer = NULL; void *pageaddr; - - int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; - int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); - int start_index = page->index & ~mask; - int end_index = start_index | mask; - int file_end = i_size_read(inode) >> msblk->block_log; - - TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", - page->index, squashfs_i(inode)->start); - - if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT)) - goto out; - - if (index < file_end || squashfs_i(inode)->fragment_block == - SQUASHFS_INVALID_BLK) { - /* - * Reading a datablock from disk. Need to read block list - * to get location and block size. - */ - u64 block = 0; - int bsize = read_blocklist(inode, index, &block); - if (bsize < 0) - goto error_out; - - if (bsize == 0) { /* hole */ - bytes = index == file_end ? - (i_size_read(inode) & (msblk->block_size - 1)) : - msblk->block_size; - sparse = 1; - } else { - /* - * Read and decompress datablock. 
- */ - buffer = squashfs_get_datablock(inode->i_sb, - block, bsize); - if (buffer->error) { - ERROR("Unable to read page, block %llx, size %x" - "\n", block, bsize); - squashfs_cache_put(buffer); - goto error_out; - } - bytes = buffer->length; - } - } else { - /* - * Datablock is stored inside a fragment (tail-end packed - * block). - */ - buffer = squashfs_get_fragment(inode->i_sb, - squashfs_i(inode)->fragment_block, - squashfs_i(inode)->fragment_size); - - if (buffer->error) { - ERROR("Unable to read page, block %llx, size %x\n", - squashfs_i(inode)->fragment_block, - squashfs_i(inode)->fragment_size); - squashfs_cache_put(buffer); - goto error_out; - } - bytes = i_size_read(inode) & (msblk->block_size - 1); - offset = squashfs_i(inode)->fragment_offset; - } + int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; + int start_index = page->index & ~mask, end_index = start_index | mask; /* * Loop copying datablock into pages. As the datablock likely covers @@ -451,7 +389,7 @@ static int squashfs_readpage(struct file *file, struct page *page) for (i = start_index; i <= end_index && bytes > 0; i++, bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { struct page *push_page; - int avail = sparse ? 0 : min_t(int, bytes, PAGE_CACHE_SIZE); + int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); @@ -475,11 +413,75 @@ skip_page: if (i != page->index) page_cache_release(push_page); } +} + +/* Read datablock stored packed inside a fragment (tail-end packed block) */ +static int squashfs_readpage_fragment(struct page *page) +{ + struct inode *inode = page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); + int res = buffer->error; + + if (res) + ERROR("Unable to read page, block %llx, size %x\n", + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); + else + squashfs_copy_cache(page, buffer, i_size_read(inode) & + (msblk->block_size - 1), + squashfs_i(inode)->fragment_offset); + + squashfs_cache_put(buffer); + return res; +} - if (!sparse) - squashfs_cache_put(buffer); +static int squashfs_readpage_sparse(struct page *page, int index, int file_end) +{ + struct inode *inode = page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + int bytes = index == file_end ? 
+ (i_size_read(inode) & (msblk->block_size - 1)) : + msblk->block_size; + squashfs_copy_cache(page, NULL, bytes, 0); return 0; +} + +static int squashfs_readpage(struct file *file, struct page *page) +{ + struct inode *inode = page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); + int file_end = i_size_read(inode) >> msblk->block_log; + int res; + void *pageaddr; + + TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", + page->index, squashfs_i(inode)->start); + + if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT)) + goto out; + + if (index < file_end || squashfs_i(inode)->fragment_block == + SQUASHFS_INVALID_BLK) { + u64 block = 0; + int bsize = read_blocklist(inode, index, &block); + if (bsize < 0) + goto error_out; + + if (bsize == 0) + res = squashfs_readpage_sparse(page, index, file_end); + else + res = squashfs_readpage_block(page, block, bsize); + } else + res = squashfs_readpage_fragment(page); + + if (!res) + return 0; error_out: SetPageError(page); diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c new file mode 100644 index 00000000000..f2310d2a201 --- /dev/null +++ b/fs/squashfs/file_cache.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013 + * Phillip Lougher <phillip@squashfs.org.uk> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include <linux/fs.h> +#include <linux/vfs.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/pagemap.h> +#include <linux/mutex.h> + +#include "squashfs_fs.h" +#include "squashfs_fs_sb.h" +#include "squashfs_fs_i.h" +#include "squashfs.h" + +/* Read separately compressed datablock and memcopy into page cache */ +int squashfs_readpage_block(struct page *page, u64 block, int bsize) +{ + struct inode *i = page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, + block, bsize); + int res = buffer->error; + + if (res) + ERROR("Unable to read page, block %llx, size %x\n", block, + bsize); + else + squashfs_copy_cache(page, buffer, buffer->length, 0); + + squashfs_cache_put(buffer); + return res; +} diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c new file mode 100644 index 00000000000..2943b2bfae4 --- /dev/null +++ b/fs/squashfs/file_direct.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2013 + * Phillip Lougher <phillip@squashfs.org.uk> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include <linux/fs.h> +#include <linux/vfs.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/pagemap.h> +#include <linux/mutex.h> + +#include "squashfs_fs.h" +#include "squashfs_fs_sb.h" +#include "squashfs_fs_i.h" +#include "squashfs.h" +#include "page_actor.h" + +static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, + int pages, struct page **page); + +/* Read separately compressed datablock directly into page cache */ +int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) + +{ + struct inode *inode = target_page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + + int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; + int start_index = target_page->index & ~mask; + int end_index = start_index | mask; + int i, n, pages, missing_pages, bytes, res = -ENOMEM; + struct page **page; + struct squashfs_page_actor *actor; + void *pageaddr; + + if (end_index > file_end) + end_index = file_end; + + pages = end_index - start_index + 1; + + page = kmalloc(sizeof(void *) * pages, GFP_KERNEL); + if (page == NULL) + return res; + + /* + * Create a "page actor" which will kmap and kunmap the + * page cache pages appropriately within the decompressor + */ + actor = squashfs_page_actor_init_special(page, pages, 0); + if (actor == NULL) + goto out; + + /* Try to grab all the pages covered by the Squashfs block */ + for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) { + page[i] = (n == target_page->index) ? target_page : + grab_cache_page_nowait(target_page->mapping, n); + + if (page[i] == NULL) { + missing_pages++; + continue; + } + + if (PageUptodate(page[i])) { + unlock_page(page[i]); + page_cache_release(page[i]); + page[i] = NULL; + missing_pages++; + } + } + + if (missing_pages) { + /* + * Couldn't get one or more pages, this page has either + * been VM reclaimed, but others are still in the page cache + * and uptodate, or we're racing with another thread in + * squashfs_readpage also trying to grab them. Fall back to + * using an intermediate buffer. + */ + res = squashfs_read_cache(target_page, block, bsize, pages, + page); + goto out; + } + + /* Decompress directly into the page cache buffers */ + res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor); + if (res < 0) + goto mark_errored; + + /* Last page may have trailing bytes not filled */ + bytes = res % PAGE_CACHE_SIZE; + if (bytes) { + pageaddr = kmap_atomic(page[pages - 1]); + memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); + kunmap_atomic(pageaddr); + } + + /* Mark pages as uptodate, unlock and release */ + for (i = 0; i < pages; i++) { + flush_dcache_page(page[i]); + SetPageUptodate(page[i]); + unlock_page(page[i]); + if (page[i] != target_page) + page_cache_release(page[i]); + } + + kfree(actor); + kfree(page); + + return 0; + +mark_errored: + /* Decompression failed, mark pages as errored. 
Target_page is + * dealt with by the caller + */ + for (i = 0; i < pages; i++) { + if (page[i] == target_page) + continue; + flush_dcache_page(page[i]); + SetPageError(page[i]); + unlock_page(page[i]); + page_cache_release(page[i]); + } + +out: + kfree(actor); + kfree(page); + return res; +} + + +static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, + int pages, struct page **page) +{ + struct inode *i = target_page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, + block, bsize); + int bytes = buffer->length, res = buffer->error, n, offset = 0; + void *pageaddr; + + if (res) { + ERROR("Unable to read page, block %llx, size %x\n", block, + bsize); + goto out; + } + + for (n = 0; n < pages && bytes > 0; n++, + bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { + int avail = min_t(int, bytes, PAGE_CACHE_SIZE); + + if (page[n] == NULL) + continue; + + pageaddr = kmap_atomic(page[n]); + squashfs_copy_data(pageaddr, buffer, offset, avail); + memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); + kunmap_atomic(pageaddr); + flush_dcache_page(page[n]); + SetPageUptodate(page[n]); + unlock_page(page[n]); + if (page[n] != target_page) + page_cache_release(page[n]); + } + +out: + squashfs_cache_put(buffer); + return res; +} diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c index 00f4dfc5f08..244b9fbfff7 100644 --- a/fs/squashfs/lzo_wrapper.c +++ b/fs/squashfs/lzo_wrapper.c @@ -31,13 +31,14 @@ #include "squashfs_fs_sb.h" #include "squashfs.h" #include "decompressor.h" +#include "page_actor.h" struct squashfs_lzo { void *input; void *output; }; -static void *lzo_init(struct squashfs_sb_info *msblk, void *buff, int len) +static void *lzo_init(struct squashfs_sb_info *msblk, void *buff) { int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE); @@ -74,22 +75,16 @@ static void lzo_free(void *strm) } -static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer, - struct buffer_head **bh, int b, int offset, int length, int srclength, - int pages) +static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm, + struct buffer_head **bh, int b, int offset, int length, + struct squashfs_page_actor *output) { - struct squashfs_lzo *stream = msblk->stream; - void *buff = stream->input; + struct squashfs_lzo *stream = strm; + void *buff = stream->input, *data; int avail, i, bytes = length, res; - size_t out_len = srclength; - - mutex_lock(&msblk->read_data_mutex); + size_t out_len = output->length; for (i = 0; i < b; i++) { - wait_on_buffer(bh[i]); - if (!buffer_uptodate(bh[i])) - goto block_release; - avail = min(bytes, msblk->devblksize - offset); memcpy(buff, bh[i]->b_data + offset, avail); buff += avail; @@ -104,24 +99,24 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer, goto failed; res = bytes = (int)out_len; - for (i = 0, buff = stream->output; bytes && i < pages; i++) { - avail = min_t(int, bytes, PAGE_CACHE_SIZE); - memcpy(buffer[i], buff, avail); - buff += avail; - bytes -= avail; + data = squashfs_first_page(output); + buff = stream->output; + while (data) { + if (bytes <= PAGE_CACHE_SIZE) { + memcpy(data, buff, bytes); + break; + } else { + memcpy(data, buff, PAGE_CACHE_SIZE); + buff += PAGE_CACHE_SIZE; + bytes -= PAGE_CACHE_SIZE; + data = squashfs_next_page(output); + } } + squashfs_finish_page(output); - mutex_unlock(&msblk->read_data_mutex); return res; -block_release: - for (; i < b; i++) - put_bh(bh[i]); - failed: - 
mutex_unlock(&msblk->read_data_mutex); - - ERROR("lzo decompression failed, data probably corrupt\n"); return -EIO; } diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c new file mode 100644 index 00000000000..5a1c11f5644 --- /dev/null +++ b/fs/squashfs/page_actor.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2013 + * Phillip Lougher <phillip@squashfs.org.uk> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/pagemap.h> +#include "page_actor.h" + +/* + * This file contains implementations of page_actor for decompressing into + * an intermediate buffer, and for decompressing directly into the + * page cache. + * + * Calling code should avoid sleeping between calls to squashfs_first_page() + * and squashfs_finish_page(). + */ + +/* Implementation of page_actor for decompressing into intermediate buffer */ +static void *cache_first_page(struct squashfs_page_actor *actor) +{ + actor->next_page = 1; + return actor->buffer[0]; +} + +static void *cache_next_page(struct squashfs_page_actor *actor) +{ + if (actor->next_page == actor->pages) + return NULL; + + return actor->buffer[actor->next_page++]; +} + +static void cache_finish_page(struct squashfs_page_actor *actor) +{ + /* empty */ +} + +struct squashfs_page_actor *squashfs_page_actor_init(void **buffer, + int pages, int length) +{ + struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); + + if (actor == NULL) + return NULL; + + actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->buffer = buffer; + actor->pages = pages; + actor->next_page = 0; + actor->squashfs_first_page = cache_first_page; + actor->squashfs_next_page = cache_next_page; + actor->squashfs_finish_page = cache_finish_page; + return actor; +} + +/* Implementation of page_actor for decompressing directly into page cache. */ +static void *direct_first_page(struct squashfs_page_actor *actor) +{ + actor->next_page = 1; + return actor->pageaddr = kmap_atomic(actor->page[0]); +} + +static void *direct_next_page(struct squashfs_page_actor *actor) +{ + if (actor->pageaddr) + kunmap_atomic(actor->pageaddr); + + return actor->pageaddr = actor->next_page == actor->pages ? NULL : + kmap_atomic(actor->page[actor->next_page++]); +} + +static void direct_finish_page(struct squashfs_page_actor *actor) +{ + if (actor->pageaddr) + kunmap_atomic(actor->pageaddr); +} + +struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page, + int pages, int length) +{ + struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); + + if (actor == NULL) + return NULL; + + actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->page = page; + actor->pages = pages; + actor->next_page = 0; + actor->pageaddr = NULL; + actor->squashfs_first_page = direct_first_page; + actor->squashfs_next_page = direct_next_page; + actor->squashfs_finish_page = direct_finish_page; + return actor; +} diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h new file mode 100644 index 00000000000..26dd82008b8 --- /dev/null +++ b/fs/squashfs/page_actor.h @@ -0,0 +1,81 @@ +#ifndef PAGE_ACTOR_H +#define PAGE_ACTOR_H +/* + * Copyright (c) 2013 + * Phillip Lougher <phillip@squashfs.org.uk> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
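The page actor implementations above reduce every decompressor to the same three-call protocol: squashfs_first_page() maps the first destination page, squashfs_next_page() advances to the next one (returning NULL once the pages are exhausted), and squashfs_finish_page() drops the last mapping. A minimal sketch of that protocol follows, assuming a hypothetical copy_to_actor() helper that is not part of the patch; the lzo, xz and zlib wrapper loops elsewhere in this series have the same shape. Because the direct actor uses kmap_atomic(), the caller should not sleep between the first_page and finish_page calls.

static int copy_to_actor(struct squashfs_page_actor *output, void *src,
	int length)
{
	/* Sketch only: src is a contiguous decompressed buffer; the actor
	 * hides whether "data" points into an intermediate buffer or a
	 * kmapped page cache page.
	 */
	void *data = squashfs_first_page(output);
	int bytes = length;

	while (data != NULL && bytes > 0) {
		int avail = min_t(int, bytes, PAGE_CACHE_SIZE);

		memcpy(data, src, avail);
		src += avail;
		bytes -= avail;
		data = squashfs_next_page(output);
	}

	/* Releases the last mapping (a no-op for the intermediate-buffer
	 * variant). */
	squashfs_finish_page(output);
	return length - bytes;
}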
+ */ + +#ifndef CONFIG_SQUASHFS_FILE_DIRECT +struct squashfs_page_actor { + void **page; + int pages; + int length; + int next_page; +}; + +static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page, + int pages, int length) +{ + struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); + + if (actor == NULL) + return NULL; + + actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->page = page; + actor->pages = pages; + actor->next_page = 0; + return actor; +} + +static inline void *squashfs_first_page(struct squashfs_page_actor *actor) +{ + actor->next_page = 1; + return actor->page[0]; +} + +static inline void *squashfs_next_page(struct squashfs_page_actor *actor) +{ + return actor->next_page == actor->pages ? NULL : + actor->page[actor->next_page++]; +} + +static inline void squashfs_finish_page(struct squashfs_page_actor *actor) +{ + /* empty */ +} +#else +struct squashfs_page_actor { + union { + void **buffer; + struct page **page; + }; + void *pageaddr; + void *(*squashfs_first_page)(struct squashfs_page_actor *); + void *(*squashfs_next_page)(struct squashfs_page_actor *); + void (*squashfs_finish_page)(struct squashfs_page_actor *); + int pages; + int length; + int next_page; +}; + +extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int); +extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page + **, int, int); +static inline void *squashfs_first_page(struct squashfs_page_actor *actor) +{ + return actor->squashfs_first_page(actor); +} +static inline void *squashfs_next_page(struct squashfs_page_actor *actor) +{ + return actor->squashfs_next_page(actor); +} +static inline void squashfs_finish_page(struct squashfs_page_actor *actor) +{ + actor->squashfs_finish_page(actor); +} +#endif +#endif diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index d1266516ed0..9e1bb79f7e6 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -28,8 +28,8 @@ #define WARNING(s, args...) 
pr_warning("SQUASHFS: "s, ## args) /* block.c */ -extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *, - int, int); +extern int squashfs_read_data(struct super_block *, u64, int, u64 *, + struct squashfs_page_actor *); /* cache.c */ extern struct squashfs_cache *squashfs_cache_init(char *, int, int); @@ -48,7 +48,14 @@ extern void *squashfs_read_table(struct super_block *, u64, int); /* decompressor.c */ extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int); -extern void *squashfs_decompressor_init(struct super_block *, unsigned short); +extern void *squashfs_decompressor_setup(struct super_block *, unsigned short); + +/* decompressor_xxx.c */ +extern void *squashfs_decompressor_create(struct squashfs_sb_info *, void *); +extern void squashfs_decompressor_destroy(struct squashfs_sb_info *); +extern int squashfs_decompress(struct squashfs_sb_info *, struct buffer_head **, + int, int, int, struct squashfs_page_actor *); +extern int squashfs_max_decompressors(void); /* export.c */ extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64, @@ -59,6 +66,13 @@ extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *); extern __le64 *squashfs_read_fragment_index_table(struct super_block *, u64, u64, unsigned int); +/* file.c */ +void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, + int); + +/* file_xxx.c */ +extern int squashfs_readpage_block(struct page *, u64, int); + /* id.c */ extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64, diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index 52934a22f29..1da565cb50c 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h @@ -50,6 +50,7 @@ struct squashfs_cache_entry { wait_queue_head_t wait_queue; struct squashfs_cache *cache; void **data; + struct squashfs_page_actor *actor; }; struct squashfs_sb_info { @@ -63,10 +64,9 @@ struct squashfs_sb_info { __le64 *id_table; __le64 *fragment_index; __le64 *xattr_id_table; - struct mutex read_data_mutex; struct mutex meta_index_mutex; struct meta_index *meta_index; - void *stream; + struct squashfs_stream *stream; __le64 *inode_lookup_table; u64 inode_table; u64 directory_table; diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 60553a9053c..202df6312d4 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -98,7 +98,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE); msblk->devblksize_log2 = ffz(~msblk->devblksize); - mutex_init(&msblk->read_data_mutex); mutex_init(&msblk->meta_index_mutex); /* @@ -206,13 +205,14 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; /* Allocate read_page block */ - msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size); + msblk->read_page = squashfs_cache_init("data", + squashfs_max_decompressors(), msblk->block_size); if (msblk->read_page == NULL) { ERROR("Failed to allocate read_page block\n"); goto failed_mount; } - msblk->stream = squashfs_decompressor_init(sb, flags); + msblk->stream = squashfs_decompressor_setup(sb, flags); if (IS_ERR(msblk->stream)) { err = PTR_ERR(msblk->stream); msblk->stream = NULL; @@ -336,7 +336,7 @@ failed_mount: squashfs_cache_delete(msblk->block_cache); squashfs_cache_delete(msblk->fragment_cache); 
squashfs_cache_delete(msblk->read_page); - squashfs_decompressor_free(msblk, msblk->stream); + squashfs_decompressor_destroy(msblk); kfree(msblk->inode_lookup_table); kfree(msblk->fragment_index); kfree(msblk->id_table); @@ -383,7 +383,7 @@ static void squashfs_put_super(struct super_block *sb) squashfs_cache_delete(sbi->block_cache); squashfs_cache_delete(sbi->fragment_cache); squashfs_cache_delete(sbi->read_page); - squashfs_decompressor_free(sbi, sbi->stream); + squashfs_decompressor_destroy(sbi); kfree(sbi->id_table); kfree(sbi->fragment_index); kfree(sbi->meta_index); diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c index 1760b7d108f..c609624e4b8 100644 --- a/fs/squashfs/xz_wrapper.c +++ b/fs/squashfs/xz_wrapper.c @@ -32,44 +32,70 @@ #include "squashfs_fs_sb.h" #include "squashfs.h" #include "decompressor.h" +#include "page_actor.h" struct squashfs_xz { struct xz_dec *state; struct xz_buf buf; }; -struct comp_opts { +struct disk_comp_opts { __le32 dictionary_size; __le32 flags; }; -static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff, - int len) +struct comp_opts { + int dict_size; +}; + +static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk, + void *buff, int len) { - struct comp_opts *comp_opts = buff; - struct squashfs_xz *stream; - int dict_size = msblk->block_size; - int err, n; + struct disk_comp_opts *comp_opts = buff; + struct comp_opts *opts; + int err = 0, n; + + opts = kmalloc(sizeof(*opts), GFP_KERNEL); + if (opts == NULL) { + err = -ENOMEM; + goto out2; + } if (comp_opts) { /* check compressor options are the expected length */ if (len < sizeof(*comp_opts)) { err = -EIO; - goto failed; + goto out; } - dict_size = le32_to_cpu(comp_opts->dictionary_size); + opts->dict_size = le32_to_cpu(comp_opts->dictionary_size); /* the dictionary size should be 2^n or 2^n+2^(n+1) */ - n = ffs(dict_size) - 1; - if (dict_size != (1 << n) && dict_size != (1 << n) + + n = ffs(opts->dict_size) - 1; + if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) + (1 << (n + 1))) { err = -EIO; - goto failed; + goto out; } - } + } else + /* use defaults */ + opts->dict_size = max_t(int, msblk->block_size, + SQUASHFS_METADATA_SIZE); + + return opts; + +out: + kfree(opts); +out2: + return ERR_PTR(err); +} + - dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE); +static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff) +{ + struct comp_opts *comp_opts = buff; + struct squashfs_xz *stream; + int err; stream = kmalloc(sizeof(*stream), GFP_KERNEL); if (stream == NULL) { @@ -77,7 +103,7 @@ static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff, goto failed; } - stream->state = xz_dec_init(XZ_PREALLOC, dict_size); + stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size); if (stream->state == NULL) { kfree(stream); err = -ENOMEM; @@ -103,42 +129,37 @@ static void squashfs_xz_free(void *strm) } -static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer, - struct buffer_head **bh, int b, int offset, int length, int srclength, - int pages) +static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, + struct buffer_head **bh, int b, int offset, int length, + struct squashfs_page_actor *output) { enum xz_ret xz_err; - int avail, total = 0, k = 0, page = 0; - struct squashfs_xz *stream = msblk->stream; - - mutex_lock(&msblk->read_data_mutex); + int avail, total = 0, k = 0; + struct squashfs_xz *stream = strm; xz_dec_reset(stream->state); stream->buf.in_pos = 0; 
stream->buf.in_size = 0; stream->buf.out_pos = 0; stream->buf.out_size = PAGE_CACHE_SIZE; - stream->buf.out = buffer[page++]; + stream->buf.out = squashfs_first_page(output); do { if (stream->buf.in_pos == stream->buf.in_size && k < b) { avail = min(length, msblk->devblksize - offset); length -= avail; - wait_on_buffer(bh[k]); - if (!buffer_uptodate(bh[k])) - goto release_mutex; - stream->buf.in = bh[k]->b_data + offset; stream->buf.in_size = avail; stream->buf.in_pos = 0; offset = 0; } - if (stream->buf.out_pos == stream->buf.out_size - && page < pages) { - stream->buf.out = buffer[page++]; - stream->buf.out_pos = 0; - total += PAGE_CACHE_SIZE; + if (stream->buf.out_pos == stream->buf.out_size) { + stream->buf.out = squashfs_next_page(output); + if (stream->buf.out != NULL) { + stream->buf.out_pos = 0; + total += PAGE_CACHE_SIZE; + } } xz_err = xz_dec_run(stream->state, &stream->buf); @@ -147,23 +168,14 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer, put_bh(bh[k++]); } while (xz_err == XZ_OK); - if (xz_err != XZ_STREAM_END) { - ERROR("xz_dec_run error, data probably corrupt\n"); - goto release_mutex; - } - - if (k < b) { - ERROR("xz_uncompress error, input remaining\n"); - goto release_mutex; - } + squashfs_finish_page(output); - total += stream->buf.out_pos; - mutex_unlock(&msblk->read_data_mutex); - return total; + if (xz_err != XZ_STREAM_END || k < b) + goto out; -release_mutex: - mutex_unlock(&msblk->read_data_mutex); + return total + stream->buf.out_pos; +out: for (; k < b; k++) put_bh(bh[k]); @@ -172,6 +184,7 @@ release_mutex: const struct squashfs_decompressor squashfs_xz_comp_ops = { .init = squashfs_xz_init, + .comp_opts = squashfs_xz_comp_opts, .free = squashfs_xz_free, .decompress = squashfs_xz_uncompress, .id = XZ_COMPRESSION, diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 55d918fd2d8..8727caba688 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -32,8 +32,9 @@ #include "squashfs_fs_sb.h" #include "squashfs.h" #include "decompressor.h" +#include "page_actor.h" -static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len) +static void *zlib_init(struct squashfs_sb_info *dummy, void *buff) { z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL); if (stream == NULL) @@ -61,44 +62,37 @@ static void zlib_free(void *strm) } -static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, - struct buffer_head **bh, int b, int offset, int length, int srclength, - int pages) +static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm, + struct buffer_head **bh, int b, int offset, int length, + struct squashfs_page_actor *output) { - int zlib_err, zlib_init = 0; - int k = 0, page = 0; - z_stream *stream = msblk->stream; - - mutex_lock(&msblk->read_data_mutex); + int zlib_err, zlib_init = 0, k = 0; + z_stream *stream = strm; - stream->avail_out = 0; + stream->avail_out = PAGE_CACHE_SIZE; + stream->next_out = squashfs_first_page(output); stream->avail_in = 0; do { if (stream->avail_in == 0 && k < b) { int avail = min(length, msblk->devblksize - offset); length -= avail; - wait_on_buffer(bh[k]); - if (!buffer_uptodate(bh[k])) - goto release_mutex; - stream->next_in = bh[k]->b_data + offset; stream->avail_in = avail; offset = 0; } - if (stream->avail_out == 0 && page < pages) { - stream->next_out = buffer[page++]; - stream->avail_out = PAGE_CACHE_SIZE; + if (stream->avail_out == 0) { + stream->next_out = squashfs_next_page(output); + if (stream->next_out != NULL) 
+ stream->avail_out = PAGE_CACHE_SIZE; } if (!zlib_init) { zlib_err = zlib_inflateInit(stream); if (zlib_err != Z_OK) { - ERROR("zlib_inflateInit returned unexpected " - "result 0x%x, srclength %d\n", - zlib_err, srclength); - goto release_mutex; + squashfs_finish_page(output); + goto out; } zlib_init = 1; } @@ -109,29 +103,21 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, put_bh(bh[k++]); } while (zlib_err == Z_OK); - if (zlib_err != Z_STREAM_END) { - ERROR("zlib_inflate error, data probably corrupt\n"); - goto release_mutex; - } + squashfs_finish_page(output); - zlib_err = zlib_inflateEnd(stream); - if (zlib_err != Z_OK) { - ERROR("zlib_inflate error, data probably corrupt\n"); - goto release_mutex; - } + if (zlib_err != Z_STREAM_END) + goto out; - if (k < b) { - ERROR("zlib_uncompress error, data remaining\n"); - goto release_mutex; - } + zlib_err = zlib_inflateEnd(stream); + if (zlib_err != Z_OK) + goto out; - length = stream->total_out; - mutex_unlock(&msblk->read_data_mutex); - return length; + if (k < b) + goto out; -release_mutex: - mutex_unlock(&msblk->read_data_mutex); + return stream->total_out; +out: for (; k < b; k++) put_bh(bh[k]); diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 1c02da8bb7d..3ef11b22e75 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -1137,6 +1137,7 @@ xfs_bmap_add_attrfork( int committed; /* xaction was committed */ int logflags; /* logging flags */ int error; /* error return value */ + int cancel_flags = 0; ASSERT(XFS_IFORK_Q(ip) == 0); @@ -1147,19 +1148,20 @@ xfs_bmap_add_attrfork( if (rsvd) tp->t_flags |= XFS_TRANS_RESERVE; error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0); - if (error) - goto error0; + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : XFS_QMOPT_RES_REGBLKS); - if (error) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES); - return error; - } + if (error) + goto trans_cancel; + cancel_flags |= XFS_TRANS_ABORT; if (XFS_IFORK_Q(ip)) - goto error1; + goto trans_cancel; if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { /* * For inodes coming from pre-6.2 filesystems. 
@@ -1169,7 +1171,7 @@ xfs_bmap_add_attrfork( } ASSERT(ip->i_d.di_anextents == 0); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, 0); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); switch (ip->i_d.di_format) { @@ -1191,7 +1193,7 @@ xfs_bmap_add_attrfork( default: ASSERT(0); error = XFS_ERROR(EINVAL); - goto error1; + goto trans_cancel; } ASSERT(ip->i_afp == NULL); @@ -1219,7 +1221,7 @@ xfs_bmap_add_attrfork( if (logflags) xfs_trans_log_inode(tp, ip, logflags); if (error) - goto error2; + goto bmap_cancel; if (!xfs_sb_version_hasattr(&mp->m_sb) || (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) { __int64_t sbfields = 0; @@ -1242,14 +1244,16 @@ xfs_bmap_add_attrfork( error = xfs_bmap_finish(&tp, &flist, &committed); if (error) - goto error2; - return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); -error2: + goto bmap_cancel; + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return error; + +bmap_cancel: xfs_bmap_cancel(&flist); -error1: +trans_cancel: + xfs_trans_cancel(tp, cancel_flags); xfs_iunlock(ip, XFS_ILOCK_EXCL); -error0: - xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); return error; } diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index da88f167af7..02df7b408a2 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -41,6 +41,7 @@ #include "xfs_fsops.h" #include "xfs_trace.h" #include "xfs_icache.h" +#include "xfs_dinode.h" #ifdef HAVE_PERCPU_SB @@ -718,8 +719,22 @@ xfs_mountfs( * Set the inode cluster size. * This may still be overridden by the file system * block size if it is larger than the chosen cluster size. + * + * For v5 filesystems, scale the cluster size with the inode size to + * keep a constant ratio of inode per cluster buffer, but only if mkfs + * has set the inode alignment value appropriately for larger cluster + * sizes. */ mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; + if (xfs_sb_version_hascrc(&mp->m_sb)) { + int new_size = mp->m_inode_cluster_size; + + new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; + if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) + mp->m_inode_cluster_size = new_size; + xfs_info(mp, "Using inode cluster size of %d bytes", + mp->m_inode_cluster_size); + } /* * Set inode alignment fields diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 1d8101a10d8..a466c5e5826 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -112,7 +112,7 @@ typedef struct xfs_mount { __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ __uint8_t m_agno_log; /* log #ag's */ __uint8_t m_agino_log; /* #bits for agino in inum */ - __uint16_t m_inode_cluster_size;/* min inode buf size */ + uint m_inode_cluster_size;/* min inode buf size */ uint m_blockmask; /* sb_blocksize-1 */ uint m_blockwsize; /* sb_blocksize in words */ uint m_blockwmask; /* blockwsize-1 */ diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 1bba7f60d94..50c3f561428 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -111,12 +111,14 @@ xfs_trans_log_inode( /* * First time we log the inode in a transaction, bump the inode change - * counter if it is configured for this to occur. + * counter if it is configured for this to occur. We don't use + * inode_inc_version() because there is no need for extra locking around + * i_version as we already hold the inode locked exclusively for + * metadata modification. 
*/ if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) && IS_I_VERSION(VFS_I(ip))) { - inode_inc_iversion(VFS_I(ip)); - ip->i_d.di_changecount = VFS_I(ip)->i_version; + ip->i_d.di_changecount = ++VFS_I(ip)->i_version; flags |= XFS_ILOG_CORE; } diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c index d53d9f0627a..2fd59c0dae6 100644 --- a/fs/xfs/xfs_trans_resv.c +++ b/fs/xfs/xfs_trans_resv.c @@ -385,8 +385,7 @@ xfs_calc_ifree_reservation( xfs_calc_inode_res(mp, 1) + xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) + - MAX((__uint16_t)XFS_FSB_TO_B(mp, 1), - XFS_INODE_CLUSTER_SIZE(mp)) + + max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) + xfs_calc_buf_res(1, 0) + xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels, 0) + diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 89c60b0f640..7b2de026a4f 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -431,9 +431,9 @@ static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr) { return acpi_find_child(handle, addr, false); } +void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr); int acpi_is_root_bridge(acpi_handle); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); -#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev)) int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); int acpi_disable_wakeup_device_power(struct acpi_device *dev); diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h new file mode 100644 index 00000000000..e1e5a3e5dd1 --- /dev/null +++ b/include/crypto/hash_info.h @@ -0,0 +1,40 @@ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#ifndef _CRYPTO_HASH_INFO_H +#define _CRYPTO_HASH_INFO_H + +#include <crypto/sha.h> +#include <crypto/md5.h> + +#include <uapi/linux/hash_info.h> + +/* not defined in include/crypto/ */ +#define RMD128_DIGEST_SIZE 16 +#define RMD160_DIGEST_SIZE 20 +#define RMD256_DIGEST_SIZE 32 +#define RMD320_DIGEST_SIZE 40 + +/* not defined in include/crypto/ */ +#define WP512_DIGEST_SIZE 64 +#define WP384_DIGEST_SIZE 48 +#define WP256_DIGEST_SIZE 32 + +/* not defined in include/crypto/ */ +#define TGR128_DIGEST_SIZE 16 +#define TGR160_DIGEST_SIZE 20 +#define TGR192_DIGEST_SIZE 24 + +extern const char *const hash_algo_name[HASH_ALGO__LAST]; +extern const int hash_digest_size[HASH_ALGO__LAST]; + +#endif /* _CRYPTO_HASH_INFO_H */ diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index f5b0224c996..fc09732613a 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -15,6 +15,7 @@ #define _LINUX_PUBLIC_KEY_H #include <linux/mpi.h> +#include <crypto/hash_info.h> enum pkey_algo { PKEY_ALGO_DSA, @@ -22,21 +23,11 @@ enum pkey_algo { PKEY_ALGO__LAST }; -extern const char *const pkey_algo[PKEY_ALGO__LAST]; +extern const char *const pkey_algo_name[PKEY_ALGO__LAST]; +extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST]; -enum pkey_hash_algo { - PKEY_HASH_MD4, - PKEY_HASH_MD5, - PKEY_HASH_SHA1, - PKEY_HASH_RIPE_MD_160, - PKEY_HASH_SHA256, - PKEY_HASH_SHA384, - PKEY_HASH_SHA512, - PKEY_HASH_SHA224, - PKEY_HASH__LAST -}; - -extern const char *const pkey_hash_algo[PKEY_HASH__LAST]; +/* asymmetric key implementation supports only up to SHA224 */ +#define PKEY_HASH__LAST (HASH_ALGO_SHA224 + 1) enum pkey_id_type { PKEY_ID_PGP, /* OpenPGP generated key ID */ @@ -44,7 +35,7 @@ enum pkey_id_type { PKEY_ID_TYPE__LAST }; -extern const char *const pkey_id_type[PKEY_ID_TYPE__LAST]; +extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST]; /* * Cryptographic data for the public-key subtype of the asymmetric key type. @@ -59,6 +50,7 @@ struct public_key { #define PKEY_CAN_DECRYPT 0x02 #define PKEY_CAN_SIGN 0x04 #define PKEY_CAN_VERIFY 0x08 + enum pkey_algo pkey_algo : 8; enum pkey_id_type id_type : 8; union { MPI mpi[5]; @@ -88,7 +80,8 @@ struct public_key_signature { u8 *digest; u8 digest_size; /* Number of bytes in digest */ u8 nr_mpi; /* Occupancy of mpi[] */ - enum pkey_hash_algo pkey_hash_algo : 8; + enum pkey_algo pkey_algo : 8; + enum hash_algo pkey_hash_algo : 8; union { MPI mpi[2]; struct { diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h new file mode 100644 index 00000000000..d69bc8af329 --- /dev/null +++ b/include/keys/big_key-type.h @@ -0,0 +1,25 @@ +/* Big capacity key type. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _KEYS_BIG_KEY_TYPE_H +#define _KEYS_BIG_KEY_TYPE_H + +#include <linux/key-type.h> + +extern struct key_type key_type_big_key; + +extern int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep); +extern void big_key_revoke(struct key *key); +extern void big_key_destroy(struct key *key); +extern void big_key_describe(const struct key *big_key, struct seq_file *m); +extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); + +#endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/keyring-type.h b/include/keys/keyring-type.h index cf49159b0e3..fca5c62340a 100644 --- a/include/keys/keyring-type.h +++ b/include/keys/keyring-type.h @@ -1,6 +1,6 @@ /* Keyring key type * - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -13,19 +13,6 @@ #define _KEYS_KEYRING_TYPE_H #include <linux/key.h> -#include <linux/rcupdate.h> - -/* - * the keyring payload contains a list of the keys to which the keyring is - * subscribed - */ -struct keyring_list { - struct rcu_head rcu; /* RCU deletion hook */ - unsigned short maxkeys; /* max keys this list can hold */ - unsigned short nkeys; /* number of keys currently held */ - unsigned short delkey; /* key to be unlinked by RCU */ - struct key __rcu *keys[0]; -}; - +#include <linux/assoc_array.h> #endif /* _KEYS_KEYRING_TYPE_H */ diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h new file mode 100644 index 00000000000..8dabc399bd1 --- /dev/null +++ b/include/keys/system_keyring.h @@ -0,0 +1,23 @@ +/* System keyring containing trusted public keys. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_SYSTEM_KEYRING_H +#define _KEYS_SYSTEM_KEYRING_H + +#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING + +#include <linux/key.h> + +extern struct key *system_trusted_keyring; + +#endif + +#endif /* _KEYS_SYSTEM_KEYRING_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index b0972c4ce81..d9099b15b47 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -44,6 +44,20 @@ #include <acpi/acpi_numa.h> #include <asm/acpi.h> +static inline acpi_handle acpi_device_handle(struct acpi_device *adev) +{ + return adev ? 
adev->handle : NULL; +} + +#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion) +#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev) +#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return dev_name(&adev->dev); +} + enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC, @@ -401,6 +415,15 @@ static inline bool acpi_driver_match_device(struct device *dev, #define acpi_disabled 1 +#define ACPI_COMPANION(dev) (NULL) +#define ACPI_COMPANION_SET(dev, adev) do { } while (0) +#define ACPI_HANDLE(dev) (NULL) + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return NULL; +} + static inline void acpi_early_init(void) { } static inline int early_acpi_boot_init(void) diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h new file mode 100644 index 00000000000..9a193b84238 --- /dev/null +++ b/include/linux/assoc_array.h @@ -0,0 +1,92 @@ +/* Generic associative array implementation. + * + * See Documentation/assoc_array.txt for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASSOC_ARRAY_H +#define _LINUX_ASSOC_ARRAY_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include <linux/types.h> + +#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */ + +/* + * Generic associative array. + */ +struct assoc_array { + struct assoc_array_ptr *root; /* The node at the root of the tree */ + unsigned long nr_leaves_on_tree; +}; + +/* + * Operations on objects and index keys for use by array manipulation routines. + */ +struct assoc_array_ops { + /* Method to get a chunk of an index key from caller-supplied data */ + unsigned long (*get_key_chunk)(const void *index_key, int level); + + /* Method to get a piece of an object's index key */ + unsigned long (*get_object_key_chunk)(const void *object, int level); + + /* Is this the object we're looking for? */ + bool (*compare_object)(const void *object, const void *index_key); + + /* How different are two objects, to a bit position in their keys? (or + * -1 if they're the same) + */ + int (*diff_objects)(const void *a, const void *b); + + /* Method to free an object. */ + void (*free_object)(void *object); +}; + +/* + * Access and manipulation functions. 
+ */ +struct assoc_array_edit; + +static inline void assoc_array_init(struct assoc_array *array) +{ + array->root = NULL; + array->nr_leaves_on_tree = 0; +} + +extern int assoc_array_iterate(const struct assoc_array *array, + int (*iterator)(const void *object, + void *iterator_data), + void *iterator_data); +extern void *assoc_array_find(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern void assoc_array_destroy(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + void *object); +extern void assoc_array_insert_set_object(struct assoc_array_edit *edit, + void *object); +extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern void assoc_array_apply_edit(struct assoc_array_edit *edit); +extern void assoc_array_cancel_edit(struct assoc_array_edit *edit); +extern int assoc_array_gc(struct assoc_array *array, + const struct assoc_array_ops *ops, + bool (*iterator)(void *object, void *iterator_data), + void *iterator_data); + +#endif /* CONFIG_ASSOCIATIVE_ARRAY */ +#endif /* _LINUX_ASSOC_ARRAY_H */ diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h new file mode 100644 index 00000000000..711275e6681 --- /dev/null +++ b/include/linux/assoc_array_priv.h @@ -0,0 +1,182 @@ +/* Private definitions for the generic associative array implementation. + * + * See Documentation/assoc_array.txt for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASSOC_ARRAY_PRIV_H +#define _LINUX_ASSOC_ARRAY_PRIV_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include <linux/assoc_array.h> + +#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */ +#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1) +#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT)) +#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1) +#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1) +#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG)) + +/* + * Undefined type representing a pointer with type information in the bottom + * two bits. + */ +struct assoc_array_ptr; + +/* + * An N-way node in the tree. + * + * Each slot contains one of four things: + * + * (1) Nothing (NULL). + * + * (2) A leaf object (pointer types 0). + * + * (3) A next-level node (pointer type 1, subtype 0). + * + * (4) A shortcut (pointer type 1, subtype 1). + * + * The tree is optimised for search-by-ID, but permits reasonable iteration + * also. + * + * The tree is navigated by constructing an index key consisting of an array of + * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size. + * + * The segments correspond to levels of the tree (the first segment is used at + * level 0, the second at level 1, etc.). 
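To make the index-key layout described above concrete, here is a small sketch (not part of the patch) of how a node slot could be derived for a given level, assuming "level" is counted in key bits and advances by ASSOC_ARRAY_LEVEL_STEP per tree level, as the *_STEP and *_MASK definitions above suggest. The caller-supplied get_key_chunk() op yields ASSOC_ARRAY_KEY_CHUNK_SIZE bits at a time, and each level consumes one ASSOC_ARRAY_LEVEL_STEP-bit segment of that chunk. example_slot_for_level() is a hypothetical name used only to illustrate the arithmetic implied by these definitions.

static unsigned int example_slot_for_level(const struct assoc_array_ops *ops,
					   const void *index_key, int level)
{
	/* Fetch the chunk of key bits covering this level. */
	unsigned long chunk = ops->get_key_chunk(index_key, level);

	/* Select the ASSOC_ARRAY_LEVEL_STEP-bit segment for this level and
	 * use it as the slot index into the node's ASSOC_ARRAY_FAN_OUT
	 * slots. */
	return (chunk >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK)) &
		ASSOC_ARRAY_FAN_MASK;
}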
+ */ +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT]; + unsigned long nr_leaves_on_branch; +}; + +/* + * A shortcut through the index space out to where a collection of nodes/leaves + * with the same IDs live. + */ +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + unsigned long index_key[]; +}; + +/* + * Preallocation cache. + */ +struct assoc_array_edit { + struct rcu_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1]; +}; + +/* + * Internal tree member pointers are marked in the bottom one or two bits to + * indicate what type they are so that we don't have to look behind every + * pointer to see what it points to. + * + * We provide functions to test type annotations and to create and translate + * the annotated pointers. + */ +#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL +#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */ +#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */ +#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL +#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL +#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL + +static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK; +} +static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x) +{ + return !assoc_array_ptr_is_meta(x); +} +static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK; +} +static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x) +{ + return !assoc_array_ptr_is_shortcut(x); +} + +static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x) +{ + return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK); +} + +static inline +unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & + ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK); +} +static inline +struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x) +{ + return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x); +} +static inline +struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x) +{ + return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x); +} + +static inline +struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t) +{ + return (struct assoc_array_ptr *)((unsigned long)p | t); +} +static inline +struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p) +{ + return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE); +} +static inline +struct assoc_array_ptr *assoc_array_node_to_ptr(const struct 
assoc_array_node *p) +{ + return __assoc_array_x_to_ptr( + p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE); +} +static inline +struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p) +{ + return __assoc_array_x_to_ptr( + p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE); +} + +#endif /* CONFIG_ASSOCIATIVE_ARRAY */ +#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 729a4d165bc..a40641954c2 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -73,6 +73,8 @@ struct audit_field { void *lsm_rule; }; +extern int is_audit_feature_set(int which); + extern int __init audit_register_class(int class, unsigned *list); extern int audit_classify_syscall(int abi, unsigned syscall); extern int audit_classify_arch(int arch); @@ -207,7 +209,7 @@ static inline int audit_get_sessionid(struct task_struct *tsk) extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); -extern int __audit_bprm(struct linux_binprm *bprm); +extern void __audit_bprm(struct linux_binprm *bprm); extern int __audit_socketcall(int nargs, unsigned long *args); extern int __audit_sockaddr(int len, void *addr); extern void __audit_fd_pair(int fd1, int fd2); @@ -236,11 +238,10 @@ static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid if (unlikely(!audit_dummy_context())) __audit_ipc_set_perm(qbytes, uid, gid, mode); } -static inline int audit_bprm(struct linux_binprm *bprm) +static inline void audit_bprm(struct linux_binprm *bprm) { if (unlikely(!audit_dummy_context())) - return __audit_bprm(bprm); - return 0; + __audit_bprm(bprm); } static inline int audit_socketcall(int nargs, unsigned long *args) { @@ -367,10 +368,8 @@ static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { } -static inline int audit_bprm(struct linux_binprm *bprm) -{ - return 0; -} +static inline void audit_bprm(struct linux_binprm *bprm) +{ } static inline int audit_socketcall(int nargs, unsigned long *args) { return 0; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f26ec20f635..1b135d49b27 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -505,6 +505,9 @@ struct request_queue { (1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_ADD_RANDOM)) +#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_SAME_COMP)) + static inline void queue_lockdep_assert_held(struct request_queue *q) { if (q->queue_lock) diff --git a/include/linux/device.h b/include/linux/device.h index b025925df7f..952b01033c3 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -644,9 +644,11 @@ struct device_dma_parameters { unsigned long segment_boundary_mask; }; +struct acpi_device; + struct acpi_dev_node { #ifdef CONFIG_ACPI - void *handle; + struct acpi_device *companion; #endif }; @@ -790,14 +792,6 @@ static inline struct device *kobj_to_dev(struct kobject *kobj) return container_of(kobj, struct device, kobj); } -#ifdef CONFIG_ACPI -#define ACPI_HANDLE(dev) ((dev)->acpi_node.handle) -#define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_) -#else -#define ACPI_HANDLE(dev) (NULL) -#define ACPI_HANDLE_SET(dev, _handle_) do { } while (0) -#endif - /* Get the wakeup routines, which depend on struct device */ #include <linux/pm_wakeup.h> diff --git 
a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 0bc72753410..41cf0c39928 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie) /** * enum dma_status - DMA transaction status - * @DMA_SUCCESS: transaction completed successfully + * @DMA_COMPLETE: transaction completed * @DMA_IN_PROGRESS: transaction not yet processed * @DMA_PAUSED: transaction is paused * @DMA_ERROR: transaction failed */ enum dma_status { - DMA_SUCCESS, + DMA_COMPLETE, DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, @@ -171,12 +171,6 @@ struct dma_interleaved_template { * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client * acknowledges receipt, i.e. has has a chance to establish any dependency * chains - * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) - * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) - * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single - * (if not set, do the source dma-unmapping as page) - * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single - * (if not set, do the destination dma-unmapping as page) * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as @@ -188,14 +182,10 @@ struct dma_interleaved_template { enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), - DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), - DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), - DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), - DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), - DMA_PREP_PQ_DISABLE_P = (1 << 6), - DMA_PREP_PQ_DISABLE_Q = (1 << 7), - DMA_PREP_CONTINUE = (1 << 8), - DMA_PREP_FENCE = (1 << 9), + DMA_PREP_PQ_DISABLE_P = (1 << 2), + DMA_PREP_PQ_DISABLE_Q = (1 << 3), + DMA_PREP_CONTINUE = (1 << 4), + DMA_PREP_FENCE = (1 << 5), }; /** @@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref); typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); typedef void (*dma_async_tx_callback)(void *dma_async_param); + +struct dmaengine_unmap_data { + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + /** * struct dma_async_tx_descriptor - async transaction descriptor * ---dma generic offload fields--- @@ -438,6 +439,7 @@ struct dma_async_tx_descriptor { dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; void *callback_param; + struct dmaengine_unmap_data *unmap; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; @@ -445,6 +447,40 @@ struct dma_async_tx_descriptor { #endif }; +#ifdef CONFIG_DMA_ENGINE +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ + kref_get(&unmap->kref); + tx->unmap = unmap; +} + +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags); +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); +#else +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ +} +static inline struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + return NULL; +} +static inline void dmaengine_unmap_put(struct dmaengine_unmap_data 
*unmap) +{ +} +#endif + +static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) +{ + if (tx->unmap) { + dmaengine_unmap_put(tx->unmap); + tx->unmap = NULL; + } +} + #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH static inline void txd_lock(struct dma_async_tx_descriptor *txd) { @@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, { if (last_complete <= last_used) { if ((cookie <= last_complete) || (cookie > last_used)) - return DMA_SUCCESS; + return DMA_COMPLETE; } else { if ((cookie <= last_complete) && (cookie > last_used)) - return DMA_SUCCESS; + return DMA_COMPLETE; } return DMA_IN_PROGRESS; } @@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ } static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) { - return DMA_SUCCESS; + return DMA_COMPLETE; } static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { - return DMA_SUCCESS; + return DMA_COMPLETE; } static inline void dma_issue_pending_all(void) { diff --git a/include/linux/fs.h b/include/linux/fs.h index bf5d574ebdf..121f11f001c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2622,7 +2622,9 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping, extern int simple_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); +extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); +extern const struct dentry_operations simple_dentry_operations; extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 023bc346b87..c0894dd8827 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -273,49 +273,40 @@ static struct genl_family ZZZ_genl_family __read_mostly = { * Magic: define multicast groups * Magic: define multicast group registration helper */ +#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps) +static const struct genl_multicast_group ZZZ_genl_mcgrps[] = { +#undef GENL_mc_group +#define GENL_mc_group(group) { .name = #group, }, +#include GENL_MAGIC_INCLUDE_FILE +}; + +enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) { +#undef GENL_mc_group +#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group), +#include GENL_MAGIC_INCLUDE_FILE +}; + #undef GENL_mc_group #define GENL_mc_group(group) \ -static struct genl_multicast_group \ -CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group) __read_mostly = { \ - .name = #group, \ -}; \ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ struct sk_buff *skb, gfp_t flags) \ { \ unsigned int group_id = \ - CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id; \ - if (!group_id) \ - return -EINVAL; \ - return genlmsg_multicast(skb, 0, group_id, flags); \ + CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \ + return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \ + group_id, flags); \ } #include GENL_MAGIC_INCLUDE_FILE -int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) -{ - int err = genl_register_family_with_ops(&ZZZ_genl_family, - ZZZ_genl_ops, ARRAY_SIZE(ZZZ_genl_ops)); - if (err) - return err; -#undef GENL_mc_group -#define GENL_mc_group(group) \ - err = genl_register_mc_group(&ZZZ_genl_family, \ - 
&CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group)); \ - if (err) \ - goto fail; \ - else \ - pr_info("%s: mcg %s: %u\n", #group, \ - __stringify(GENL_MAGIC_FAMILY), \ - CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id); - -#include GENL_MAGIC_INCLUDE_FILE - #undef GENL_mc_group #define GENL_mc_group(group) - return 0; -fail: - genl_unregister_family(&ZZZ_genl_family); - return err; + +int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) +{ + return genl_register_family_with_ops_groups(&ZZZ_genl_family, \ + ZZZ_genl_ops, \ + ZZZ_genl_mcgrps); } void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index acd2010328f..9649ff0c63f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); void hugepage_put_subpool(struct hugepage_subpool *spool); int PageHuge(struct page *page); +int PageHeadHuge(struct page *page_head); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); @@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); bool is_hugepage_active(struct page *page); -void copy_huge_page(struct page *dst, struct page *src); #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); @@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page) return 0; } +static inline int PageHeadHuge(struct page *page_head) +{ + return 0; +} + static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { } @@ -140,9 +145,6 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page) #define isolate_huge_page(p, l) false #define putback_active_hugepage(p) do {} while (0) #define is_hugepage_active(x) false -static inline void copy_huge_page(struct page *dst, struct page *src) -{ -} static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 2ab11dc3807..eff50e062be 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -205,7 +205,6 @@ struct i2c_driver { * @name: Indicates the type of the device, usually a chip name that's * generic enough to hide second-sourcing and compatible revisions. * @adapter: manages the bus segment hosting this I2C device - * @driver: device's driver, hence pointer to access routines * @dev: Driver model device node for the slave. 
* @irq: indicates the IRQ generated by this device (if any) * @detected: member of an i2c_driver.clients list or i2c-core's @@ -222,7 +221,6 @@ struct i2c_client { /* _LOWER_ 7 bits */ char name[I2C_NAME_SIZE]; struct i2c_adapter *adapter; /* the adapter we sit on */ - struct i2c_driver *driver; /* and our access routines */ struct device dev; /* the device structure */ int irq; /* irq issued by device */ struct list_head detected; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index c2702856295..84ba5ac39e0 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -119,4 +119,21 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops); extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev); +#if IS_ENABLED(CONFIG_MACVLAN) +static inline struct net_device * +macvlan_dev_real_dev(const struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->lowerdev; +} +#else +static inline struct net_device * +macvlan_dev_real_dev(const struct net_device *dev) +{ + BUG(); + return NULL; +} +#endif + #endif /* _LINUX_IF_MACVLAN_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 56bb0dc8b7d..7dc10036eff 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -70,6 +70,9 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context * IRQ_NESTED_TRHEAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -94,12 +97,14 @@ enum { IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ + IRQ_IS_POLLED) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 518a53afb9e..a74c3a84dfd 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -45,6 +45,7 @@ struct key_preparsed_payload { const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ + bool trusted; /* True if key is trusted */ }; typedef int (*request_key_actor_t)(struct key_construction *key, @@ -63,6 +64,11 @@ struct key_type { */ size_t def_datalen; + /* Default key search algorithm. */ + unsigned def_lookup_type; +#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ +#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. 
*/ + /* vet a description */ int (*vet_description)(const char *description); diff --git a/include/linux/key.h b/include/linux/key.h index 4dfde1161c5..80d677483e3 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -22,6 +22,7 @@ #include <linux/sysctl.h> #include <linux/rwsem.h> #include <linux/atomic.h> +#include <linux/assoc_array.h> #ifdef __KERNEL__ #include <linux/uidgid.h> @@ -82,6 +83,12 @@ struct key_owner; struct keyring_list; struct keyring_name; +struct keyring_index_key { + struct key_type *type; + const char *description; + size_t desc_len; +}; + /*****************************************************************************/ /* * key reference with possession attribute handling @@ -99,7 +106,7 @@ struct keyring_name; typedef struct __key_reference_with_attributes *key_ref_t; static inline key_ref_t make_key_ref(const struct key *key, - unsigned long possession) + bool possession) { return (key_ref_t) ((unsigned long) key | possession); } @@ -109,7 +116,7 @@ static inline struct key *key_ref_to_ptr(const key_ref_t key_ref) return (struct key *) ((unsigned long) key_ref & ~1UL); } -static inline unsigned long is_key_possessed(const key_ref_t key_ref) +static inline bool is_key_possessed(const key_ref_t key_ref) { return (unsigned long) key_ref & 1UL; } @@ -129,7 +136,6 @@ struct key { struct list_head graveyard_link; struct rb_node serial_node; }; - struct key_type *type; /* type of key */ struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ @@ -162,13 +168,21 @@ struct key { #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ #define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ +#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */ +#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */ - /* the description string - * - this is used to match a key against search criteria - * - this should be a printable string + /* the key type and key description string + * - the desc is used to match a key against search criteria + * - it should be a printable string * - eg: for krb5 AFS, this might be "afs@REDHAT.COM" */ - char *description; + union { + struct keyring_index_key index_key; + struct { + struct key_type *type; /* type of key */ + char *description; + }; + }; /* type specific data * - this is used by the keyring type to index the name @@ -185,11 +199,14 @@ struct key { * whatever */ union { - unsigned long value; - void __rcu *rcudata; - void *data; - struct keyring_list __rcu *subscriptions; - } payload; + union { + unsigned long value; + void __rcu *rcudata; + void *data; + void *data2[2]; + } payload; + struct assoc_array keys; + }; }; extern struct key *key_alloc(struct key_type *type, @@ -203,18 +220,23 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ #define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ +#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); extern void key_put(struct key *key); -static inline struct key *key_get(struct key *key) +static inline struct key *__key_get(struct key *key) { - if (key) - atomic_inc(&key->usage); + 
atomic_inc(&key->usage); return key; } +static inline struct key *key_get(struct key *key) +{ + return key ? __key_get(key) : key; +} + static inline void key_ref_put(key_ref_t key_ref) { key_put(key_ref_to_ptr(key_ref)); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 5eb4e31af22..da78875807f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -230,6 +230,15 @@ enum { MLX5_MAX_PAGE_SHIFT = 31 }; +enum { + MLX5_ADAPTER_PAGE_SHIFT = 12 +}; + +enum { + MLX5_CAP_OFF_DCT = 41, + MLX5_CAP_OFF_CMDIF_CSUM = 46, +}; + struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; @@ -319,9 +328,9 @@ struct mlx5_hca_cap { u8 rsvd25[42]; __be16 log_uar_page_sz; u8 rsvd26[28]; - u8 log_msx_atomic_size_qp; + u8 log_max_atomic_size_qp; u8 rsvd27[2]; - u8 log_msx_atomic_size_dc; + u8 log_max_atomic_size_dc; u8 rsvd28[76]; }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6b8c496572c..554548cd3dd 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -483,6 +483,7 @@ struct mlx5_priv { struct rb_root page_root; int fw_pages; int reg_pages; + struct list_head free_list; struct mlx5_core_health health; @@ -557,9 +558,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context); struct mlx5_cmd_work_ent { struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; + void *uout; + int uout_size; mlx5_cmd_cbk_t callback; void *context; - int idx; + int idx; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -570,6 +573,7 @@ struct mlx5_cmd_work_ent { u8 token; struct timespec ts1; struct timespec ts2; + u16 op; }; struct mlx5_pas { @@ -653,6 +657,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); @@ -676,7 +683,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - struct mlx5_create_mkey_mbox_in *in, int inlen); + struct mlx5_create_mkey_mbox_in *in, int inlen, + mlx5_cmd_cbk_t callback, void *context, + struct mlx5_create_mkey_mbox_out *out); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, struct mlx5_query_mkey_mbox_out *out, int outlen); @@ -745,6 +754,11 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) return mkey_idx << 8; } +static inline u8 mlx5_mkey_variant(u32 mkey) +{ + return mkey & 0xff; +} + enum { MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, diff --git a/include/linux/mm.h b/include/linux/mm.h index 0548eb201e0..1cedd000cf2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1318,7 +1318,6 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a #if USE_SPLIT_PTE_PTLOCKS #if BLOATED_SPINLOCKS -void __init ptlock_cache_init(void); extern bool ptlock_alloc(struct page *page); extern void ptlock_free(struct page 
*page); @@ -1327,7 +1326,6 @@ static inline spinlock_t *ptlock_ptr(struct page *page) return page->ptl; } #else /* BLOATED_SPINLOCKS */ -static inline void ptlock_cache_init(void) {} static inline bool ptlock_alloc(struct page *page) { return true; @@ -1380,17 +1378,10 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { return &mm->page_table_lock; } -static inline void ptlock_cache_init(void) {} static inline bool ptlock_init(struct page *page) { return true; } static inline void pte_lock_deinit(struct page *page) {} #endif /* USE_SPLIT_PTE_PTLOCKS */ -static inline void pgtable_init(void) -{ - ptlock_cache_init(); - pgtable_cache_init(); -} - static inline bool pgtable_page_ctor(struct page *page) { inc_zone_page_state(page, NR_PAGETABLE); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 10f5a7272b8..bd299418a93 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -44,18 +44,22 @@ struct page { /* First double word block */ unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ - struct address_space *mapping; /* If low bit clear, points to - * inode address_space, or NULL. - * If page mapped as anonymous - * memory, low bit is set, and - * it points to anon_vma object: - * see PAGE_MAPPING_ANON below. - */ + union { + struct address_space *mapping; /* If low bit clear, points to + * inode address_space, or NULL. + * If page mapped as anonymous + * memory, low bit is set, and + * it points to anon_vma object: + * see PAGE_MAPPING_ANON below. + */ + void *s_mem; /* slab first object */ + }; + /* Second double word */ struct { union { pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* slub/slob first free object */ + void *freelist; /* sl[aou]b first free object */ bool pfmemalloc; /* If set by the page allocator, * ALLOC_NO_WATERMARKS was set * and the low watermark was not @@ -65,9 +69,6 @@ struct page { * this page is only used to * free other pages. */ -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS - pgtable_t pmd_huge_pte; /* protected by page->ptl */ -#endif }; union { @@ -114,6 +115,7 @@ struct page { }; atomic_t _count; /* Usage count, see below. 
*/ }; + unsigned int active; /* SLAB */ }; }; @@ -135,6 +137,12 @@ struct page { struct list_head list; /* slobs list of pages */ struct slab *slab_page; /* slab fields */ + struct rcu_head rcu_head; /* Used by SLAB + * when destroying via RCU + */ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS + pgtable_t pmd_huge_pte; /* protected by page->ptl */ +#endif }; /* Remainder is not double word aligned */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 842de3e21e7..176fdf824b1 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -240,6 +240,7 @@ struct mmc_part { struct mmc_card { struct mmc_host *host; /* the host this device belongs to */ struct device dev; /* the device */ + u32 ocr; /* the current OCR setting */ unsigned int rca; /* relative card address of device */ unsigned int type; /* card type */ #define MMC_TYPE_MMC 0 /* MMC card */ @@ -257,6 +258,7 @@ struct mmc_card { #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ #define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */ +#define MMC_STATE_SUSPENDED (1<<11) /* card is suspended */ unsigned int quirks; /* card quirks */ #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ @@ -420,10 +422,10 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) #define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) -#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) #define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) +#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED) #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) @@ -432,11 +434,12 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) -#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) #define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS) #define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS) +#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED) +#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED) /* * Quirk add/remove for MMC products. 
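The mmc/card.h hunk above moves the OCR value into struct mmc_card and replaces the old host-wide suspend bookkeeping with a per-card MMC_STATE_SUSPENDED bit plus mmc_card_suspended()/mmc_card_set_suspended()/mmc_card_clr_suspended() accessors. The following is a minimal, standalone sketch of that test/set/clear flag pattern; the cut-down struct mmc_card and the bit values are illustrative stand-ins, not the kernel definitions.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the card state bits used below. */
#define MMC_STATE_PRESENT	(1 << 0)
#define MMC_STATE_SUSPENDED	(1 << 11)

/* Cut-down stand-in for struct mmc_card: only the state word matters here. */
struct mmc_card {
	unsigned int state;
};

/* Same accessor style as the helpers added in the hunk above. */
#define mmc_card_suspended(c)		((c)->state & MMC_STATE_SUSPENDED)
#define mmc_card_set_suspended(c)	((c)->state |= MMC_STATE_SUSPENDED)
#define mmc_card_clr_suspended(c)	((c)->state &= ~MMC_STATE_SUSPENDED)

int main(void)
{
	struct mmc_card card = { .state = MMC_STATE_PRESENT };

	/* A suspend path would set the bit once the card has been quiesced. */
	mmc_card_set_suspended(&card);
	assert(mmc_card_suspended(&card));

	/* The matching resume path clears it again. */
	mmc_card_clr_suspended(&card);
	assert(!mmc_card_suspended(&card));

	printf("final card state: %#x\n", card.state);
	return 0;
}

Tracking the flag on the card rather than on the host appears to be what lets the core skip redundant suspend or resume work for a card that is already in the requested state.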
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index da51bec578c..87079fc3801 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -151,7 +151,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); -extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool); +extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, + bool); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); @@ -188,7 +189,6 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); extern void mmc_release_host(struct mmc_host *host); -extern int mmc_try_claim_host(struct mmc_host *host); extern void mmc_get_card(struct mmc_card *card); extern void mmc_put_card(struct mmc_card *card); diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 198f0fa44e9..6ce7d2cd3c7 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -15,6 +15,7 @@ #define LINUX_MMC_DW_MMC_H #include <linux/scatterlist.h> +#include <linux/mmc/core.h> #define MAX_MCI_SLOTS 2 @@ -129,6 +130,9 @@ struct dw_mci { struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data; + struct mmc_command stop_abort; + unsigned int prev_blksz; + unsigned char timing; struct workqueue_struct *card_workqueue; /* DMA interface members*/ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 3b0c33ae13e..99f5709ac34 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -254,6 +254,7 @@ struct mmc_host { #define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ +#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. 
*/ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ @@ -309,7 +310,6 @@ struct mmc_host { spinlock_t lock; /* lock for claim and bus ops */ struct mmc_ios ios; /* current io bus settings */ - u32 ocr; /* the current OCR setting */ /* group bitfields together to minimize padding */ unsigned int use_spi_crc:1; @@ -382,9 +382,6 @@ static inline void *mmc_priv(struct mmc_host *host) #define mmc_classdev(x) (&(x)->class_dev) #define mmc_hostname(x) (dev_name(&(x)->class_dev)) -int mmc_suspend_host(struct mmc_host *); -int mmc_resume_host(struct mmc_host *); - int mmc_power_save_host(struct mmc_host *host); int mmc_power_restore_host(struct mmc_host *host); diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index d006f0ca60f..5a462c4e500 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -27,7 +27,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) while (!pci_is_root_bus(pbus)) pbus = pbus->parent; - return DEVICE_ACPI_HANDLE(pbus->bridge); + return ACPI_HANDLE(pbus->bridge); } static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) @@ -39,7 +39,7 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) else dev = &pbus->self->dev; - return DEVICE_ACPI_HANDLE(dev); + return ACPI_HANDLE(dev); } void acpi_pci_add_bus(struct pci_bus *bus); diff --git a/include/linux/i2c/at24.h b/include/linux/platform_data/at24.h index 285025a9cdc..c42aa89d34e 100644 --- a/include/linux/i2c/at24.h +++ b/include/linux/platform_data/at24.h @@ -28,7 +28,7 @@ * * void get_mac_addr(struct memory_accessor *mem_acc, void *context) * { - * u8 *mac_addr = ethernet_pdata->mac_addr; + * u8 *mac_addr = ethernet_pdata->mac_addr; * off_t offset = context; * * // Read MAC addr from EEPROM diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 179fb91bb5f..f50821cb64b 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -67,10 +67,10 @@ struct edmacc_param { #define ITCCHEN BIT(23) /*ch_status paramater of callback function possible values*/ -#define DMA_COMPLETE 1 -#define DMA_CC_ERROR 2 -#define DMA_TC1_ERROR 3 -#define DMA_TC2_ERROR 4 +#define EDMA_DMA_COMPLETE 1 +#define EDMA_DMA_CC_ERROR 2 +#define EDMA_DMA_TC1_ERROR 3 +#define EDMA_DMA_TC2_ERROR 4 enum address_mode { INCR = 0, diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index d44912d8157..75f70f6ac13 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -10,6 +10,8 @@ #ifndef __ASM_ARCH_IMX_ESDHC_H #define __ASM_ARCH_IMX_ESDHC_H +#include <linux/types.h> + enum wp_types { ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ @@ -32,6 +34,7 @@ enum cd_types { * @cd_gpio: gpio for card_detect interrupt * @wp_type: type of write_protect method (see wp_types enum above) * @cd_type: type of card_detect method (see cd_types enum above) + * @support_vsel: indicate it supports 1.8v switching */ struct esdhc_platform_data { @@ -41,5 +44,7 @@ struct esdhc_platform_data { enum cd_types cd_type; int max_bus_width; unsigned int f_max; + bool support_vsel; + unsigned int delay_line; }; #endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/power/bq24735-charger.h 
b/include/linux/power/bq24735-charger.h new file mode 100644 index 00000000000..f536164a606 --- /dev/null +++ b/include/linux/power/bq24735-charger.h @@ -0,0 +1,39 @@ +/* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __CHARGER_BQ24735_H_ +#define __CHARGER_BQ24735_H_ + +#include <linux/types.h> +#include <linux/power_supply.h> + +struct bq24735_platform { + uint32_t charge_current; + uint32_t charge_voltage; + uint32_t input_current; + + const char *name; + + int status_gpio; + int status_gpio_active_low; + bool status_gpio_valid; + + char **supplied_to; + size_t num_supplicants; +}; + +#endif /* __CHARGER_BQ24735_H_ */ diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h index 931bc616219..d169820203d 100644 --- a/include/linux/preempt_mask.h +++ b/include/linux/preempt_mask.h @@ -11,36 +11,23 @@ * - bits 0-7 are the preemption count (max preemption depth: 256) * - bits 8-15 are the softirq count (max # of softirqs: 256) * - * The hardirq count can in theory reach the same as NR_IRQS. - * In reality, the number of nested IRQS is limited to the stack - * size as well. For archs with over 1000 IRQS it is not practical - * to expect that they will all nest. We give a max of 10 bits for - * hardirq nesting. An arch may choose to give less than 10 bits. - * m68k expects it to be 8. + * The hardirq count could in theory be the same as the number of + * interrupts in the system, but we run all interrupt handlers with + * interrupts disabled, so we cannot have nesting interrupts. Though + * there are a few palaeontologic drivers which reenable interrupts in + * the handler, so we need more than one bit here. * - * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) - * - bit 26 is the NMI_MASK - * - bit 27 is the PREEMPT_ACTIVE flag - * - * PREEMPT_MASK: 0x000000ff - * SOFTIRQ_MASK: 0x0000ff00 - * HARDIRQ_MASK: 0x03ff0000 - * NMI_MASK: 0x04000000 + * PREEMPT_MASK: 0x000000ff + * SOFTIRQ_MASK: 0x0000ff00 + * HARDIRQ_MASK: 0x000f0000 + * NMI_MASK: 0x00100000 + * PREEMPT_ACTIVE: 0x00200000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 +#define HARDIRQ_BITS 4 #define NMI_BITS 1 -#define MAX_HARDIRQ_BITS 10 - -#ifndef HARDIRQ_BITS -# define HARDIRQ_BITS MAX_HARDIRQ_BITS -#endif - -#if HARDIRQ_BITS > MAX_HARDIRQ_BITS -#error HARDIRQ_BITS too high! -#endif - #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) @@ -60,15 +47,9 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -#ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) -#endif - -#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) -#error PREEMPT_ACTIVE is too low! 
-#endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) diff --git a/include/linux/sched.h b/include/linux/sched.h index 6f7ffa46008..7e35d4b9e14 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -22,7 +22,7 @@ struct sched_param { #include <linux/errno.h> #include <linux/nodemask.h> #include <linux/mm_types.h> -#include <linux/preempt.h> +#include <linux/preempt_mask.h> #include <asm/page.h> #include <asm/ptrace.h> diff --git a/include/linux/security.h b/include/linux/security.h index 9d37e2b9d3e..5623a7f965b 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @xfrm_policy_delete_security: * @ctx contains the xfrm_sec_ctx. * Authorize deletion of xp->security. - * @xfrm_state_alloc_security: + * @xfrm_state_alloc: * @x contains the xfrm_state being added to the Security Association * Database by the XFRM system. * @sec_ctx contains the security context information being provided by * the user-level SA generation program (e.g., setkey or racoon). - * @secid contains the secid from which to take the mls portion of the context. * Allocate a security structure to the x->security field; the security * field is initialized to NULL when the xfrm_state is allocated. Set the - * context to correspond to either sec_ctx or polsec, with the mls portion - * taken from secid in the latter case. - * Return 0 if operation was successful (memory to allocate, legal context). + * context to correspond to sec_ctx. Return 0 if operation was successful + * (memory to allocate, legal context). + * @xfrm_state_alloc_acquire: + * @x contains the xfrm_state being added to the Security Association + * Database by the XFRM system. + * @polsec contains the policy's security context. + * @secid contains the secid from which to take the mls portion of the + * context. + * Allocate a security structure to the x->security field; the security + * field is initialized to NULL when the xfrm_state is allocated. Set the + * context to correspond to secid. Return 0 if operation was successful + * (memory to allocate, legal context). * @xfrm_state_free_security: * @x contains the xfrm_state. * Deallocate x->security. @@ -1679,9 +1687,11 @@ struct security_operations { int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); - int (*xfrm_state_alloc_security) (struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx, - u32 secid); + int (*xfrm_state_alloc) (struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx); + int (*xfrm_state_alloc_acquire) (struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, + u32 secid); void (*xfrm_state_free_security) (struct xfrm_state *x); int (*xfrm_state_delete_security) (struct xfrm_state *x); int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 1e8a8b6e837..cf87a24c0f9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -354,6 +354,35 @@ static inline void read_sequnlock_excl(seqlock_t *sl) spin_unlock(&sl->lock); } +/** + * read_seqbegin_or_lock - begin a sequence number check or locking block + * @lock: sequence lock + * @seq : sequence number to be checked + * + * First try it once optimistically without taking the lock. 
If that fails, + * take the lock. The sequence number is also used as a marker for deciding + * whether to be a reader (even) or writer (odd). + * N.B. seq must be initialized to an even number to begin with. + */ +static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) +{ + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl(lock); +} + +static inline int need_seqretry(seqlock_t *lock, int seq) +{ + return !(seq & 1) && read_seqretry(lock, seq); +} + +static inline void done_seqretry(seqlock_t *lock, int seq) +{ + if (seq & 1) + read_sequnlock_excl(lock); +} + static inline void read_seqlock_excl_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 215b5ea1cb3..bec1cc7d5e3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2263,24 +2263,6 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); -/** - * pskb_trim_rcsum - trim received skb and update checksum - * @skb: buffer to trim - * @len: new length - * - * This is exactly the same as pskb_trim except that it ensures the - * checksum of received packets are still valid after the operation. - */ - -static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) -{ - if (likely(len >= skb->len)) - return 0; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->ip_summed = CHECKSUM_NONE; - return __pskb_trim(skb, len); -} - #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -2378,6 +2360,27 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); +/** + * pskb_trim_rcsum - trim received skb and update checksum + * @skb: buffer to trim + * @len: new length + * + * This is exactly the same as pskb_trim except that it ensures the + * checksum of received packets are still valid after the operation. + */ + +static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (likely(len >= skb->len)) + return 0; + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __wsum adj = skb_checksum(skb, len, skb->len - len, 0); + + skb->csum = csum_sub(skb->csum, adj); + } + return __pskb_trim(skb, len); +} + static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) { diff --git a/include/linux/slab.h b/include/linux/slab.h index 74f105847d1..c2bba248fa6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -53,7 +53,14 @@ * } * rcu_read_unlock(); * - * See also the comment on struct slab_rcu in mm/slab.c. + * This is useful if we need to approach a kernel structure obliquely, + * from its address obtained without the usual locking. We can lock + * the structure to stabilize it and check it's still at the given address, + * only if we can be sure that the memory has not been meanwhile reused + * for some other kind of object (which our subsystem's lock might corrupt). + * + * rcu_read_lock before reading the address, then rcu_read_unlock after + * taking the spinlock within the structure expected at that address. 
*/ #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index e9346b4f1ef..09bfffb08a5 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -27,8 +27,8 @@ struct kmem_cache { size_t colour; /* cache colouring range */ unsigned int colour_off; /* colour offset */ - struct kmem_cache *slabp_cache; - unsigned int slab_size; + struct kmem_cache *freelist_cache; + unsigned int freelist_size; /* constructor func */ void (*ctor)(void *obj); diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index cc0b67eada4..f56bfa9e452 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -11,7 +11,7 @@ enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ - FREE_FASTPATH, /* Free to cpu slub */ + FREE_FASTPATH, /* Free to cpu slab */ FREE_SLOWPATH, /* Freeing not to cpu slab */ FREE_FROZEN, /* Freeing to frozen slab */ FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 4db29859464..4836ba3c1cd 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -27,6 +27,12 @@ struct user_namespace { kuid_t owner; kgid_t group; unsigned int proc_inum; + + /* Register of per-UID persistent keyrings for this namespace */ +#ifdef CONFIG_PERSISTENT_KEYRINGS + struct key *persistent_keyring_register; + struct rw_semaphore persistent_keyring_register_sem; +#endif }; extern struct user_namespace init_user_ns; diff --git a/include/linux/wait.h b/include/linux/wait.h index 61939ba30aa..eaa00b10aba 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -278,6 +278,31 @@ do { \ __ret; \ }) +#define __wait_event_cmd(wq, condition, cmd1, cmd2) \ + (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ + cmd1; schedule(); cmd2) + +/** + * wait_event_cmd - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * cmd1: the command will be executed before sleep + * cmd2: the command will be executed after sleep + * + * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + */ +#define wait_event_cmd(wq, condition, cmd1, cmd2) \ +do { \ + if (condition) \ + break; \ + __wait_event_cmd(wq, condition, cmd1, cmd2); \ +} while (0) + #define __wait_event_interruptible(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) diff --git a/include/media/lm3560.h b/include/media/lm3560.h new file mode 100644 index 00000000000..46670706d6f --- /dev/null +++ b/include/media/lm3560.h @@ -0,0 +1,97 @@ +/* + * include/media/lm3560.h + * + * Copyright (C) 2013 Texas Instruments + * + * Contact: Daniel Jeong <gshark.jeong@gmail.com> + * Ldd-Mlp <ldd-mlp@list.ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __LM3560_H__ +#define __LM3560_H__ + +#include <media/v4l2-subdev.h> + +#define LM3560_NAME "lm3560" +#define LM3560_I2C_ADDR (0x53) + +/* FLASH Brightness + * min 62500uA, step 62500uA, max 1000000uA + */ +#define LM3560_FLASH_BRT_MIN 62500 +#define LM3560_FLASH_BRT_STEP 62500 +#define LM3560_FLASH_BRT_MAX 1000000 +#define LM3560_FLASH_BRT_uA_TO_REG(a) \ + ((a) < LM3560_FLASH_BRT_MIN ? 0 : \ + (((a) - LM3560_FLASH_BRT_MIN) / LM3560_FLASH_BRT_STEP)) +#define LM3560_FLASH_BRT_REG_TO_uA(a) \ + ((a) * LM3560_FLASH_BRT_STEP + LM3560_FLASH_BRT_MIN) + +/* FLASH TIMEOUT DURATION + * min 32ms, step 32ms, max 1024ms + */ +#define LM3560_FLASH_TOUT_MIN 32 +#define LM3560_FLASH_TOUT_STEP 32 +#define LM3560_FLASH_TOUT_MAX 1024 +#define LM3560_FLASH_TOUT_ms_TO_REG(a) \ + ((a) < LM3560_FLASH_TOUT_MIN ? 0 : \ + (((a) - LM3560_FLASH_TOUT_MIN) / LM3560_FLASH_TOUT_STEP)) +#define LM3560_FLASH_TOUT_REG_TO_ms(a) \ + ((a) * LM3560_FLASH_TOUT_STEP + LM3560_FLASH_TOUT_MIN) + +/* TORCH BRT + * min 31250uA, step 31250uA, max 250000uA + */ +#define LM3560_TORCH_BRT_MIN 31250 +#define LM3560_TORCH_BRT_STEP 31250 +#define LM3560_TORCH_BRT_MAX 250000 +#define LM3560_TORCH_BRT_uA_TO_REG(a) \ + ((a) < LM3560_TORCH_BRT_MIN ? 0 : \ + (((a) - LM3560_TORCH_BRT_MIN) / LM3560_TORCH_BRT_STEP)) +#define LM3560_TORCH_BRT_REG_TO_uA(a) \ + ((a) * LM3560_TORCH_BRT_STEP + LM3560_TORCH_BRT_MIN) + +enum lm3560_led_id { + LM3560_LED0 = 0, + LM3560_LED1, + LM3560_LED_MAX +}; + +enum lm3560_peak_current { + LM3560_PEAK_1600mA = 0x00, + LM3560_PEAK_2300mA = 0x20, + LM3560_PEAK_3000mA = 0x40, + LM3560_PEAK_3600mA = 0x60 +}; + +/* struct lm3560_platform_data + * + * @peak : peak current + * @max_flash_timeout: flash timeout + * @max_flash_brt: flash mode led brightness + * @max_torch_brt: torch mode led brightness + */ +struct lm3560_platform_data { + enum lm3560_peak_current peak; + + u32 max_flash_timeout; + u32 max_flash_brt[LM3560_LED_MAX]; + u32 max_torch_brt[LM3560_LED_MAX]; +}; + +#endif /* __LM3560_H__ */ diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 34d2414f2b8..865246b0012 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -146,9 +146,14 @@ struct soc_camera_subdev_desc { /* sensor driver private platform data */ void *drv_priv; - /* Optional regulators that have to be managed on power on/off events */ - struct regulator_bulk_data *regulators; - int num_regulators; + /* + * Set unbalanced_power to true to deal with legacy drivers, failing to + * balance their calls to subdevice's .s_power() method. clock_state is + * then used internally by helper functions, it shouldn't be touched by + * drivers or the platform code. 
+ */ + bool unbalanced_power; + unsigned long clock_state; /* Optional callbacks to power on or off and reset the sensor */ int (*power)(struct device *, int); @@ -162,6 +167,9 @@ struct soc_camera_subdev_desc { int (*set_bus_param)(struct soc_camera_subdev_desc *, unsigned long flags); unsigned long (*query_bus_param)(struct soc_camera_subdev_desc *); void (*free_bus)(struct soc_camera_subdev_desc *); + + /* Optional regulators that have to be managed on power on/off events */ + struct v4l2_subdev_platform_data sd_pdata; }; struct soc_camera_host_desc { @@ -202,9 +210,10 @@ struct soc_camera_link { void *priv; - /* Optional regulators that have to be managed on power on/off events */ - struct regulator_bulk_data *regulators; - int num_regulators; + /* Set by platforms to handle misbehaving drivers */ + bool unbalanced_power; + /* Used by soc-camera helper functions */ + unsigned long clock_state; /* Optional callbacks to power on or off and reset the sensor */ int (*power)(struct device *, int); @@ -218,6 +227,12 @@ struct soc_camera_link { unsigned long (*query_bus_param)(struct soc_camera_link *); void (*free_bus)(struct soc_camera_link *); + /* Optional regulators that have to be managed on power on/off events */ + struct regulator_bulk_data *regulators; + int num_regulators; + + void *host_priv; + /* * Host part - keep at bottom and compatible to * struct soc_camera_host_desc diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h index 0503a90b48b..0b36cc13830 100644 --- a/include/media/v4l2-clk.h +++ b/include/media/v4l2-clk.h @@ -15,6 +15,7 @@ #define MEDIA_V4L2_CLK_H #include <linux/atomic.h> +#include <linux/export.h> #include <linux/list.h> #include <linux/mutex.h> @@ -51,4 +52,20 @@ void v4l2_clk_disable(struct v4l2_clk *clk); unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk); int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate); +struct module; + +struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id, + const char *id, unsigned long rate, struct module *owner); +void v4l2_clk_unregister_fixed(struct v4l2_clk *clk); + +static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id, + const char *id, + unsigned long rate) +{ + return __v4l2_clk_register_fixed(dev_id, id, rate, THIS_MODULE); +} + +#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \ + "%d-%04x", adap, client) + #endif diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 16550c43900..48f974866f1 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -35,7 +35,7 @@ printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg) #define v4l_client_printk(level, client, fmt, arg...) \ - v4l_printk(level, (client)->driver->driver.name, (client)->adapter, \ + v4l_printk(level, (client)->dev.driver->name, (client)->adapter, \ (client)->addr, fmt , ## arg) #define v4l_err(client, fmt, arg...) 
\ @@ -86,7 +86,7 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, const char * const *menu_items); const char *v4l2_ctrl_get_name(u32 id); const char * const *v4l2_ctrl_get_menu(u32 id); -const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len); +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def); int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl, const char * const *menu_items); diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 47ada23345a..16f7f260651 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -571,7 +571,7 @@ static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl) mutex_lock(ctrl->handler->lock); } -/** v4l2_ctrl_lock() - Helper function to unlock the handler +/** v4l2_ctrl_unlock() - Helper function to unlock the handler * associated with the control. * @ctrl: The control to unlock. */ diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h index a62ee18cb7b..528cdaf622e 100644 --- a/include/media/v4l2-fh.h +++ b/include/media/v4l2-fh.h @@ -26,7 +26,9 @@ #ifndef V4L2_FH_H #define V4L2_FH_H +#include <linux/fs.h> #include <linux/list.h> +#include <linux/videodev2.h> struct video_device; struct v4l2_ctrl_handler; diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index bfda0fe9aeb..d67210a37ef 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -559,6 +559,17 @@ struct v4l2_subdev_internal_ops { /* Set this flag if this subdev generates events. */ #define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3) +struct regulator_bulk_data; + +struct v4l2_subdev_platform_data { + /* Optional regulators uset to power on/off the subdevice */ + struct regulator_bulk_data *regulators; + int num_regulators; + + /* Per-subdevice data, specific for a certain video host device */ + void *host_priv; +}; + /* Each instance of a subdev driver should create this struct, either stand-alone or embedded in a larger struct. */ @@ -592,6 +603,8 @@ struct v4l2_subdev { struct v4l2_async_subdev *asd; /* Pointer to the managing notifier. 
*/ struct v4l2_async_notifier *notifier; + /* common part of subdevice platform data */ + struct v4l2_subdev_platform_data *pdata; }; #define media_entity_to_v4l2_subdev(ent) \ @@ -622,13 +635,13 @@ struct v4l2_subdev_fh { v4l2_subdev_get_try_##fun_name(struct v4l2_subdev_fh *fh, \ unsigned int pad) \ { \ - BUG_ON(unlikely(pad >= vdev_to_v4l2_subdev( \ - fh->vfh.vdev)->entity.num_pads)); \ + BUG_ON(pad >= vdev_to_v4l2_subdev( \ + fh->vfh.vdev)->entity.num_pads); \ return &fh->pad[pad].field_name; \ } __V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, format, try_fmt) -__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_compose) +__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_crop) __V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, compose, try_compose) #endif diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 6781258d0b6..bd8218b1500 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -391,7 +391,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, loff_t *ppos, int nonblock); -size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, loff_t *ppos, int nonblock); /** @@ -491,7 +491,7 @@ int vb2_ioctl_expbuf(struct file *file, void *priv, int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); int vb2_fop_release(struct file *file); -ssize_t vb2_fop_write(struct file *file, char __user *buf, +ssize_t vb2_fop_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos); ssize_t vb2_fop_read(struct file *file, char __user *buf, size_t count, loff_t *ppos); diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h index 0038526b8ef..7b89852779a 100644 --- a/include/media/videobuf2-dma-sg.h +++ b/include/media/videobuf2-dma-sg.h @@ -15,16 +15,10 @@ #include <media/videobuf2-core.h> -struct vb2_dma_sg_desc { - unsigned long size; - unsigned int num_pages; - struct scatterlist *sglist; -}; - -static inline struct vb2_dma_sg_desc *vb2_dma_sg_plane_desc( +static inline struct sg_table *vb2_dma_sg_plane_desc( struct vb2_buffer *vb, unsigned int plane_no) { - return (struct vb2_dma_sg_desc *)vb2_plane_cookie(vb, plane_no); + return (struct sg_table *)vb2_plane_cookie(vb, plane_no); } extern const struct vb2_mem_ops vb2_dma_sg_memops; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 9b787b62cf1..ace4abf118d 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -10,16 +10,9 @@ /** * struct genl_multicast_group - generic netlink multicast group * @name: name of the multicast group, names are per-family - * @id: multicast group ID, assigned by the core, to use with - * genlmsg_multicast(). 
- * @list: list entry for linking - * @family: pointer to family, need not be set before registering */ struct genl_multicast_group { - struct genl_family *family; /* private */ - struct list_head list; /* private */ char name[GENL_NAMSIZ]; - u32 id; }; struct genl_ops; @@ -39,9 +32,12 @@ struct genl_info; * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks * @attrbuf: buffer to store parsed attributes - * @ops_list: list of all assigned operations * @family_list: family list - * @mcast_groups: multicast groups list + * @mcgrps: multicast groups used by this family (private) + * @n_mcgrps: number of multicast groups (private) + * @mcgrp_offset: starting number of multicast group IDs in this family + * @ops: the operations supported by this family (private) + * @n_ops: number of operations supported by this family (private) */ struct genl_family { unsigned int id; @@ -51,16 +47,19 @@ struct genl_family { unsigned int maxattr; bool netnsok; bool parallel_ops; - int (*pre_doit)(struct genl_ops *ops, + int (*pre_doit)(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); - void (*post_doit)(struct genl_ops *ops, + void (*post_doit)(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); struct nlattr ** attrbuf; /* private */ - struct list_head ops_list; /* private */ + const struct genl_ops * ops; /* private */ + const struct genl_multicast_group *mcgrps; /* private */ + unsigned int n_ops; /* private */ + unsigned int n_mcgrps; /* private */ + unsigned int mcgrp_offset; /* private */ struct list_head family_list; /* private */ - struct list_head mcast_groups; /* private */ struct module *module; }; @@ -110,16 +109,15 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) * @ops_list: operations list */ struct genl_ops { - u8 cmd; - u8 internal_flags; - unsigned int flags; const struct nla_policy *policy; int (*doit)(struct sk_buff *skb, struct genl_info *info); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); - struct list_head ops_list; + u8 cmd; + u8 internal_flags; + u8 flags; }; int __genl_register_family(struct genl_family *family); @@ -130,24 +128,53 @@ static inline int genl_register_family(struct genl_family *family) return __genl_register_family(family); } -int __genl_register_family_with_ops(struct genl_family *family, - struct genl_ops *ops, size_t n_ops); - -static inline int genl_register_family_with_ops(struct genl_family *family, - struct genl_ops *ops, size_t n_ops) +/** + * genl_register_family_with_ops - register a generic netlink family with ops + * @family: generic netlink family + * @ops: operations to be registered + * @n_ops: number of elements to register + * + * Registers the specified family and operations from the specified table. + * Only one family may be registered with the same family name or identifier. + * + * The family id may equal GENL_ID_GENERATE causing an unique id to + * be automatically generated and assigned. + * + * Either a doit or dumpit callback must be specified for every registered + * operation or the function will fail. Only one operation structure per + * command identifier may be registered. + * + * See include/net/genetlink.h for more documenation on the operations + * structure. + * + * Return 0 on success or a negative error code. 
+ */ +static inline int +_genl_register_family_with_ops_grps(struct genl_family *family, + const struct genl_ops *ops, size_t n_ops, + const struct genl_multicast_group *mcgrps, + size_t n_mcgrps) { family->module = THIS_MODULE; - return __genl_register_family_with_ops(family, ops, n_ops); + family->ops = ops; + family->n_ops = n_ops; + family->mcgrps = mcgrps; + family->n_mcgrps = n_mcgrps; + return __genl_register_family(family); } +#define genl_register_family_with_ops(family, ops) \ + _genl_register_family_with_ops_grps((family), \ + (ops), ARRAY_SIZE(ops), \ + NULL, 0) +#define genl_register_family_with_ops_groups(family, ops, grps) \ + _genl_register_family_with_ops_grps((family), \ + (ops), ARRAY_SIZE(ops), \ + (grps), ARRAY_SIZE(grps)) + int genl_unregister_family(struct genl_family *family); -int genl_register_ops(struct genl_family *, struct genl_ops *ops); -int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); -int genl_register_mc_group(struct genl_family *family, - struct genl_multicast_group *grp); -void genl_unregister_mc_group(struct genl_family *family, - struct genl_multicast_group *grp); -void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, +void genl_notify(struct genl_family *family, + struct sk_buff *skb, struct net *net, u32 portid, u32 group, struct nlmsghdr *nlh, gfp_t flags); void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, @@ -227,41 +254,54 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr) /** * genlmsg_multicast_netns - multicast a netlink message to a specific netns + * @family: the generic netlink family * @net: the net namespace * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself - * @group: multicast group id + * @group: offset of multicast group in groups array * @flags: allocation flags */ -static inline int genlmsg_multicast_netns(struct net *net, struct sk_buff *skb, +static inline int genlmsg_multicast_netns(struct genl_family *family, + struct net *net, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { + if (group >= family->n_mcgrps) + return -EINVAL; + group = family->mcgrp_offset + group; return nlmsg_multicast(net->genl_sock, skb, portid, group, flags); } /** * genlmsg_multicast - multicast a netlink message to the default netns + * @family: the generic netlink family * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself - * @group: multicast group id + * @group: offset of multicast group in groups array * @flags: allocation flags */ -static inline int genlmsg_multicast(struct sk_buff *skb, u32 portid, +static inline int genlmsg_multicast(struct genl_family *family, + struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { - return genlmsg_multicast_netns(&init_net, skb, portid, group, flags); + if (group >= family->n_mcgrps) + return -EINVAL; + group = family->mcgrp_offset + group; + return genlmsg_multicast_netns(family, &init_net, skb, + portid, group, flags); } /** * genlmsg_multicast_allns - multicast a netlink message to all net namespaces + * @family: the generic netlink family * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself - * @group: multicast group id + * @group: offset of multicast group in groups array * @flags: allocation flags * * This function must hold the RTNL or rcu_read_lock(). 
*/ -int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, +int genlmsg_multicast_allns(struct genl_family *family, + struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags); /** @@ -332,5 +372,22 @@ static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags) return nlmsg_new(genlmsg_total_size(payload), flags); } +/** + * genl_set_err - report error to genetlink broadcast listeners + * @family: the generic netlink family + * @net: the network namespace to report the error to + * @portid: the PORTID of a process that we want to skip (if any) + * @group: the broadcast group that will notice the error + * (this is the offset of the multicast group in the groups array) + * @code: error code, must be negative (as usual in kernelspace) + * + * This function returns the number of broadcast listeners that have set the + * NETLINK_RECV_NO_ENOBUFS socket option. + */ +static inline int genl_set_err(struct genl_family *family, struct net *net, + u32 portid, u32 group, int code) +{ + return netlink_set_err(net->genl_sock, portid, group, code); +} #endif /* __NET_GENERIC_NETLINK_H */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e393171e2fa..979874c627e 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -67,12 +67,14 @@ enum rdma_node_type { RDMA_NODE_IB_CA = 1, RDMA_NODE_IB_SWITCH, RDMA_NODE_IB_ROUTER, - RDMA_NODE_RNIC + RDMA_NODE_RNIC, + RDMA_NODE_USNIC, }; enum rdma_transport_type { RDMA_TRANSPORT_IB, - RDMA_TRANSPORT_IWARP + RDMA_TRANSPORT_IWARP, + RDMA_TRANSPORT_USNIC }; enum rdma_transport_type @@ -1436,6 +1438,7 @@ struct ib_device { int uverbs_abi_ver; u64 uverbs_cmd_mask; + u64 uverbs_ex_cmd_mask; char node_desc[64]; __be64 node_guid; @@ -2384,4 +2387,17 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain); int ib_destroy_flow(struct ib_flow *flow_id); +static inline int ib_check_mr_access(int flags) +{ + /* + * Local write permission is required if remote write or + * remote atomic permission is also requested. + */ + if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && + !(flags & IB_ACCESS_LOCAL_WRITE)) + return -EINVAL; + + return 0; +} + #endif /* IB_VERBS_H */ diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h index ff0f04ac91a..4ebf6913b7b 100644 --- a/include/scsi/scsi_transport_srp.h +++ b/include/scsi/scsi_transport_srp.h @@ -13,6 +13,27 @@ struct srp_rport_identifiers { u8 roles; }; +/** + * enum srp_rport_state - SRP transport layer state + * @SRP_RPORT_RUNNING: Transport layer operational. + * @SRP_RPORT_BLOCKED: Transport layer not operational; fast I/O fail timer + * is running and I/O has been blocked. + * @SRP_RPORT_FAIL_FAST: Fast I/O fail timer has expired; fail I/O fast. + * @SRP_RPORT_LOST: Device loss timer has expired; port is being removed. + */ +enum srp_rport_state { + SRP_RPORT_RUNNING, + SRP_RPORT_BLOCKED, + SRP_RPORT_FAIL_FAST, + SRP_RPORT_LOST, +}; + +/** + * struct srp_rport + * @lld_data: LLD private data. + * @mutex: Protects against concurrent rport reconnect / fast_io_fail / + * dev_loss_tmo activity. 
+ */ struct srp_rport { /* for initiator and target drivers */ @@ -23,11 +44,43 @@ struct srp_rport { /* for initiator drivers */ - void *lld_data; /* LLD private data */ + void *lld_data; + + struct mutex mutex; + enum srp_rport_state state; + bool deleted; + int reconnect_delay; + int failed_reconnects; + struct delayed_work reconnect_work; + int fast_io_fail_tmo; + int dev_loss_tmo; + struct delayed_work fast_io_fail_work; + struct delayed_work dev_loss_work; }; +/** + * struct srp_function_template + * @has_rport_state: Whether or not to create the state, fast_io_fail_tmo and + * dev_loss_tmo sysfs attribute for an rport. + * @reset_timer_if_blocked: Whether or srp_timed_out() should reset the command + * timer if the device on which it has been queued is blocked. + * @reconnect_delay: If not NULL, points to the default reconnect_delay value. + * @fast_io_fail_tmo: If not NULL, points to the default fast_io_fail_tmo value. + * @dev_loss_tmo: If not NULL, points to the default dev_loss_tmo value. + * @reconnect: Callback function for reconnecting to the target. See also + * srp_reconnect_rport(). + * @terminate_rport_io: Callback function for terminating all outstanding I/O + * requests for an rport. + */ struct srp_function_template { /* for initiator drivers */ + bool has_rport_state; + bool reset_timer_if_blocked; + int *reconnect_delay; + int *fast_io_fail_tmo; + int *dev_loss_tmo; + int (*reconnect)(struct srp_rport *rport); + void (*terminate_rport_io)(struct srp_rport *rport); void (*rport_delete)(struct srp_rport *rport); /* for target drivers */ int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int); @@ -38,10 +91,36 @@ extern struct scsi_transport_template * srp_attach_transport(struct srp_function_template *); extern void srp_release_transport(struct scsi_transport_template *); +extern void srp_rport_get(struct srp_rport *rport); +extern void srp_rport_put(struct srp_rport *rport); extern struct srp_rport *srp_rport_add(struct Scsi_Host *, struct srp_rport_identifiers *); extern void srp_rport_del(struct srp_rport *); - +extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, + int dev_loss_tmo); +extern int srp_reconnect_rport(struct srp_rport *rport); +extern void srp_start_tl_fail_timers(struct srp_rport *rport); extern void srp_remove_host(struct Scsi_Host *); +/** + * srp_chkready() - evaluate the transport layer state before I/O + * + * Returns a SCSI result code that can be returned by the LLD queuecommand() + * implementation. The role of this function is similar to that of + * fc_remote_port_chkready(). 
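The scsi_transport_srp additions above give initiator LLDs a reconnect / fast_io_fail / dev_loss state machine, and srp_chkready() translates the rport state into a SCSI result for the queuecommand path. A hedged sketch of how an initiator might consume it; struct example_target and example_queuecommand() are hypothetical:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_srp.h>

    struct example_target {
            struct srp_rport *rport;        /* from srp_rport_add() */
    };

    static int example_queuecommand(struct Scsi_Host *shost,
                                    struct scsi_cmnd *scmnd)
    {
            struct example_target *target = shost_priv(shost);

            /* 0 while running/blocked, DID_TRANSPORT_FAILFAST once
             * fast_io_fail_tmo fires, DID_NO_CONNECT once the rport
             * is lost. */
            scmnd->result = srp_chkready(target->rport);
            if (scmnd->result) {
                    scmnd->scsi_done(scmnd);
                    return 0;
            }

            /* ...normal submission path... */
            return 0;
    }

The corresponding srp_function_template would set has_rport_state and provide .reconnect and .terminate_rport_io callbacks so that srp_reconnect_rport() and the fail-fast/dev-loss timers can drive the state transitions.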
+ */ +static inline int srp_chkready(struct srp_rport *rport) +{ + switch (rport->state) { + case SRP_RPORT_RUNNING: + case SRP_RPORT_BLOCKED: + default: + return 0; + case SRP_RPORT_FAIL_FAST: + return DID_TRANSPORT_FAILFAST << 16; + case SRP_RPORT_LOST: + return DID_NO_CONNECT << 16; + } +} + #endif diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index f18b3b76e01..4832d75dcba 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -162,12 +162,14 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, { EXTENT_FLAG_LOGGING, "LOGGING" }, \ { EXTENT_FLAG_FILLING, "FILLING" }) -TRACE_EVENT(btrfs_get_extent, +TRACE_EVENT_CONDITION(btrfs_get_extent, TP_PROTO(struct btrfs_root *root, struct extent_map *map), TP_ARGS(root, map), + TP_CONDITION(map), + TP_STRUCT__entry( __field( u64, root_objectid ) __field( u64, start ) diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index db0b825b481..44b05a09f19 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -68,6 +68,9 @@ #define AUDIT_MAKE_EQUIV 1015 /* Append to watched tree */ #define AUDIT_TTY_GET 1016 /* Get TTY auditing status */ #define AUDIT_TTY_SET 1017 /* Set TTY auditing status */ +#define AUDIT_SET_FEATURE 1018 /* Turn an audit feature on or off */ +#define AUDIT_GET_FEATURE 1019 /* Get which features are enabled */ +#define AUDIT_FEATURE_CHANGE 1020 /* audit log listing feature changes */ #define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */ #define AUDIT_USER_AVC 1107 /* We filter this differently */ @@ -357,6 +360,12 @@ enum { #define AUDIT_PERM_READ 4 #define AUDIT_PERM_ATTR 8 +/* MAX_AUDIT_MESSAGE_LENGTH is set in audit:lib/libaudit.h as: + * 8970 // PATH_MAX*2+CONTEXT_SIZE*2+11+256+1 + * max header+body+tailer: 44 + 29 + 32 + 262 + 7 + pad + */ +#define AUDIT_MESSAGE_TEXT_MAX 8560 + struct audit_status { __u32 mask; /* Bit mask for valid entries */ __u32 enabled; /* 1 = enabled, 0 = disabled */ @@ -368,11 +377,28 @@ struct audit_status { __u32 backlog; /* messages waiting in queue */ }; +struct audit_features { +#define AUDIT_FEATURE_VERSION 1 + __u32 vers; + __u32 mask; /* which bits we are dealing with */ + __u32 features; /* which feature to enable/disable */ + __u32 lock; /* which features to lock */ +}; + +#define AUDIT_FEATURE_ONLY_UNSET_LOGINUID 0 +#define AUDIT_FEATURE_LOGINUID_IMMUTABLE 1 +#define AUDIT_LAST_FEATURE AUDIT_FEATURE_LOGINUID_IMMUTABLE + +#define audit_feature_valid(x) ((x) >= 0 && (x) <= AUDIT_LAST_FEATURE) +#define AUDIT_FEATURE_TO_MASK(x) (1 << ((x) & 31)) /* mask for __u32 */ + struct audit_tty_status { __u32 enabled; /* 1 = enabled, 0 = disabled */ __u32 log_passwd; /* 1 = enabled, 0 = disabled */ }; +#define AUDIT_UID_UNSET (unsigned int)-1 + /* audit_rule_data supports filter rules with both integer and string * fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and * AUDIT_LIST_RULES requests. 
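The AUDIT_GET_FEATURE/AUDIT_SET_FEATURE message types and struct audit_features above are the userspace-visible side of the feature switches handled in kernel/audit.c further down. A hedged userspace sketch (raw netlink; it assumes the updated header, needs CAP_AUDIT_CONTROL, and omits error handling and reading the kernel's ACK):

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/audit.h>

    /* Enable AUDIT_FEATURE_LOGINUID_IMMUTABLE and lock it until reboot. */
    static int example_set_loginuid_immutable(void)
    {
            struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
            struct {
                    struct nlmsghdr nlh;
                    struct audit_features feat;
            } req;
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);

            memset(&req, 0, sizeof(req));
            req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.feat));
            req.nlh.nlmsg_type  = AUDIT_SET_FEATURE;
            req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

            req.feat.vers     = AUDIT_FEATURE_VERSION;
            req.feat.mask     = AUDIT_FEATURE_TO_MASK(AUDIT_FEATURE_LOGINUID_IMMUTABLE);
            req.feat.features = req.feat.mask;  /* turn the feature on...    */
            req.feat.lock     = req.feat.mask;  /* ...and refuse later flips */

            return sendto(fd, &req, req.nlh.nlmsg_len, 0,
                          (struct sockaddr *)&addr, sizeof(addr)) < 0 ? -1 : 0;
    }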
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index c880a417d8a..1af72d8228e 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h @@ -27,6 +27,7 @@ struct genlmsghdr { */ #define GENL_ID_GENERATE 0 #define GENL_ID_CTRL NLMSG_MIN_TYPE +#define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1) /************************************************************************** * Controller diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h new file mode 100644 index 00000000000..ca18c45f830 --- /dev/null +++ b/include/uapi/linux/hash_info.h @@ -0,0 +1,37 @@ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _UAPI_LINUX_HASH_INFO_H +#define _UAPI_LINUX_HASH_INFO_H + +enum hash_algo { + HASH_ALGO_MD4, + HASH_ALGO_MD5, + HASH_ALGO_SHA1, + HASH_ALGO_RIPE_MD_160, + HASH_ALGO_SHA256, + HASH_ALGO_SHA384, + HASH_ALGO_SHA512, + HASH_ALGO_SHA224, + HASH_ALGO_RIPE_MD_128, + HASH_ALGO_RIPE_MD_256, + HASH_ALGO_RIPE_MD_320, + HASH_ALGO_WP_256, + HASH_ALGO_WP_384, + HASH_ALGO_WP_512, + HASH_ALGO_TGR_128, + HASH_ALGO_TGR_160, + HASH_ALGO_TGR_192, + HASH_ALGO__LAST +}; + +#endif /* _UAPI_LINUX_HASH_INFO_H */ diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index c9b7f4faf97..840cb990abe 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -56,5 +56,6 @@ #define KEYCTL_REJECT 19 /* reject a partially constructed key */ #define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */ #define KEYCTL_INVALIDATE 21 /* invalidate a key */ +#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */ #endif /* _LINUX_KEYCTL_H */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 307f293477e..a806687ad98 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -763,13 +763,14 @@ enum { TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ - TCA_FQ_FLOW_DEFAULT_RATE,/* for sockets with unspecified sk_rate, - * use the following rate - */ + TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ + + TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */ + __TCA_FQ_MAX }; diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index fe1a5406d4d..f7cf7f35114 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -16,6 +16,7 @@ #define _MD_P_H #include <linux/types.h> +#include <asm/byteorder.h> /* * RAID superblock. diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 083bb5a5aae..1666aabbbb8 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -160,6 +160,10 @@ enum v4l2_colorfx { * of controls. Total of 16 controls is reserved for this driver */ #define V4L2_CID_USER_SI476X_BASE (V4L2_CID_USER_BASE + 0x1040) +/* The base for the TI VPE driver controls. 
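The new uapi enum hash_algo above gives kernel and userspace a common numbering for digest algorithms; in-kernel it indexes the name and size tables provided by crypto/hash_info (added alongside this header), which is how module signing below resolves a signature's hash field. A hedged sketch; example_alloc_hash() is hypothetical:

    #include <linux/err.h>
    #include <crypto/hash.h>
    #include <crypto/hash_info.h>

    static struct crypto_shash *example_alloc_hash(enum hash_algo algo)
    {
            /* hash_algo_name[HASH_ALGO_SHA256] is "sha256", and so on. */
            if (algo >= HASH_ALGO__LAST || !hash_algo_name[algo])
                    return ERR_PTR(-ENOPKG);

            return crypto_alloc_shash(hash_algo_name[algo], 0, 0);
    }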
Total of 16 controls is reserved for + * this driver */ +#define V4L2_CID_USER_TI_VPE_BASE (V4L2_CID_USER_BASE + 0x1050) + /* MPEG-class control IDs */ /* The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical */ diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index e3ddd86c90a..cbfdd4ca951 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -87,10 +87,11 @@ enum { IB_USER_VERBS_CMD_CLOSE_XRCD, IB_USER_VERBS_CMD_CREATE_XSRQ, IB_USER_VERBS_CMD_OPEN_QP, -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING - IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, - IB_USER_VERBS_CMD_DESTROY_FLOW -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ +}; + +enum { + IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, + IB_USER_VERBS_EX_CMD_DESTROY_FLOW }; /* @@ -122,22 +123,24 @@ struct ib_uverbs_comp_event_desc { * the rest of the command struct based on these value. */ +#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff +#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u +#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24 + +#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80 + struct ib_uverbs_cmd_hdr { __u32 command; __u16 in_words; __u16 out_words; }; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING -struct ib_uverbs_cmd_hdr_ex { - __u32 command; - __u16 in_words; - __u16 out_words; +struct ib_uverbs_ex_cmd_hdr { + __u64 response; __u16 provider_in_words; __u16 provider_out_words; __u32 cmd_hdr_reserved; }; -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ struct ib_uverbs_get_context { __u64 response; @@ -700,62 +703,71 @@ struct ib_uverbs_detach_mcast { __u64 driver_data[0]; }; -#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING -struct ib_kern_eth_filter { +struct ib_uverbs_flow_spec_hdr { + __u32 type; + __u16 size; + __u16 reserved; + /* followed by flow_spec */ + __u64 flow_spec_data[0]; +}; + +struct ib_uverbs_flow_eth_filter { __u8 dst_mac[6]; __u8 src_mac[6]; __be16 ether_type; __be16 vlan_tag; }; -struct ib_kern_spec_eth { - __u32 type; - __u16 size; - __u16 reserved; - struct ib_kern_eth_filter val; - struct ib_kern_eth_filter mask; +struct ib_uverbs_flow_spec_eth { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + struct ib_uverbs_flow_eth_filter val; + struct ib_uverbs_flow_eth_filter mask; }; -struct ib_kern_ipv4_filter { +struct ib_uverbs_flow_ipv4_filter { __be32 src_ip; __be32 dst_ip; }; -struct ib_kern_spec_ipv4 { - __u32 type; - __u16 size; - __u16 reserved; - struct ib_kern_ipv4_filter val; - struct ib_kern_ipv4_filter mask; +struct ib_uverbs_flow_spec_ipv4 { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + struct ib_uverbs_flow_ipv4_filter val; + struct ib_uverbs_flow_ipv4_filter mask; }; -struct ib_kern_tcp_udp_filter { +struct ib_uverbs_flow_tcp_udp_filter { __be16 dst_port; __be16 src_port; }; -struct ib_kern_spec_tcp_udp { - __u32 type; - __u16 size; - __u16 reserved; - struct ib_kern_tcp_udp_filter val; - struct ib_kern_tcp_udp_filter mask; -}; - -struct ib_kern_spec { +struct ib_uverbs_flow_spec_tcp_udp { union { + struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; - struct ib_kern_spec_eth eth; - struct ib_kern_spec_ipv4 ipv4; - struct ib_kern_spec_tcp_udp tcp_udp; }; + struct ib_uverbs_flow_tcp_udp_filter val; + struct 
ib_uverbs_flow_tcp_udp_filter mask; }; -struct ib_kern_flow_attr { +struct ib_uverbs_flow_attr { __u32 type; __u16 size; __u16 priority; @@ -767,13 +779,13 @@ struct ib_kern_flow_attr { * struct ib_flow_spec_xxx * struct ib_flow_spec_yyy */ + struct ib_uverbs_flow_spec_hdr flow_specs[0]; }; struct ib_uverbs_create_flow { __u32 comp_mask; - __u64 response; __u32 qp_handle; - struct ib_kern_flow_attr flow_attr; + struct ib_uverbs_flow_attr flow_attr; }; struct ib_uverbs_create_flow_resp { @@ -785,7 +797,6 @@ struct ib_uverbs_destroy_flow { __u32 comp_mask; __u32 flow_handle; }; -#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */ struct ib_uverbs_create_srq { __u64 response; diff --git a/init/Kconfig b/init/Kconfig index 3fc8a2f2fac..79383d3aa5d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -301,20 +301,6 @@ config AUDIT_TREE depends on AUDITSYSCALL select FSNOTIFY -config AUDIT_LOGINUID_IMMUTABLE - bool "Make audit loginuid immutable" - depends on AUDIT - help - The config option toggles if a task setting its loginuid requires - CAP_SYS_AUDITCONTROL or if that task should require no special permissions - but should instead only allow setting its loginuid if it was never - previously set. On systems which use systemd or a similar central - process to restart login services this should be set to true. On older - systems in which an admin would typically have to directly stop and - start processes this should be set to false. Setting this to true allows - one to drop potentially dangerous capabilites from the login tasks, - but may not be backwards compatible with older init systems. - source "kernel/irq/Kconfig" source "kernel/time/Kconfig" @@ -1669,6 +1655,18 @@ config BASE_SMALL default 0 if BASE_FULL default 1 if !BASE_FULL +config SYSTEM_TRUSTED_KEYRING + bool "Provide system-wide ring of trusted keys" + depends on KEYS + help + Provide a system keyring to which trusted keys can be added. Keys in + the keyring are considered to be trusted. Keys may be added at will + by the kernel from compiled-in data and from hardware key stores, but + userspace may only add extra keys if those keys can be verified by + keys already in the keyring. + + Keys in this keyring are used by module signature checking. 
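The reworked ib_user_verbs.h hunk above drops the experimental #ifdef and instead marks extended verbs commands with a flag in the upper byte of the command word; extended commands then carry a struct ib_uverbs_ex_cmd_hdr (holding the response pointer) after the basic header. A hedged sketch of taking the command word apart with the new masks; example_parse_hdr() is hypothetical:

    #include <linux/printk.h>
    #include <rdma/ib_user_verbs.h>

    static void example_parse_hdr(const struct ib_uverbs_cmd_hdr *hdr)
    {
            __u32 command = hdr->command & IB_USER_VERBS_CMD_COMMAND_MASK;
            __u32 flags   = (hdr->command & IB_USER_VERBS_CMD_FLAGS_MASK) >>
                            IB_USER_VERBS_CMD_FLAGS_SHIFT;

            if (flags & IB_USER_VERBS_CMD_FLAG_EXTENDED)
                    pr_debug("extended verbs command %u\n", command);
            else
                    pr_debug("legacy verbs command %u\n", command);
    }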
+ menuconfig MODULES bool "Enable loadable module support" option modules @@ -1742,6 +1740,7 @@ config MODULE_SRCVERSION_ALL config MODULE_SIG bool "Module signature verification" depends on MODULES + select SYSTEM_TRUSTED_KEYRING select KEYS select CRYPTO select ASYMMETRIC_KEY_TYPE diff --git a/init/main.c b/init/main.c index 01573fdfa18..febc511e078 100644 --- a/init/main.c +++ b/init/main.c @@ -476,7 +476,7 @@ static void __init mm_init(void) mem_init(); kmem_cache_init(); percpu_init_late(); - pgtable_init(); + pgtable_cache_init(); vmalloc_init(); } diff --git a/ipc/shm.c b/ipc/shm.c index d69739610fd..7a51443a51d 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -208,15 +208,18 @@ static void shm_open(struct vm_area_struct *vma) */ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { + struct file *shm_file; + + shm_file = shp->shm_file; + shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); - if (!is_file_hugepages(shp->shm_file)) - shmem_lock(shp->shm_file, 0, shp->mlock_user); + if (!is_file_hugepages(shm_file)) + shmem_lock(shm_file, 0, shp->mlock_user); else if (shp->mlock_user) - user_shm_unlock(file_inode(shp->shm_file)->i_size, - shp->mlock_user); - fput (shp->shm_file); + user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user); + fput(shm_file); ipc_rcu_putref(shp, shm_rcu_free); } @@ -974,15 +977,25 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) ipc_lock_object(&shp->shm_perm); if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); - err = -EPERM; if (!uid_eq(euid, shp->shm_perm.uid) && - !uid_eq(euid, shp->shm_perm.cuid)) + !uid_eq(euid, shp->shm_perm.cuid)) { + err = -EPERM; goto out_unlock0; - if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) + } + if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { + err = -EPERM; goto out_unlock0; + } } shm_file = shp->shm_file; + + /* check if shm_destroy() is tearing down shp */ + if (shm_file == NULL) { + err = -EIDRM; + goto out_unlock0; + } + if (is_file_hugepages(shm_file)) goto out_unlock0; @@ -1101,6 +1114,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, goto out_unlock; ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ + if (shp->shm_file == NULL) { + ipc_unlock_object(&shp->shm_perm); + err = -EIDRM; + goto out_unlock; + } + path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; diff --git a/kernel/Makefile b/kernel/Makefile index 09a9c94f42b..bbaf7d59c1b 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -41,8 +41,9 @@ ifneq ($(CONFIG_SMP),y) obj-y += up.o endif obj-$(CONFIG_UID16) += uid16.o +obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o +obj-$(CONFIG_MODULE_SIG) += module_signing.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o obj-$(CONFIG_KEXEC) += kexec.o @@ -122,19 +123,52 @@ targets += timeconst.h $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE $(call if_changed,bc) -ifeq ($(CONFIG_MODULE_SIG),y) +############################################################################### +# +# Roll all the X.509 certificates that we can find together and pull them into +# the kernel so that they get loaded into the system trusted keyring during +# boot. 
# -# Pull the signing certificate and any extra certificates into the kernel +# We look in the source root and the build root for all files whose name ends +# in ".x509". Unfortunately, this will generate duplicate filenames, so we +# have make canonicalise the pathnames and then sort them to discard the +# duplicates. # +############################################################################### +ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) +X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) +X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 +X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ + $(or $(realpath $(CERT)),$(CERT)))) + +ifeq ($(X509_CERTIFICATES),) +$(warning *** No X.509 certificates found ***) +endif + +ifneq ($(wildcard $(obj)/.x509.list),) +ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES)) +$(info X.509 certificate list changed) +$(shell rm $(obj)/.x509.list) +endif +endif + +kernel/system_certificates.o: $(obj)/x509_certificate_list -quiet_cmd_touch = TOUCH $@ - cmd_touch = touch $@ +quiet_cmd_x509certs = CERTS $@ + cmd_x509certs = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; echo " - Including cert $(X509)") -extra_certificates: - $(call cmd,touch) +targets += $(obj)/x509_certificate_list +$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list + $(call if_changed,x509certs) -kernel/modsign_certificate.o: signing_key.x509 extra_certificates +targets += $(obj)/.x509.list +$(obj)/.x509.list: + @echo $(X509_CERTIFICATES) >$@ +clean-files := x509_certificate_list .x509.list +endif + +ifeq ($(CONFIG_MODULE_SIG),y) ############################################################################### # # If module signing is requested, say by allyesconfig, but a key has not been diff --git a/kernel/audit.c b/kernel/audit.c index 7b0e23a740c..906ae5a0233 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -60,7 +60,6 @@ #ifdef CONFIG_SECURITY #include <linux/security.h> #endif -#include <net/netlink.h> #include <linux/freezer.h> #include <linux/tty.h> #include <linux/pid_namespace.h> @@ -140,6 +139,17 @@ static struct task_struct *kauditd_task; static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); +static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION, + .mask = -1, + .features = 0, + .lock = 0,}; + +static char *audit_feature_names[2] = { + "only_unset_loginuid", + "loginuid_immutable", +}; + + /* Serialize requests from userspace. 
*/ DEFINE_MUTEX(audit_cmd_mutex); @@ -584,6 +594,8 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) return -EOPNOTSUPP; case AUDIT_GET: case AUDIT_SET: + case AUDIT_GET_FEATURE: + case AUDIT_SET_FEATURE: case AUDIT_LIST_RULES: case AUDIT_ADD_RULE: case AUDIT_DEL_RULE: @@ -613,7 +625,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type) int rc = 0; uid_t uid = from_kuid(&init_user_ns, current_uid()); - if (!audit_enabled) { + if (!audit_enabled && msg_type != AUDIT_USER_AVC) { *ab = NULL; return rc; } @@ -628,6 +640,94 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type) return rc; } +int is_audit_feature_set(int i) +{ + return af.features & AUDIT_FEATURE_TO_MASK(i); +} + + +static int audit_get_feature(struct sk_buff *skb) +{ + u32 seq; + + seq = nlmsg_hdr(skb)->nlmsg_seq; + + audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, + &af, sizeof(af)); + + return 0; +} + +static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature, + u32 old_lock, u32 new_lock, int res) +{ + struct audit_buffer *ab; + + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); + audit_log_format(ab, "feature=%s new=%d old=%d old_lock=%d new_lock=%d res=%d", + audit_feature_names[which], !!old_feature, !!new_feature, + !!old_lock, !!new_lock, res); + audit_log_end(ab); +} + +static int audit_set_feature(struct sk_buff *skb) +{ + struct audit_features *uaf; + int i; + + BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > sizeof(audit_feature_names)/sizeof(audit_feature_names[0])); + uaf = nlmsg_data(nlmsg_hdr(skb)); + + /* if there is ever a version 2 we should handle that here */ + + for (i = 0; i <= AUDIT_LAST_FEATURE; i++) { + u32 feature = AUDIT_FEATURE_TO_MASK(i); + u32 old_feature, new_feature, old_lock, new_lock; + + /* if we are not changing this feature, move along */ + if (!(feature & uaf->mask)) + continue; + + old_feature = af.features & feature; + new_feature = uaf->features & feature; + new_lock = (uaf->lock | af.lock) & feature; + old_lock = af.lock & feature; + + /* are we changing a locked feature? 
*/ + if ((af.lock & feature) && (new_feature != old_feature)) { + audit_log_feature_change(i, old_feature, new_feature, + old_lock, new_lock, 0); + return -EPERM; + } + } + /* nothing invalid, do the changes */ + for (i = 0; i <= AUDIT_LAST_FEATURE; i++) { + u32 feature = AUDIT_FEATURE_TO_MASK(i); + u32 old_feature, new_feature, old_lock, new_lock; + + /* if we are not changing this feature, move along */ + if (!(feature & uaf->mask)) + continue; + + old_feature = af.features & feature; + new_feature = uaf->features & feature; + old_lock = af.lock & feature; + new_lock = (uaf->lock | af.lock) & feature; + + if (new_feature != old_feature) + audit_log_feature_change(i, old_feature, new_feature, + old_lock, new_lock, 1); + + if (new_feature) + af.features |= feature; + else + af.features &= ~feature; + af.lock |= new_lock; + } + + return 0; +} + static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { u32 seq; @@ -659,6 +759,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) switch (msg_type) { case AUDIT_GET: + memset(&status_set, 0, sizeof(status_set)); status_set.enabled = audit_enabled; status_set.failure = audit_failure; status_set.pid = audit_pid; @@ -670,7 +771,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) &status_set, sizeof(status_set)); break; case AUDIT_SET: - if (nlh->nlmsg_len < sizeof(struct audit_status)) + if (nlmsg_len(nlh) < sizeof(struct audit_status)) return -EINVAL; status_get = (struct audit_status *)data; if (status_get->mask & AUDIT_STATUS_ENABLED) { @@ -699,6 +800,16 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) err = audit_set_backlog_limit(status_get->backlog_limit); break; + case AUDIT_GET_FEATURE: + err = audit_get_feature(skb); + if (err) + return err; + break; + case AUDIT_SET_FEATURE: + err = audit_set_feature(skb); + if (err) + return err; + break; case AUDIT_USER: case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: @@ -715,7 +826,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } audit_log_common_recv_msg(&ab, msg_type); if (msg_type != AUDIT_USER_TTY) - audit_log_format(ab, " msg='%.1024s'", + audit_log_format(ab, " msg='%.*s'", + AUDIT_MESSAGE_TEXT_MAX, (char *)data); else { int size; @@ -818,7 +930,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct task_struct *tsk = current; spin_lock(&tsk->sighand->siglock); - s.enabled = tsk->signal->audit_tty != 0; + s.enabled = tsk->signal->audit_tty; s.log_passwd = tsk->signal->audit_tty_log_passwd; spin_unlock(&tsk->sighand->siglock); @@ -832,7 +944,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) memset(&s, 0, sizeof(s)); /* guard against past and future API changes */ - memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len)); + memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh))); if ((s.enabled != 0 && s.enabled != 1) || (s.log_passwd != 0 && s.log_passwd != 1)) return -EINVAL; @@ -1067,13 +1179,6 @@ static void wait_for_auditd(unsigned long sleep_time) remove_wait_queue(&audit_backlog_wait, &wait); } -/* Obtain an audit buffer. This routine does locking to obtain the - * audit buffer, but then no locking is required for calls to - * audit_log_*format. If the tsk is a task that is currently in a - * syscall, then the syscall is marked as auditable and an audit record - * will be written at syscall exit. 
If there is no associated task, tsk - * should be NULL. */ - /** * audit_log_start - obtain an audit buffer * @ctx: audit_context (may be NULL) @@ -1389,7 +1494,7 @@ void audit_log_session_info(struct audit_buffer *ab) u32 sessionid = audit_get_sessionid(current); uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current)); - audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid); + audit_log_format(ab, " auid=%u ses=%u", auid, sessionid); } void audit_log_key(struct audit_buffer *ab, char *key) @@ -1536,6 +1641,26 @@ void audit_log_name(struct audit_context *context, struct audit_names *n, } } + /* log the audit_names record type */ + audit_log_format(ab, " nametype="); + switch(n->type) { + case AUDIT_TYPE_NORMAL: + audit_log_format(ab, "NORMAL"); + break; + case AUDIT_TYPE_PARENT: + audit_log_format(ab, "PARENT"); + break; + case AUDIT_TYPE_CHILD_DELETE: + audit_log_format(ab, "DELETE"); + break; + case AUDIT_TYPE_CHILD_CREATE: + audit_log_format(ab, "CREATE"); + break; + default: + audit_log_format(ab, "UNKNOWN"); + break; + } + audit_log_fcaps(ab, n); audit_log_end(ab); } diff --git a/kernel/audit.h b/kernel/audit.h index 123c9b7c397..b779642b29a 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -197,6 +197,9 @@ struct audit_context { int fd; int flags; } mmap; + struct { + int argc; + } execve; }; int fds[2]; diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index f7aee8be7fb..51f3fd4c1ed 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -343,6 +343,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) case AUDIT_DEVMINOR: case AUDIT_EXIT: case AUDIT_SUCCESS: + case AUDIT_INODE: /* bit ops are only useful on syscall args */ if (f->op == Audit_bitmask || f->op == Audit_bittest) return -EINVAL; @@ -423,7 +424,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, f->lsm_rule = NULL; /* Support legacy tests for a valid loginuid */ - if ((f->type == AUDIT_LOGINUID) && (f->val == ~0U)) { + if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) { f->type = AUDIT_LOGINUID_SET; f->val = 0; } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 9845cb32b60..90594c9f755 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -95,13 +95,6 @@ struct audit_aux_data { /* Number of target pids per aux struct. */ #define AUDIT_AUX_PIDS 16 -struct audit_aux_data_execve { - struct audit_aux_data d; - int argc; - int envc; - struct mm_struct *mm; -}; - struct audit_aux_data_pids { struct audit_aux_data d; pid_t target_pid[AUDIT_AUX_PIDS]; @@ -121,12 +114,6 @@ struct audit_aux_data_bprm_fcaps { struct audit_cap_data new_pcap; }; -struct audit_aux_data_capset { - struct audit_aux_data d; - pid_t pid; - struct audit_cap_data cap; -}; - struct audit_tree_refs { struct audit_tree_refs *next; struct audit_chunk *c[31]; @@ -566,7 +553,7 @@ static int audit_filter_rules(struct task_struct *tsk, break; case AUDIT_INODE: if (name) - result = (name->ino == f->val); + result = audit_comparator(name->ino, f->op, f->val); else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(n->ino, f->op, f->val)) { @@ -943,8 +930,10 @@ int audit_alloc(struct task_struct *tsk) return 0; /* Return if not auditing. 
*/ state = audit_filter_task(tsk, &key); - if (state == AUDIT_DISABLED) + if (state == AUDIT_DISABLED) { + clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT); return 0; + } if (!(context = audit_alloc_context(state))) { kfree(key); @@ -1149,20 +1138,16 @@ static int audit_log_single_execve_arg(struct audit_context *context, } static void audit_log_execve_info(struct audit_context *context, - struct audit_buffer **ab, - struct audit_aux_data_execve *axi) + struct audit_buffer **ab) { int i, len; size_t len_sent = 0; const char __user *p; char *buf; - if (axi->mm != current->mm) - return; /* execve failed, no additional info */ - - p = (const char __user *)axi->mm->arg_start; + p = (const char __user *)current->mm->arg_start; - audit_log_format(*ab, "argc=%d", axi->argc); + audit_log_format(*ab, "argc=%d", context->execve.argc); /* * we need some kernel buffer to hold the userspace args. Just @@ -1176,7 +1161,7 @@ static void audit_log_execve_info(struct audit_context *context, return; } - for (i = 0; i < axi->argc; i++) { + for (i = 0; i < context->execve.argc; i++) { len = audit_log_single_execve_arg(context, ab, i, &len_sent, p, buf); if (len <= 0) @@ -1279,6 +1264,9 @@ static void show_special(struct audit_context *context, int *call_panic) audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, context->mmap.flags); break; } + case AUDIT_EXECVE: { + audit_log_execve_info(context, &ab); + break; } } audit_log_end(ab); } @@ -1325,11 +1313,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts switch (aux->type) { - case AUDIT_EXECVE: { - struct audit_aux_data_execve *axi = (void *)aux; - audit_log_execve_info(context, &ab, axi); - break; } - case AUDIT_BPRM_FCAPS: { struct audit_aux_data_bprm_fcaps *axs = (void *)aux; audit_log_format(ab, "fver=%x", axs->fcap_ver); @@ -1964,6 +1947,43 @@ int auditsc_get_stamp(struct audit_context *ctx, /* global counter which is incremented every time something logs in */ static atomic_t session_id = ATOMIC_INIT(0); +static int audit_set_loginuid_perm(kuid_t loginuid) +{ + /* if we are unset, we don't need privs */ + if (!audit_loginuid_set(current)) + return 0; + /* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/ + if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE)) + return -EPERM; + /* it is set, you need permission */ + if (!capable(CAP_AUDIT_CONTROL)) + return -EPERM; + /* reject if this is not an unset and we don't allow that */ + if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid)) + return -EPERM; + return 0; +} + +static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, + unsigned int oldsessionid, unsigned int sessionid, + int rc) +{ + struct audit_buffer *ab; + uid_t uid, ologinuid, nloginuid; + + uid = from_kuid(&init_user_ns, task_uid(current)); + ologinuid = from_kuid(&init_user_ns, koldloginuid); + nloginuid = from_kuid(&init_user_ns, kloginuid), + + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); + if (!ab) + return; + audit_log_format(ab, "pid=%d uid=%u old auid=%u new auid=%u old " + "ses=%u new ses=%u res=%d", current->pid, uid, ologinuid, + nloginuid, oldsessionid, sessionid, !rc); + audit_log_end(ab); +} + /** * audit_set_loginuid - set current task's audit_context loginuid * @loginuid: loginuid value @@ -1975,37 +1995,26 @@ static atomic_t session_id = ATOMIC_INIT(0); int audit_set_loginuid(kuid_t loginuid) { struct task_struct *task = current; - struct audit_context *context = task->audit_context; - unsigned int sessionid; 
+ unsigned int oldsessionid, sessionid = (unsigned int)-1; + kuid_t oldloginuid; + int rc; -#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE - if (audit_loginuid_set(task)) - return -EPERM; -#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */ - if (!capable(CAP_AUDIT_CONTROL)) - return -EPERM; -#endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */ + oldloginuid = audit_get_loginuid(current); + oldsessionid = audit_get_sessionid(current); - sessionid = atomic_inc_return(&session_id); - if (context && context->in_syscall) { - struct audit_buffer *ab; + rc = audit_set_loginuid_perm(loginuid); + if (rc) + goto out; + + /* are we setting or clearing? */ + if (uid_valid(loginuid)) + sessionid = atomic_inc_return(&session_id); - ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); - if (ab) { - audit_log_format(ab, "login pid=%d uid=%u " - "old auid=%u new auid=%u" - " old ses=%u new ses=%u", - task->pid, - from_kuid(&init_user_ns, task_uid(task)), - from_kuid(&init_user_ns, task->loginuid), - from_kuid(&init_user_ns, loginuid), - task->sessionid, sessionid); - audit_log_end(ab); - } - } task->sessionid = sessionid; task->loginuid = loginuid; - return 0; +out: + audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc); + return rc; } /** @@ -2126,22 +2135,12 @@ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mo context->ipc.has_perm = 1; } -int __audit_bprm(struct linux_binprm *bprm) +void __audit_bprm(struct linux_binprm *bprm) { - struct audit_aux_data_execve *ax; struct audit_context *context = current->audit_context; - ax = kmalloc(sizeof(*ax), GFP_KERNEL); - if (!ax) - return -ENOMEM; - - ax->argc = bprm->argc; - ax->envc = bprm->envc; - ax->mm = bprm->mm; - ax->d.type = AUDIT_EXECVE; - ax->d.next = context->aux; - context->aux = (void *)ax; - return 0; + context->type = AUDIT_EXECVE; + context->execve.argc = bprm->argc; } diff --git a/kernel/bounds.c b/kernel/bounds.c index 578782ef6ae..5253204afdc 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -11,7 +11,7 @@ #include <linux/kbuild.h> #include <linux/page_cgroup.h> #include <linux/log2.h> -#include <linux/spinlock.h> +#include <linux/spinlock_types.h> void foo(void) { diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e0839bcd48c..4c62513fe19 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -895,11 +895,6 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) iput(inode); } -static int cgroup_delete(const struct dentry *d) -{ - return 1; -} - static void remove_dir(struct dentry *d) { struct dentry *parent = dget(d->d_parent); @@ -1486,7 +1481,7 @@ static int cgroup_get_rootdir(struct super_block *sb) { static const struct dentry_operations cgroup_dops = { .d_iput = cgroup_diput, - .d_delete = cgroup_delete, + .d_delete = always_delete_dentry, }; struct inode *inode = diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 1162f1030f1..3320b84cc60 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -14,6 +14,7 @@ enum { _IRQ_NO_BALANCING = IRQ_NO_BALANCING, _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, + _IRQ_IS_POLLED = IRQ_IS_POLLED, _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; @@ -26,6 +27,7 @@ enum { #define IRQ_NOAUTOEN GOT_YOU_MORON #define IRQ_NESTED_THREAD GOT_YOU_MORON #define IRQ_PER_CPU_DEVID GOT_YOU_MORON +#define IRQ_IS_POLLED GOT_YOU_MORON #undef IRQF_MODIFY_MASK #define IRQF_MODIFY_MASK GOT_YOU_MORON @@ -147,3 +149,8 @@ static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) { return 
desc->status_use_accessors & _IRQ_NESTED_THREAD; } + +static inline bool irq_settings_is_polled(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_IS_POLLED; +} diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 7b5f012bde9..a1d8cc63b56 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -67,8 +67,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) raw_spin_lock(&desc->lock); - /* PER_CPU and nested thread interrupts are never polled */ - if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) + /* + * PER_CPU, nested thread interrupts and interrupts explicitely + * marked polled are excluded from polling. + */ + if (irq_settings_is_per_cpu(desc) || + irq_settings_is_nested_thread(desc) || + irq_settings_is_polled(desc)) goto out; /* @@ -268,7 +273,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc, void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { - if (desc->istate & IRQS_POLL_INPROGRESS) + if (desc->istate & IRQS_POLL_INPROGRESS || + irq_settings_is_polled(desc)) return; /* we get here again via the threaded handler */ diff --git a/kernel/modsign_certificate.S b/kernel/modsign_certificate.S deleted file mode 100644 index 4a9a86d12c8..00000000000 --- a/kernel/modsign_certificate.S +++ /dev/null @@ -1,12 +0,0 @@ -#include <linux/export.h> - -#define GLOBAL(name) \ - .globl VMLINUX_SYMBOL(name); \ - VMLINUX_SYMBOL(name): - - .section ".init.data","aw" - -GLOBAL(modsign_certificate_list) - .incbin "signing_key.x509" - .incbin "extra_certificates" -GLOBAL(modsign_certificate_list_end) diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c deleted file mode 100644 index 7cbd4507a7e..00000000000 --- a/kernel/modsign_pubkey.c +++ /dev/null @@ -1,104 +0,0 @@ -/* Public keys for module signature verification - * - * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/cred.h> -#include <linux/err.h> -#include <keys/asymmetric-type.h> -#include "module-internal.h" - -struct key *modsign_keyring; - -extern __initconst const u8 modsign_certificate_list[]; -extern __initconst const u8 modsign_certificate_list_end[]; - -/* - * We need to make sure ccache doesn't cache the .o file as it doesn't notice - * if modsign.pub changes. - */ -static __initconst const char annoy_ccache[] = __TIME__ "foo"; - -/* - * Load the compiled-in keys - */ -static __init int module_verify_init(void) -{ - pr_notice("Initialise module verification\n"); - - modsign_keyring = keyring_alloc(".module_sign", - KUIDT_INIT(0), KGIDT_INIT(0), - current_cred(), - ((KEY_POS_ALL & ~KEY_POS_SETATTR) | - KEY_USR_VIEW | KEY_USR_READ), - KEY_ALLOC_NOT_IN_QUOTA, NULL); - if (IS_ERR(modsign_keyring)) - panic("Can't allocate module signing keyring\n"); - - return 0; -} - -/* - * Must be initialised before we try and load the keys into the keyring. 
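Relating to the IRQ_IS_POLLED hunks above: a driver whose handler is also invoked from its own poll routine can now opt the line out of the spurious-interrupt poller and of note_interrupt() accounting. A hedged sketch; example_irq_setup() and its arguments are hypothetical:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static int example_irq_setup(unsigned int irq, irq_handler_t handler,
                                 void *dev_id)
    {
            /* Tell kernel/irq/spurious.c to leave this line alone. */
            irq_set_status_flags(irq, IRQ_IS_POLLED);

            return request_irq(irq, handler, 0, "example", dev_id);
    }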
- */ -device_initcall(module_verify_init); - -/* - * Load the compiled-in keys - */ -static __init int load_module_signing_keys(void) -{ - key_ref_t key; - const u8 *p, *end; - size_t plen; - - pr_notice("Loading module verification certificates\n"); - - end = modsign_certificate_list_end; - p = modsign_certificate_list; - while (p < end) { - /* Each cert begins with an ASN.1 SEQUENCE tag and must be more - * than 256 bytes in size. - */ - if (end - p < 4) - goto dodgy_cert; - if (p[0] != 0x30 && - p[1] != 0x82) - goto dodgy_cert; - plen = (p[2] << 8) | p[3]; - plen += 4; - if (plen > end - p) - goto dodgy_cert; - - key = key_create_or_update(make_key_ref(modsign_keyring, 1), - "asymmetric", - NULL, - p, - plen, - (KEY_POS_ALL & ~KEY_POS_SETATTR) | - KEY_USR_VIEW, - KEY_ALLOC_NOT_IN_QUOTA); - if (IS_ERR(key)) - pr_err("MODSIGN: Problem loading in-kernel X.509 certificate (%ld)\n", - PTR_ERR(key)); - else - pr_notice("MODSIGN: Loaded cert '%s'\n", - key_ref_to_ptr(key)->description); - p += plen; - } - - return 0; - -dodgy_cert: - pr_err("MODSIGN: Problem parsing in-kernel X.509 certificate list\n"); - return 0; -} -late_initcall(load_module_signing_keys); diff --git a/kernel/module-internal.h b/kernel/module-internal.h index 24f9247b7d0..915e123a430 100644 --- a/kernel/module-internal.h +++ b/kernel/module-internal.h @@ -9,6 +9,4 @@ * 2 of the Licence, or (at your option) any later version. */ -extern struct key *modsign_keyring; - extern int mod_verify_sig(const void *mod, unsigned long *_modlen); diff --git a/kernel/module_signing.c b/kernel/module_signing.c index f2970bddc5e..be5b8fac4bd 100644 --- a/kernel/module_signing.c +++ b/kernel/module_signing.c @@ -14,6 +14,7 @@ #include <crypto/public_key.h> #include <crypto/hash.h> #include <keys/asymmetric-type.h> +#include <keys/system_keyring.h> #include "module-internal.h" /* @@ -28,7 +29,7 @@ */ struct module_signature { u8 algo; /* Public-key crypto algorithm [enum pkey_algo] */ - u8 hash; /* Digest algorithm [enum pkey_hash_algo] */ + u8 hash; /* Digest algorithm [enum hash_algo] */ u8 id_type; /* Key identifier type [enum pkey_id_type] */ u8 signer_len; /* Length of signer's name */ u8 key_id_len; /* Length of key identifier */ @@ -39,7 +40,7 @@ struct module_signature { /* * Digest the module contents. */ -static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash, +static struct public_key_signature *mod_make_digest(enum hash_algo hash, const void *mod, unsigned long modlen) { @@ -54,7 +55,7 @@ static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash, /* Allocate the hashing algorithm we're going to need and find out how * big the hash operational data will be. */ - tfm = crypto_alloc_shash(pkey_hash_algo[hash], 0, 0); + tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); if (IS_ERR(tfm)) return (PTR_ERR(tfm) == -ENOENT) ? 
ERR_PTR(-ENOPKG) : ERR_CAST(tfm); @@ -157,7 +158,7 @@ static struct key *request_asymmetric_key(const char *signer, size_t signer_len, pr_debug("Look up: \"%s\"\n", id); - key = keyring_search(make_key_ref(modsign_keyring, 1), + key = keyring_search(make_key_ref(system_trusted_keyring, 1), &key_type_asymmetric, id); if (IS_ERR(key)) pr_warn("Request for unknown module key '%s' err %ld\n", @@ -217,7 +218,7 @@ int mod_verify_sig(const void *mod, unsigned long *_modlen) return -ENOPKG; if (ms.hash >= PKEY_HASH__LAST || - !pkey_hash_algo[ms.hash]) + !hash_algo_name[ms.hash]) return -ENOPKG; key = request_asymmetric_key(sig, ms.signer_len, diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 10c22cae83a..b38109e204a 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -792,7 +792,8 @@ void free_basic_memory_bitmaps(void) { struct memory_bitmap *bm1, *bm2; - BUG_ON(!(forbidden_pages_map && free_pages_map)); + if (WARN_ON(!(forbidden_pages_map && free_pages_map))) + return; bm1 = forbidden_pages_map; bm2 = free_pages_map; diff --git a/kernel/power/user.c b/kernel/power/user.c index 24850270c80..98d357584cd 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -70,6 +70,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device, 0, NULL) : -1; data->mode = O_RDONLY; + data->free_bitmaps = false; error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); if (error) pm_notifier_call_chain(PM_POST_HIBERNATION); diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S new file mode 100644 index 00000000000..4aef390671c --- /dev/null +++ b/kernel/system_certificates.S @@ -0,0 +1,10 @@ +#include <linux/export.h> +#include <linux/init.h> + + __INITRODATA + + .globl VMLINUX_SYMBOL(system_certificate_list) +VMLINUX_SYMBOL(system_certificate_list): + .incbin "kernel/x509_certificate_list" + .globl VMLINUX_SYMBOL(system_certificate_list_end) +VMLINUX_SYMBOL(system_certificate_list_end): diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c new file mode 100644 index 00000000000..564dd93430a --- /dev/null +++ b/kernel/system_keyring.c @@ -0,0 +1,105 @@ +/* System trusted keyring for trusted public keys + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
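With the module_signing.c hunk above, signature verification keys off the shared system trusted keyring instead of a module-local keyring, so other verification code can perform the same lookup. A hedged sketch mirroring request_asymmetric_key(); example_find_trusted_key() is hypothetical:

    #include <linux/err.h>
    #include <linux/key.h>
    #include <keys/asymmetric-type.h>
    #include <keys/system_keyring.h>

    static struct key *example_find_trusted_key(const char *description)
    {
            key_ref_t kref;

            /* "1" marks the keyring as possessed for the permission check. */
            kref = keyring_search(make_key_ref(system_trusted_keyring, 1),
                                  &key_type_asymmetric, description);
            if (IS_ERR(kref))
                    return ERR_CAST(kref);

            return key_ref_to_ptr(kref);
    }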
+ */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/cred.h> +#include <linux/err.h> +#include <keys/asymmetric-type.h> +#include <keys/system_keyring.h> +#include "module-internal.h" + +struct key *system_trusted_keyring; +EXPORT_SYMBOL_GPL(system_trusted_keyring); + +extern __initconst const u8 system_certificate_list[]; +extern __initconst const u8 system_certificate_list_end[]; + +/* + * Load the compiled-in keys + */ +static __init int system_trusted_keyring_init(void) +{ + pr_notice("Initialise system trusted keyring\n"); + + system_trusted_keyring = + keyring_alloc(".system_keyring", + KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH), + KEY_ALLOC_NOT_IN_QUOTA, NULL); + if (IS_ERR(system_trusted_keyring)) + panic("Can't allocate system trusted keyring\n"); + + set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags); + return 0; +} + +/* + * Must be initialised before we try and load the keys into the keyring. + */ +device_initcall(system_trusted_keyring_init); + +/* + * Load the compiled-in list of X.509 certificates. + */ +static __init int load_system_certificate_list(void) +{ + key_ref_t key; + const u8 *p, *end; + size_t plen; + + pr_notice("Loading compiled-in X.509 certificates\n"); + + end = system_certificate_list_end; + p = system_certificate_list; + while (p < end) { + /* Each cert begins with an ASN.1 SEQUENCE tag and must be more + * than 256 bytes in size. + */ + if (end - p < 4) + goto dodgy_cert; + if (p[0] != 0x30 && + p[1] != 0x82) + goto dodgy_cert; + plen = (p[2] << 8) | p[3]; + plen += 4; + if (plen > end - p) + goto dodgy_cert; + + key = key_create_or_update(make_key_ref(system_trusted_keyring, 1), + "asymmetric", + NULL, + p, + plen, + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_TRUSTED); + if (IS_ERR(key)) { + pr_err("Problem loading in-kernel X.509 certificate (%ld)\n", + PTR_ERR(key)); + } else { + pr_notice("Loaded X.509 cert '%s'\n", + key_ref_to_ptr(key)->description); + key_ref_put(key); + } + p += plen; + } + + return 0; + +dodgy_cert: + pr_err("Problem parsing in-kernel X.509 certificate list\n"); + return 0; +} +late_initcall(load_system_certificate_list); diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 9f4618eb51c..13d2f7cd65d 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -673,17 +673,18 @@ err: nlmsg_free(rep_skb); } -static struct genl_ops taskstats_ops = { - .cmd = TASKSTATS_CMD_GET, - .doit = taskstats_user_cmd, - .policy = taskstats_cmd_get_policy, - .flags = GENL_ADMIN_PERM, -}; - -static struct genl_ops cgroupstats_ops = { - .cmd = CGROUPSTATS_CMD_GET, - .doit = cgroupstats_user_cmd, - .policy = cgroupstats_cmd_get_policy, +static const struct genl_ops taskstats_ops[] = { + { + .cmd = TASKSTATS_CMD_GET, + .doit = taskstats_user_cmd, + .policy = taskstats_cmd_get_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = CGROUPSTATS_CMD_GET, + .doit = cgroupstats_user_cmd, + .policy = cgroupstats_cmd_get_policy, + }, }; /* Needed early in initialization */ @@ -702,26 +703,13 @@ static int __init taskstats_init(void) { int rc; - rc = genl_register_family(&family); + rc = genl_register_family_with_ops(&family, taskstats_ops); if (rc) return rc; - rc = genl_register_ops(&family, &taskstats_ops); - if (rc < 0) - goto err; - - rc = genl_register_ops(&family, &cgroupstats_ops); - if (rc < 0) - goto err_cgroup_ops; - 
family_registered = 1; pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION); return 0; -err_cgroup_ops: - genl_unregister_ops(&family, &taskstats_ops); -err: - genl_unregister_family(&family); - return rc; } /* diff --git a/kernel/user.c b/kernel/user.c index 5bbb91988e6..a3a0dbfda32 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -51,6 +51,10 @@ struct user_namespace init_user_ns = { .owner = GLOBAL_ROOT_UID, .group = GLOBAL_ROOT_GID, .proc_inum = PROC_USER_INIT_INO, +#ifdef CONFIG_KEYS_KERBEROS_CACHE + .krb_cache_register_sem = + __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), +#endif }; EXPORT_SYMBOL_GPL(init_user_ns); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 13fb1134ba5..240fb62cf39 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -101,6 +101,9 @@ int create_user_ns(struct cred *new) set_cred_user_ns(new, ns); +#ifdef CONFIG_PERSISTENT_KEYRINGS + init_rwsem(&ns->persistent_keyring_register_sem); +#endif return 0; } @@ -130,6 +133,9 @@ void free_user_ns(struct user_namespace *ns) do { parent = ns->parent; +#ifdef CONFIG_PERSISTENT_KEYRINGS + key_put(ns->persistent_keyring_register); +#endif proc_free_inum(ns->proc_inum); kmem_cache_free(user_ns_cachep, ns); ns = parent; diff --git a/lib/Kconfig b/lib/Kconfig index 06dc74200a5..991c98bc4a3 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -322,6 +322,20 @@ config TEXTSEARCH_FSM config BTREE boolean +config ASSOCIATIVE_ARRAY + bool + help + Generic associative array. Can be searched and iterated over whilst + it is being modified. It is also reasonably quick to search and + modify. The algorithms are non-recursive, and the trees are highly + capacious. + + See: + + Documentation/assoc_array.txt + + for more information. + config HAS_IOMEM boolean depends on !NO_IOMEM diff --git a/lib/Makefile b/lib/Makefile index d480a8c9238..b46065fd67a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -47,6 +47,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_BTREE) += btree.o +obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o diff --git a/lib/assoc_array.c b/lib/assoc_array.c new file mode 100644 index 00000000000..17edeaf1918 --- /dev/null +++ b/lib/assoc_array.c @@ -0,0 +1,1746 @@ +/* Generic associative array implementation. + * + * See Documentation/assoc_array.txt for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +//#define DEBUG +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/assoc_array_priv.h> + +/* + * Iterate over an associative array. The caller must hold the RCU read lock + * or better. 
+ */ +static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root, + const struct assoc_array_ptr *stop, + int (*iterator)(const void *leaf, + void *iterator_data), + void *iterator_data) +{ + const struct assoc_array_shortcut *shortcut; + const struct assoc_array_node *node; + const struct assoc_array_ptr *cursor, *ptr, *parent; + unsigned long has_meta; + int slot, ret; + + cursor = root; + +begin_node: + if (assoc_array_ptr_is_shortcut(cursor)) { + /* Descend through a shortcut */ + shortcut = assoc_array_ptr_to_shortcut(cursor); + smp_read_barrier_depends(); + cursor = ACCESS_ONCE(shortcut->next_node); + } + + node = assoc_array_ptr_to_node(cursor); + smp_read_barrier_depends(); + slot = 0; + + /* We perform two passes of each node. + * + * The first pass does all the leaves in this node. This means we + * don't miss any leaves if the node is split up by insertion whilst + * we're iterating over the branches rooted here (we may, however, see + * some leaves twice). + */ + has_meta = 0; + for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = ACCESS_ONCE(node->slots[slot]); + has_meta |= (unsigned long)ptr; + if (ptr && assoc_array_ptr_is_leaf(ptr)) { + /* We need a barrier between the read of the pointer + * and dereferencing the pointer - but only if we are + * actually going to dereference it. + */ + smp_read_barrier_depends(); + + /* Invoke the callback */ + ret = iterator(assoc_array_ptr_to_leaf(ptr), + iterator_data); + if (ret) + return ret; + } + } + + /* The second pass attends to all the metadata pointers. If we follow + * one of these we may find that we don't come back here, but rather go + * back to a replacement node with the leaves in a different layout. + * + * We are guaranteed to make progress, however, as the slot number for + * a particular portion of the key space cannot change - and we + * continue at the back pointer + 1. + */ + if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE)) + goto finished_node; + slot = 0; + +continue_node: + node = assoc_array_ptr_to_node(cursor); + smp_read_barrier_depends(); + + for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = ACCESS_ONCE(node->slots[slot]); + if (assoc_array_ptr_is_meta(ptr)) { + cursor = ptr; + goto begin_node; + } + } + +finished_node: + /* Move up to the parent (may need to skip back over a shortcut) */ + parent = ACCESS_ONCE(node->back_pointer); + slot = node->parent_slot; + if (parent == stop) + return 0; + + if (assoc_array_ptr_is_shortcut(parent)) { + shortcut = assoc_array_ptr_to_shortcut(parent); + smp_read_barrier_depends(); + cursor = parent; + parent = ACCESS_ONCE(shortcut->back_pointer); + slot = shortcut->parent_slot; + if (parent == stop) + return 0; + } + + /* Ascend to next slot in parent node */ + cursor = parent; + slot++; + goto continue_node; +} + +/** + * assoc_array_iterate - Pass all objects in the array to a callback + * @array: The array to iterate over. + * @iterator: The callback function. + * @iterator_data: Private data for the callback function. + * + * Iterate over all the objects in an associative array. Each one will be + * presented to the iterator function. + * + * If the array is being modified concurrently with the iteration then it is + * possible that some objects in the array will be passed to the iterator + * callback more than once - though every object should be passed at least + * once. If this is undesirable then the caller must lock against modification + * for the duration of this function. 
+ * + * The function will return 0 if no objects were in the array or else it will + * return the result of the last iterator function called. Iteration stops + * immediately if any call to the iteration function results in a non-zero + * return. + * + * The caller should hold the RCU read lock or better if concurrent + * modification is possible. + */ +int assoc_array_iterate(const struct assoc_array *array, + int (*iterator)(const void *object, + void *iterator_data), + void *iterator_data) +{ + struct assoc_array_ptr *root = ACCESS_ONCE(array->root); + + if (!root) + return 0; + return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data); +} + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty, + assoc_array_walk_found_terminal_node, + assoc_array_walk_found_wrong_shortcut, +} status; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; /* Node in which leaf might be found */ + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + unsigned long sc_segments; + unsigned long dissimilarity; + } wrong_shortcut; +}; + +/* + * Navigate through the internal tree looking for the closest node to the key. + */ +static enum assoc_array_walk_status +assoc_array_walk(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + struct assoc_array_walk_result *result) +{ + struct assoc_array_shortcut *shortcut; + struct assoc_array_node *node; + struct assoc_array_ptr *cursor, *ptr; + unsigned long sc_segments, dissimilarity; + unsigned long segments; + int level, sc_level, next_sc_level; + int slot; + + pr_devel("-->%s()\n", __func__); + + cursor = ACCESS_ONCE(array->root); + if (!cursor) + return assoc_array_walk_tree_empty; + + level = 0; + + /* Use segments from the key for the new leaf to navigate through the + * internal tree, skipping through nodes and shortcuts that are on + * route to the destination. Eventually we'll come to a slot that is + * either empty or contains a leaf at which point we've found a node in + * which the leaf we're looking for might be found or into which it + * should be inserted. + */ +jumped: + segments = ops->get_key_chunk(index_key, level); + pr_devel("segments[%d]: %lx\n", level, segments); + + if (assoc_array_ptr_is_shortcut(cursor)) + goto follow_shortcut; + +consider_node: + node = assoc_array_ptr_to_node(cursor); + smp_read_barrier_depends(); + + slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK); + slot &= ASSOC_ARRAY_FAN_MASK; + ptr = ACCESS_ONCE(node->slots[slot]); + + pr_devel("consider slot %x [ix=%d type=%lu]\n", + slot, level, (unsigned long)ptr & 3); + + if (!assoc_array_ptr_is_meta(ptr)) { + /* The node doesn't have a node/shortcut pointer in the slot + * corresponding to the index key that we have to follow. + */ + result->terminal_node.node = node; + result->terminal_node.level = level; + result->terminal_node.slot = slot; + pr_devel("<--%s() = terminal_node\n", __func__); + return assoc_array_walk_found_terminal_node; + } + + if (assoc_array_ptr_is_node(ptr)) { + /* There is a pointer to a node in the slot corresponding to + * this index key segment, so we need to follow it. + */ + cursor = ptr; + level += ASSOC_ARRAY_LEVEL_STEP; + if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) + goto consider_node; + goto jumped; + } + + /* There is a shortcut in the slot corresponding to the index key + * segment. We follow the shortcut if its partial index key matches + * this leaf's. 
Otherwise we need to split the shortcut. + */ + cursor = ptr; +follow_shortcut: + shortcut = assoc_array_ptr_to_shortcut(cursor); + smp_read_barrier_depends(); + pr_devel("shortcut to %d\n", shortcut->skip_to_level); + sc_level = level + ASSOC_ARRAY_LEVEL_STEP; + BUG_ON(sc_level > shortcut->skip_to_level); + + do { + /* Check the leaf against the shortcut's index key a word at a + * time, trimming the final word (the shortcut stores the index + * key completely from the root to the shortcut's target). + */ + if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0) + segments = ops->get_key_chunk(index_key, sc_level); + + sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT]; + dissimilarity = segments ^ sc_segments; + + if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) { + /* Trim segments that are beyond the shortcut */ + int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK; + dissimilarity &= ~(ULONG_MAX << shift); + next_sc_level = shortcut->skip_to_level; + } else { + next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE; + next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE); + } + + if (dissimilarity != 0) { + /* This shortcut points elsewhere */ + result->wrong_shortcut.shortcut = shortcut; + result->wrong_shortcut.level = level; + result->wrong_shortcut.sc_level = sc_level; + result->wrong_shortcut.sc_segments = sc_segments; + result->wrong_shortcut.dissimilarity = dissimilarity; + return assoc_array_walk_found_wrong_shortcut; + } + + sc_level = next_sc_level; + } while (sc_level < shortcut->skip_to_level); + + /* The shortcut matches the leaf's index to this point. */ + cursor = ACCESS_ONCE(shortcut->next_node); + if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) { + level = sc_level; + goto jumped; + } else { + level = sc_level; + goto consider_node; + } +} + +/** + * assoc_array_find - Find an object by index key + * @array: The associative array to search. + * @ops: The operations to use. + * @index_key: The key to the object. + * + * Find an object in an associative array by walking through the internal tree + * to the node that should contain the object and then searching the leaves + * there. NULL is returned if the requested object was not found in the array. + * + * The caller must hold the RCU read lock or better. + */ +void *assoc_array_find(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key) +{ + struct assoc_array_walk_result result; + const struct assoc_array_node *node; + const struct assoc_array_ptr *ptr; + const void *leaf; + int slot; + + if (assoc_array_walk(array, ops, index_key, &result) != + assoc_array_walk_found_terminal_node) + return NULL; + + node = result.terminal_node.node; + smp_read_barrier_depends(); + + /* If the target key is available to us, it's has to be pointed to by + * the terminal node. + */ + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = ACCESS_ONCE(node->slots[slot]); + if (ptr && assoc_array_ptr_is_leaf(ptr)) { + /* We need a barrier between the read of the pointer + * and dereferencing the pointer - but only if we are + * actually going to dereference it. + */ + leaf = assoc_array_ptr_to_leaf(ptr); + smp_read_barrier_depends(); + if (ops->compare_object(leaf, index_key)) + return (void *)leaf; + } + } + + return NULL; +} + +/* + * Destructively iterate over an associative array. The caller must prevent + * other simultaneous accesses. 
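A lookup sketch for the function above; "my_array", "my_index_ops", "struct my_obj" and "use_object" are illustrative names, not part of this patch:

	struct my_obj *found;

	rcu_read_lock();
	found = assoc_array_find(&my_array, &my_index_ops, "example-key");
	if (found)
		use_object(found);	/* hypothetical; take a reference here
					 * if the object must outlive the
					 * RCU read lock */
	rcu_read_unlock();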
+ */ +static void assoc_array_destroy_subtree(struct assoc_array_ptr *root, + const struct assoc_array_ops *ops) +{ + struct assoc_array_shortcut *shortcut; + struct assoc_array_node *node; + struct assoc_array_ptr *cursor, *parent = NULL; + int slot = -1; + + pr_devel("-->%s()\n", __func__); + + cursor = root; + if (!cursor) { + pr_devel("empty\n"); + return; + } + +move_to_meta: + if (assoc_array_ptr_is_shortcut(cursor)) { + /* Descend through a shortcut */ + pr_devel("[%d] shortcut\n", slot); + BUG_ON(!assoc_array_ptr_is_shortcut(cursor)); + shortcut = assoc_array_ptr_to_shortcut(cursor); + BUG_ON(shortcut->back_pointer != parent); + BUG_ON(slot != -1 && shortcut->parent_slot != slot); + parent = cursor; + cursor = shortcut->next_node; + slot = -1; + BUG_ON(!assoc_array_ptr_is_node(cursor)); + } + + pr_devel("[%d] node\n", slot); + node = assoc_array_ptr_to_node(cursor); + BUG_ON(node->back_pointer != parent); + BUG_ON(slot != -1 && node->parent_slot != slot); + slot = 0; + +continue_node: + pr_devel("Node %p [back=%p]\n", node, node->back_pointer); + for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + struct assoc_array_ptr *ptr = node->slots[slot]; + if (!ptr) + continue; + if (assoc_array_ptr_is_meta(ptr)) { + parent = cursor; + cursor = ptr; + goto move_to_meta; + } + + if (ops) { + pr_devel("[%d] free leaf\n", slot); + ops->free_object(assoc_array_ptr_to_leaf(ptr)); + } + } + + parent = node->back_pointer; + slot = node->parent_slot; + pr_devel("free node\n"); + kfree(node); + if (!parent) + return; /* Done */ + + /* Move back up to the parent (may need to free a shortcut on + * the way up) */ + if (assoc_array_ptr_is_shortcut(parent)) { + shortcut = assoc_array_ptr_to_shortcut(parent); + BUG_ON(shortcut->next_node != cursor); + cursor = parent; + parent = shortcut->back_pointer; + slot = shortcut->parent_slot; + pr_devel("free shortcut\n"); + kfree(shortcut); + if (!parent) + return; + + BUG_ON(!assoc_array_ptr_is_node(parent)); + } + + /* Ascend to next slot in parent node */ + pr_devel("ascend to %p[%d]\n", parent, slot); + cursor = parent; + node = assoc_array_ptr_to_node(cursor); + slot++; + goto continue_node; +} + +/** + * assoc_array_destroy - Destroy an associative array + * @array: The array to destroy. + * @ops: The operations to use. + * + * Discard all metadata and free all objects in an associative array. The + * array will be empty and ready to use again upon completion. This function + * cannot fail. + * + * The caller must prevent all other accesses whilst this takes place as no + * attempt is made to adjust pointers gracefully to permit RCU readlock-holding + * accesses to continue. On the other hand, no memory allocation is required. + */ +void assoc_array_destroy(struct assoc_array *array, + const struct assoc_array_ops *ops) +{ + assoc_array_destroy_subtree(array->root, ops); + array->root = NULL; +} + +/* + * Handle insertion into an empty tree. + */ +static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit) +{ + struct assoc_array_node *new_n0; + + pr_devel("-->%s()\n", __func__); + + new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL); + if (!new_n0) + return false; + + edit->new_meta[0] = assoc_array_node_to_ptr(new_n0); + edit->leaf_p = &new_n0->slots[0]; + edit->adjust_count_on = new_n0; + edit->set[0].ptr = &edit->array->root; + edit->set[0].to = assoc_array_node_to_ptr(new_n0); + + pr_devel("<--%s() = ok [no root]\n", __func__); + return true; +} + +/* + * Handle insertion into a terminal node. 
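A teardown sketch (the mutex and names are hypothetical): every object is handed to ops->free_object(), and, per the description above, RCU readers must also have been excluded by this point:

	mutex_lock(&my_update_lock);
	/* No other writers and no RCU readers may touch the array now. */
	assoc_array_destroy(&my_array, &my_index_ops);
	mutex_unlock(&my_update_lock);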
+ */ +static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit, + const struct assoc_array_ops *ops, + const void *index_key, + struct assoc_array_walk_result *result) +{ + struct assoc_array_shortcut *shortcut, *new_s0; + struct assoc_array_node *node, *new_n0, *new_n1, *side; + struct assoc_array_ptr *ptr; + unsigned long dissimilarity, base_seg, blank; + size_t keylen; + bool have_meta; + int level, diff; + int slot, next_slot, free_slot, i, j; + + node = result->terminal_node.node; + level = result->terminal_node.level; + edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot; + + pr_devel("-->%s()\n", __func__); + + /* We arrived at a node which doesn't have an onward node or shortcut + * pointer that we have to follow. This means that (a) the leaf we + * want must go here (either by insertion or replacement) or (b) we + * need to split this node and insert in one of the fragments. + */ + free_slot = -1; + + /* Firstly, we have to check the leaves in this node to see if there's + * a matching one we should replace in place. + */ + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + ptr = node->slots[i]; + if (!ptr) { + free_slot = i; + continue; + } + if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) { + pr_devel("replace in slot %d\n", i); + edit->leaf_p = &node->slots[i]; + edit->dead_leaf = node->slots[i]; + pr_devel("<--%s() = ok [replace]\n", __func__); + return true; + } + } + + /* If there is a free slot in this node then we can just insert the + * leaf here. + */ + if (free_slot >= 0) { + pr_devel("insert in free slot %d\n", free_slot); + edit->leaf_p = &node->slots[free_slot]; + edit->adjust_count_on = node; + pr_devel("<--%s() = ok [insert]\n", __func__); + return true; + } + + /* The node has no spare slots - so we're either going to have to split + * it or insert another node before it. + * + * Whatever, we're going to need at least two new nodes - so allocate + * those now. We may also need a new shortcut, but we deal with that + * when we need it. + */ + new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL); + if (!new_n0) + return false; + edit->new_meta[0] = assoc_array_node_to_ptr(new_n0); + new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL); + if (!new_n1) + return false; + edit->new_meta[1] = assoc_array_node_to_ptr(new_n1); + + /* We need to find out how similar the leaves are. */ + pr_devel("no spare slots\n"); + have_meta = false; + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + ptr = node->slots[i]; + if (assoc_array_ptr_is_meta(ptr)) { + edit->segment_cache[i] = 0xff; + have_meta = true; + continue; + } + base_seg = ops->get_object_key_chunk( + assoc_array_ptr_to_leaf(ptr), level); + base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK; + edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK; + } + + if (have_meta) { + pr_devel("have meta\n"); + goto split_node; + } + + /* The node contains only leaves */ + dissimilarity = 0; + base_seg = edit->segment_cache[0]; + for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++) + dissimilarity |= edit->segment_cache[i] ^ base_seg; + + pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity); + + if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) { + /* The old leaves all cluster in the same slot. We will need + * to insert a shortcut if the new node wants to cluster with them. + */ + if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0) + goto all_leaves_cluster_together; + + /* Otherwise we can just insert a new node ahead of the old + * one. 
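To make the clustering test above concrete, a small worked sketch with invented nibble values (one per occupied slot):

	unsigned char seg[ASSOC_ARRAY_FAN_OUT] = {
		0x3, 0x3, 0x7, 0x3, 0x3, 0x3, 0x3, 0x3,
		0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3
	};
	unsigned long dis = 0;
	int i;

	for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
		dis |= seg[i] ^ seg[0];
	/* dis == 0x4 here, so the leaves do not all want the same slot and
	 * the node is simply split; dis == 0 would mean they all cluster in
	 * slot 3 and one of the clustering cases below is taken. */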
+ */ + goto present_leaves_cluster_but_not_new_leaf; + } + +split_node: + pr_devel("split node\n"); + + /* We need to split the current node; we know that the node doesn't + * simply contain a full set of leaves that cluster together (it + * contains meta pointers and/or non-clustering leaves). + * + * We need to expel at least two leaves out of a set consisting of the + * leaves in the node and the new leaf. + * + * We need a new node (n0) to replace the current one and a new node to + * take the expelled nodes (n1). + */ + edit->set[0].to = assoc_array_node_to_ptr(new_n0); + new_n0->back_pointer = node->back_pointer; + new_n0->parent_slot = node->parent_slot; + new_n1->back_pointer = assoc_array_node_to_ptr(new_n0); + new_n1->parent_slot = -1; /* Need to calculate this */ + +do_split_node: + pr_devel("do_split_node\n"); + + new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch; + new_n1->nr_leaves_on_branch = 0; + + /* Begin by finding two matching leaves. There have to be at least two + * that match - even if there are meta pointers - because any leaf that + * would match a slot with a meta pointer in it must be somewhere + * behind that meta pointer and cannot be here. Further, given N + * remaining leaf slots, we now have N+1 leaves to go in them. + */ + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + slot = edit->segment_cache[i]; + if (slot != 0xff) + for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++) + if (edit->segment_cache[j] == slot) + goto found_slot_for_multiple_occupancy; + } +found_slot_for_multiple_occupancy: + pr_devel("same slot: %x %x [%02x]\n", i, j, slot); + BUG_ON(i >= ASSOC_ARRAY_FAN_OUT); + BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1); + BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT); + + new_n1->parent_slot = slot; + + /* Metadata pointers cannot change slot */ + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) + if (assoc_array_ptr_is_meta(node->slots[i])) + new_n0->slots[i] = node->slots[i]; + else + new_n0->slots[i] = NULL; + BUG_ON(new_n0->slots[slot] != NULL); + new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1); + + /* Filter the leaf pointers between the new nodes */ + free_slot = -1; + next_slot = 0; + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + if (assoc_array_ptr_is_meta(node->slots[i])) + continue; + if (edit->segment_cache[i] == slot) { + new_n1->slots[next_slot++] = node->slots[i]; + new_n1->nr_leaves_on_branch++; + } else { + do { + free_slot++; + } while (new_n0->slots[free_slot] != NULL); + new_n0->slots[free_slot] = node->slots[i]; + } + } + + pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot); + + if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) { + do { + free_slot++; + } while (new_n0->slots[free_slot] != NULL); + edit->leaf_p = &new_n0->slots[free_slot]; + edit->adjust_count_on = new_n0; + } else { + edit->leaf_p = &new_n1->slots[next_slot++]; + edit->adjust_count_on = new_n1; + } + + BUG_ON(next_slot <= 1); + + edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0); + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + if (edit->segment_cache[i] == 0xff) { + ptr = node->slots[i]; + BUG_ON(assoc_array_ptr_is_leaf(ptr)); + if (assoc_array_ptr_is_node(ptr)) { + side = assoc_array_ptr_to_node(ptr); + edit->set_backpointers[i] = &side->back_pointer; + } else { + shortcut = assoc_array_ptr_to_shortcut(ptr); + edit->set_backpointers[i] = &shortcut->back_pointer; + } + } + } + + ptr = node->back_pointer; + if (!ptr) + edit->set[0].ptr = &edit->array->root; + else if (assoc_array_ptr_is_node(ptr)) + edit->set[0].ptr = 
&assoc_array_ptr_to_node(ptr)->slots[node->parent_slot]; + else + edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node; + edit->excised_meta[0] = assoc_array_node_to_ptr(node); + pr_devel("<--%s() = ok [split node]\n", __func__); + return true; + +present_leaves_cluster_but_not_new_leaf: + /* All the old leaves cluster in the same slot, but the new leaf wants + * to go into a different slot, so we create a new node to hold the new + * leaf and a pointer to a new node holding all the old leaves. + */ + pr_devel("present leaves cluster but not new leaf\n"); + + new_n0->back_pointer = node->back_pointer; + new_n0->parent_slot = node->parent_slot; + new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch; + new_n1->back_pointer = assoc_array_node_to_ptr(new_n0); + new_n1->parent_slot = edit->segment_cache[0]; + new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch; + edit->adjust_count_on = new_n0; + + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) + new_n1->slots[i] = node->slots[i]; + + new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n1); + edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]]; + + edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot]; + edit->set[0].to = assoc_array_node_to_ptr(new_n0); + edit->excised_meta[0] = assoc_array_node_to_ptr(node); + pr_devel("<--%s() = ok [insert node before]\n", __func__); + return true; + +all_leaves_cluster_together: + /* All the leaves, new and old, want to cluster together in this node + * in the same slot, so we have to replace this node with a shortcut to + * skip over the identical parts of the key and then place a pair of + * nodes, one inside the other, at the end of the shortcut and + * distribute the keys between them. + * + * Firstly we need to work out where the leaves start diverging as a + * bit position into their keys so that we know how big the shortcut + * needs to be. + * + * We only need to make a single pass of N of the N+1 leaves because if + * any keys differ between themselves at bit X then at least one of + * them must also differ with the base key at bit X or before.
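A worked sizing example for the shortcut built below (64-bit key chunks assumed; the divergence point is invented):

	int diff = 100;		/* first bit at which the N+1 keys diverge */
	size_t keylen;

	keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE)
		>> ASSOC_ARRAY_KEY_CHUNK_SHIFT;	/* round_up(100, 64) / 64 == 2 words */
	/* skip_to_level becomes 100 & ~ASSOC_ARRAY_LEVEL_STEP_MASK == 100,
	 * i.e. the shortcut jumps straight to nibble 25 of the index key. */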
+ */ + pr_devel("all leaves cluster together\n"); + diff = INT_MAX; + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf), + assoc_array_ptr_to_leaf(node->slots[i])); + if (x < diff) { + BUG_ON(x < 0); + diff = x; + } + } + BUG_ON(diff == INT_MAX); + BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP); + + keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE); + keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; + + new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) + + keylen * sizeof(unsigned long), GFP_KERNEL); + if (!new_s0) + return false; + edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0); + + edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0); + new_s0->back_pointer = node->back_pointer; + new_s0->parent_slot = node->parent_slot; + new_s0->next_node = assoc_array_node_to_ptr(new_n0); + new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0); + new_n0->parent_slot = 0; + new_n1->back_pointer = assoc_array_node_to_ptr(new_n0); + new_n1->parent_slot = -1; /* Need to calculate this */ + + new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK; + pr_devel("skip_to_level = %d [diff %d]\n", level, diff); + BUG_ON(level <= 0); + + for (i = 0; i < keylen; i++) + new_s0->index_key[i] = + ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); + + blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); + pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); + new_s0->index_key[keylen - 1] &= ~blank; + + /* This now reduces to a node splitting exercise for which we'll need + * to regenerate the disparity table. + */ + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + ptr = node->slots[i]; + base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr), + level); + base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK; + edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK; + } + + base_seg = ops->get_key_chunk(index_key, level); + base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK; + edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK; + goto do_split_node; +} + +/* + * Handle insertion into the middle of a shortcut. + */ +static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit, + const struct assoc_array_ops *ops, + struct assoc_array_walk_result *result) +{ + struct assoc_array_shortcut *shortcut, *new_s0, *new_s1; + struct assoc_array_node *node, *new_n0, *side; + unsigned long sc_segments, dissimilarity, blank; + size_t keylen; + int level, sc_level, diff; + int sc_slot; + + shortcut = result->wrong_shortcut.shortcut; + level = result->wrong_shortcut.level; + sc_level = result->wrong_shortcut.sc_level; + sc_segments = result->wrong_shortcut.sc_segments; + dissimilarity = result->wrong_shortcut.dissimilarity; + + pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n", + __func__, level, dissimilarity, sc_level); + + /* We need to split a shortcut and insert a node between the two + * pieces. Zero-length pieces will be dispensed with entirely. + * + * First of all, we need to find out in which level the first + * difference was. 
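A worked example of the level computation that follows (all values invented; 64-bit key chunks assumed):

	unsigned long dissimilarity = 0x300000;	/* bits 20 and 21 differ */
	int sc_level = 72;			/* somewhere in the 2nd key word */
	int diff;

	diff  = __ffs(dissimilarity);			/* 20 */
	diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;		/* 20, a nibble boundary */
	diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;	/* 64 + 20 == 84 */
	/* The two keys agree up to level 84 of the index key. */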
+ */ + diff = __ffs(dissimilarity); + diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK; + diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK; + pr_devel("diff=%d\n", diff); + + if (!shortcut->back_pointer) { + edit->set[0].ptr = &edit->array->root; + } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) { + node = assoc_array_ptr_to_node(shortcut->back_pointer); + edit->set[0].ptr = &node->slots[shortcut->parent_slot]; + } else { + BUG(); + } + + edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut); + + /* Create a new node now since we're going to need it anyway */ + new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL); + if (!new_n0) + return false; + edit->new_meta[0] = assoc_array_node_to_ptr(new_n0); + edit->adjust_count_on = new_n0; + + /* Insert a new shortcut before the new node if this segment isn't of + * zero length - otherwise we just connect the new node directly to the + * parent. + */ + level += ASSOC_ARRAY_LEVEL_STEP; + if (diff > level) { + pr_devel("pre-shortcut %d...%d\n", level, diff); + keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE); + keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; + + new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) + + keylen * sizeof(unsigned long), GFP_KERNEL); + if (!new_s0) + return false; + edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0); + edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0); + new_s0->back_pointer = shortcut->back_pointer; + new_s0->parent_slot = shortcut->parent_slot; + new_s0->next_node = assoc_array_node_to_ptr(new_n0); + new_s0->skip_to_level = diff; + + new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0); + new_n0->parent_slot = 0; + + memcpy(new_s0->index_key, shortcut->index_key, + keylen * sizeof(unsigned long)); + + blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK); + pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank); + new_s0->index_key[keylen - 1] &= ~blank; + } else { + pr_devel("no pre-shortcut\n"); + edit->set[0].to = assoc_array_node_to_ptr(new_n0); + new_n0->back_pointer = shortcut->back_pointer; + new_n0->parent_slot = shortcut->parent_slot; + } + + side = assoc_array_ptr_to_node(shortcut->next_node); + new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch; + + /* We need to know which slot in the new node is going to take a + * metadata pointer. + */ + sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK); + sc_slot &= ASSOC_ARRAY_FAN_MASK; + + pr_devel("new slot %lx >> %d -> %d\n", + sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot); + + /* Determine whether we need to follow the new node with a replacement + * for the current shortcut. We could in theory reuse the current + * shortcut if its parent slot number doesn't change - but that's a + * 1-in-16 chance so not worth expending the code upon. 
+ */ + level = diff + ASSOC_ARRAY_LEVEL_STEP; + if (level < shortcut->skip_to_level) { + pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level); + keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE); + keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; + + new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) + + keylen * sizeof(unsigned long), GFP_KERNEL); + if (!new_s1) + return false; + edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1); + + new_s1->back_pointer = assoc_array_node_to_ptr(new_n0); + new_s1->parent_slot = sc_slot; + new_s1->next_node = shortcut->next_node; + new_s1->skip_to_level = shortcut->skip_to_level; + + new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1); + + memcpy(new_s1->index_key, shortcut->index_key, + keylen * sizeof(unsigned long)); + + edit->set[1].ptr = &side->back_pointer; + edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1); + } else { + pr_devel("no post-shortcut\n"); + + /* We don't have to replace the pointed-to node as long as we + * use memory barriers to make sure the parent slot number is + * changed before the back pointer (the parent slot number is + * irrelevant to the old parent shortcut). + */ + new_n0->slots[sc_slot] = shortcut->next_node; + edit->set_parent_slot[0].p = &side->parent_slot; + edit->set_parent_slot[0].to = sc_slot; + edit->set[1].ptr = &side->back_pointer; + edit->set[1].to = assoc_array_node_to_ptr(new_n0); + } + + /* Install the new leaf in a spare slot in the new node. */ + if (sc_slot == 0) + edit->leaf_p = &new_n0->slots[1]; + else + edit->leaf_p = &new_n0->slots[0]; + + pr_devel("<--%s() = ok [split shortcut]\n", __func__); + return edit; +} + +/** + * assoc_array_insert - Script insertion of an object into an associative array + * @array: The array to insert into. + * @ops: The operations to use. + * @index_key: The key to insert at. + * @object: The object to insert. + * + * Precalculate and preallocate a script for the insertion or replacement of an + * object in an associative array. This results in an edit script that can + * either be applied or cancelled. + * + * The function returns a pointer to an edit script or -ENOMEM. + * + * The caller should lock against other modifications and must continue to hold + * the lock until assoc_array_apply_edit() has been called. + * + * Accesses to the tree may take place concurrently with this function, + * provided they hold the RCU read lock. + */ +struct assoc_array_edit *assoc_array_insert(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + void *object) +{ + struct assoc_array_walk_result result; + struct assoc_array_edit *edit; + + pr_devel("-->%s()\n", __func__); + + /* The leaf pointer we're given must not have the bottom bit set as we + * use those for type-marking the pointer. NULL pointers are also not + * allowed as they indicate an empty slot but we have to allow them + * here as they can be updated later. 
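A typical insertion sequence, sketched with hypothetical names ("my_array", "my_index_ops", "my_update_lock" and obj->index_key are not part of this patch):

	struct assoc_array_edit *edit;

	mutex_lock(&my_update_lock);
	edit = assoc_array_insert(&my_array, &my_index_ops, obj->index_key, obj);
	if (IS_ERR(edit)) {
		mutex_unlock(&my_update_lock);
		return PTR_ERR(edit);
	}
	assoc_array_apply_edit(edit);	/* cannot fail; readers observe the
					 * change via RCU */
	mutex_unlock(&my_update_lock);
	return 0;

If some later step fails before the edit is applied, assoc_array_cancel_edit() backs the preallocation out without touching the array (note that the object itself is not freed by the cancel).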
+ */ + BUG_ON(assoc_array_ptr_is_meta(object)); + + edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL); + if (!edit) + return ERR_PTR(-ENOMEM); + edit->array = array; + edit->ops = ops; + edit->leaf = assoc_array_leaf_to_ptr(object); + edit->adjust_count_by = 1; + + switch (assoc_array_walk(array, ops, index_key, &result)) { + case assoc_array_walk_tree_empty: + /* Allocate a root node if there isn't one yet */ + if (!assoc_array_insert_in_empty_tree(edit)) + goto enomem; + return edit; + + case assoc_array_walk_found_terminal_node: + /* We found a node that doesn't have a node/shortcut pointer in + * the slot corresponding to the index key that we have to + * follow. + */ + if (!assoc_array_insert_into_terminal_node(edit, ops, index_key, + &result)) + goto enomem; + return edit; + + case assoc_array_walk_found_wrong_shortcut: + /* We found a shortcut that didn't match our key in a slot we + * needed to follow. + */ + if (!assoc_array_insert_mid_shortcut(edit, ops, &result)) + goto enomem; + return edit; + } + +enomem: + /* Clean up after an out of memory error */ + pr_devel("enomem\n"); + assoc_array_cancel_edit(edit); + return ERR_PTR(-ENOMEM); +} + +/** + * assoc_array_insert_set_object - Set the new object pointer in an edit script + * @edit: The edit script to modify. + * @object: The object pointer to set. + * + * Change the object to be inserted in an edit script. The object pointed to + * by the old object is not freed. This must be done prior to applying the + * script. + */ +void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object) +{ + BUG_ON(!object); + edit->leaf = assoc_array_leaf_to_ptr(object); +} + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +/* + * Subtree collapse to node iterator. + */ +static int assoc_array_delete_collapse_iterator(const void *leaf, + void *iterator_data) +{ + struct assoc_array_delete_collapse_context *collapse = iterator_data; + + if (leaf == collapse->skip_leaf) + return 0; + + BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT); + + collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf); + return 0; +} + +/** + * assoc_array_delete - Script deletion of an object from an associative array + * @array: The array to search. + * @ops: The operations to use. + * @index_key: The key to the object. + * + * Precalculate and preallocate a script for the deletion of an object from an + * associative array. This results in an edit script that can either be + * applied or cancelled. + * + * The function returns a pointer to an edit script if the object was found, + * NULL if the object was not found or -ENOMEM. + * + * The caller should lock against other modifications and must continue to hold + * the lock until assoc_array_apply_edit() has been called. + * + * Accesses to the tree may take place concurrently with this function, + * provided they hold the RCU read lock. 
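The matching deletion sequence (same hypothetical names as the insertion sketch earlier):

	struct assoc_array_edit *edit;

	mutex_lock(&my_update_lock);
	edit = assoc_array_delete(&my_array, &my_index_ops, index_key);
	if (IS_ERR(edit)) {
		mutex_unlock(&my_update_lock);
		return PTR_ERR(edit);
	}
	if (edit)	/* NULL: no such object, nothing to do */
		assoc_array_apply_edit(edit);	/* the object is released via
						 * ops->free_object() after an
						 * RCU grace period */
	mutex_unlock(&my_update_lock);
	return 0;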
+ */ +struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key) +{ + struct assoc_array_delete_collapse_context collapse; + struct assoc_array_walk_result result; + struct assoc_array_node *node, *new_n0; + struct assoc_array_edit *edit; + struct assoc_array_ptr *ptr; + bool has_meta; + int slot, i; + + pr_devel("-->%s()\n", __func__); + + edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL); + if (!edit) + return ERR_PTR(-ENOMEM); + edit->array = array; + edit->ops = ops; + edit->adjust_count_by = -1; + + switch (assoc_array_walk(array, ops, index_key, &result)) { + case assoc_array_walk_found_terminal_node: + /* We found a node that should contain the leaf we've been + * asked to remove - *if* it's in the tree. + */ + pr_devel("terminal_node\n"); + node = result.terminal_node.node; + + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = node->slots[slot]; + if (ptr && + assoc_array_ptr_is_leaf(ptr) && + ops->compare_object(assoc_array_ptr_to_leaf(ptr), + index_key)) + goto found_leaf; + } + case assoc_array_walk_tree_empty: + case assoc_array_walk_found_wrong_shortcut: + default: + assoc_array_cancel_edit(edit); + pr_devel("not found\n"); + return NULL; + } + +found_leaf: + BUG_ON(array->nr_leaves_on_tree <= 0); + + /* In the simplest form of deletion we just clear the slot and release + * the leaf after a suitable interval. + */ + edit->dead_leaf = node->slots[slot]; + edit->set[0].ptr = &node->slots[slot]; + edit->set[0].to = NULL; + edit->adjust_count_on = node; + + /* If that concludes erasure of the last leaf, then delete the entire + * internal array. + */ + if (array->nr_leaves_on_tree == 1) { + edit->set[1].ptr = &array->root; + edit->set[1].to = NULL; + edit->adjust_count_on = NULL; + edit->excised_subtree = array->root; + pr_devel("all gone\n"); + return edit; + } + + /* However, we'd also like to clear up some metadata blocks if we + * possibly can. + * + * We go for a simple algorithm of: if this node has FAN_OUT or fewer + * leaves in it, then attempt to collapse it - and attempt to + * recursively collapse up the tree. + * + * We could also try and collapse in partially filled subtrees to take + * up space in this node. + */ + if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) { + struct assoc_array_node *parent, *grandparent; + struct assoc_array_ptr *ptr; + + /* First of all, we need to know if this node has metadata so + * that we don't try collapsing if all the leaves are already + * here. + */ + has_meta = false; + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + ptr = node->slots[i]; + if (assoc_array_ptr_is_meta(ptr)) { + has_meta = true; + break; + } + } + + pr_devel("leaves: %ld [m=%d]\n", + node->nr_leaves_on_branch - 1, has_meta); + + /* Look further up the tree to see if we can collapse this node + * into a more proximal node too. 
+ */ + parent = node; + collapse_up: + pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch); + + ptr = parent->back_pointer; + if (!ptr) + goto do_collapse; + if (assoc_array_ptr_is_shortcut(ptr)) { + struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr); + ptr = s->back_pointer; + if (!ptr) + goto do_collapse; + } + + grandparent = assoc_array_ptr_to_node(ptr); + if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) { + parent = grandparent; + goto collapse_up; + } + + do_collapse: + /* There's no point collapsing if the original node has no meta + * pointers to discard and if we didn't merge into one of that + * node's ancestry. + */ + if (has_meta || parent != node) { + node = parent; + + /* Create a new node to collapse into */ + new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL); + if (!new_n0) + goto enomem; + edit->new_meta[0] = assoc_array_node_to_ptr(new_n0); + + new_n0->back_pointer = node->back_pointer; + new_n0->parent_slot = node->parent_slot; + new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch; + edit->adjust_count_on = new_n0; + + collapse.node = new_n0; + collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf); + collapse.slot = 0; + assoc_array_subtree_iterate(assoc_array_node_to_ptr(node), + node->back_pointer, + assoc_array_delete_collapse_iterator, + &collapse); + pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch); + BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1); + + if (!node->back_pointer) { + edit->set[1].ptr = &array->root; + } else if (assoc_array_ptr_is_leaf(node->back_pointer)) { + BUG(); + } else if (assoc_array_ptr_is_node(node->back_pointer)) { + struct assoc_array_node *p = + assoc_array_ptr_to_node(node->back_pointer); + edit->set[1].ptr = &p->slots[node->parent_slot]; + } else if (assoc_array_ptr_is_shortcut(node->back_pointer)) { + struct assoc_array_shortcut *s = + assoc_array_ptr_to_shortcut(node->back_pointer); + edit->set[1].ptr = &s->next_node; + } + edit->set[1].to = assoc_array_node_to_ptr(new_n0); + edit->excised_subtree = assoc_array_node_to_ptr(node); + } + } + + return edit; + +enomem: + /* Clean up after an out of memory error */ + pr_devel("enomem\n"); + assoc_array_cancel_edit(edit); + return ERR_PTR(-ENOMEM); +} + +/** + * assoc_array_clear - Script deletion of all objects from an associative array + * @array: The array to clear. + * @ops: The operations to use. + * + * Precalculate and preallocate a script for the deletion of all the objects + * from an associative array. This results in an edit script that can either + * be applied or cancelled. + * + * The function returns a pointer to an edit script if there are objects to be + * deleted, NULL if there are no objects in the array or -ENOMEM. + * + * The caller should lock against other modifications and must continue to hold + * the lock until assoc_array_apply_edit() has been called. + * + * Accesses to the tree may take place concurrently with this function, + * provided they hold the RCU read lock. 
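Emptying the whole array follows the same pattern (hypothetical names again; the caller holds its update lock):

	struct assoc_array_edit *edit;

	edit = assoc_array_clear(&my_array, &my_index_ops);
	if (IS_ERR(edit))
		return PTR_ERR(edit);
	if (edit)	/* NULL: the array was already empty */
		assoc_array_apply_edit(edit);	/* old tree torn down after an
						 * RCU grace period */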
+ */ +struct assoc_array_edit *assoc_array_clear(struct assoc_array *array, + const struct assoc_array_ops *ops) +{ + struct assoc_array_edit *edit; + + pr_devel("-->%s()\n", __func__); + + if (!array->root) + return NULL; + + edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL); + if (!edit) + return ERR_PTR(-ENOMEM); + edit->array = array; + edit->ops = ops; + edit->set[1].ptr = &array->root; + edit->set[1].to = NULL; + edit->excised_subtree = array->root; + edit->ops_for_excised_subtree = ops; + pr_devel("all gone\n"); + return edit; +} + +/* + * Handle the deferred destruction after an applied edit. + */ +static void assoc_array_rcu_cleanup(struct rcu_head *head) +{ + struct assoc_array_edit *edit = + container_of(head, struct assoc_array_edit, rcu); + int i; + + pr_devel("-->%s()\n", __func__); + + if (edit->dead_leaf) + edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf)); + for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++) + if (edit->excised_meta[i]) + kfree(assoc_array_ptr_to_node(edit->excised_meta[i])); + + if (edit->excised_subtree) { + BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree)); + if (assoc_array_ptr_is_node(edit->excised_subtree)) { + struct assoc_array_node *n = + assoc_array_ptr_to_node(edit->excised_subtree); + n->back_pointer = NULL; + } else { + struct assoc_array_shortcut *s = + assoc_array_ptr_to_shortcut(edit->excised_subtree); + s->back_pointer = NULL; + } + assoc_array_destroy_subtree(edit->excised_subtree, + edit->ops_for_excised_subtree); + } + + kfree(edit); +} + +/** + * assoc_array_apply_edit - Apply an edit script to an associative array + * @edit: The script to apply. + * + * Apply an edit script to an associative array to effect an insertion, + * deletion or clearance. As the edit script includes preallocated memory, + * this is guaranteed not to fail. + * + * The edit script, dead objects and dead metadata will be scheduled for + * destruction after an RCU grace period to permit those doing read-only + * accesses on the array to continue to do so under the RCU read lock whilst + * the edit is taking place. + */ +void assoc_array_apply_edit(struct assoc_array_edit *edit) +{ + struct assoc_array_shortcut *shortcut; + struct assoc_array_node *node; + struct assoc_array_ptr *ptr; + int i; + + pr_devel("-->%s()\n", __func__); + + smp_wmb(); + if (edit->leaf_p) + *edit->leaf_p = edit->leaf; + + smp_wmb(); + for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++) + if (edit->set_parent_slot[i].p) + *edit->set_parent_slot[i].p = edit->set_parent_slot[i].to; + + smp_wmb(); + for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++) + if (edit->set_backpointers[i]) + *edit->set_backpointers[i] = edit->set_backpointers_to; + + smp_wmb(); + for (i = 0; i < ARRAY_SIZE(edit->set); i++) + if (edit->set[i].ptr) + *edit->set[i].ptr = edit->set[i].to; + + if (edit->array->root == NULL) { + edit->array->nr_leaves_on_tree = 0; + } else if (edit->adjust_count_on) { + node = edit->adjust_count_on; + for (;;) { + node->nr_leaves_on_branch += edit->adjust_count_by; + + ptr = node->back_pointer; + if (!ptr) + break; + if (assoc_array_ptr_is_shortcut(ptr)) { + shortcut = assoc_array_ptr_to_shortcut(ptr); + ptr = shortcut->back_pointer; + if (!ptr) + break; + } + BUG_ON(!assoc_array_ptr_is_node(ptr)); + node = assoc_array_ptr_to_node(ptr); + } + + edit->array->nr_leaves_on_tree += edit->adjust_count_by; + } + + call_rcu(&edit->rcu, assoc_array_rcu_cleanup); +} + +/** + * assoc_array_cancel_edit - Discard an edit script. 
+ * @edit: The script to discard. + * + * Free an edit script and all the preallocated data it holds without making + * any changes to the associative array it was intended for. + * + * NOTE! In the case of an insertion script, this does _not_ release the leaf + * that was to be inserted. That is left to the caller. + */ +void assoc_array_cancel_edit(struct assoc_array_edit *edit) +{ + struct assoc_array_ptr *ptr; + int i; + + pr_devel("-->%s()\n", __func__); + + /* Clean up after an out of memory error */ + for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) { + ptr = edit->new_meta[i]; + if (ptr) { + if (assoc_array_ptr_is_node(ptr)) + kfree(assoc_array_ptr_to_node(ptr)); + else + kfree(assoc_array_ptr_to_shortcut(ptr)); + } + } + kfree(edit); +} + +/** + * assoc_array_gc - Garbage collect an associative array. + * @array: The array to clean. + * @ops: The operations to use. + * @iterator: A callback function to pass judgement on each object. + * @iterator_data: Private data for the callback function. + * + * Collect garbage from an associative array and pack down the internal tree to + * save memory. + * + * The iterator function is asked to pass judgement upon each object in the + * array. If it returns false, the object is discard and if it returns true, + * the object is kept. If it returns true, it must increment the object's + * usage count (or whatever it needs to do to retain it) before returning. + * + * This function returns 0 if successful or -ENOMEM if out of memory. In the + * latter case, the array is not changed. + * + * The caller should lock against other modifications and must continue to hold + * the lock until assoc_array_apply_edit() has been called. + * + * Accesses to the tree may take place concurrently with this function, + * provided they hold the RCU read lock. + */ +int assoc_array_gc(struct assoc_array *array, + const struct assoc_array_ops *ops, + bool (*iterator)(void *object, void *iterator_data), + void *iterator_data) +{ + struct assoc_array_shortcut *shortcut, *new_s; + struct assoc_array_node *node, *new_n; + struct assoc_array_edit *edit; + struct assoc_array_ptr *cursor, *ptr; + struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp; + unsigned long nr_leaves_on_tree; + int keylen, slot, nr_free, next_slot, i; + + pr_devel("-->%s()\n", __func__); + + if (!array->root) + return 0; + + edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL); + if (!edit) + return -ENOMEM; + edit->array = array; + edit->ops = ops; + edit->ops_for_excised_subtree = ops; + edit->set[0].ptr = &array->root; + edit->excised_subtree = array->root; + + new_root = new_parent = NULL; + new_ptr_pp = &new_root; + cursor = array->root; + +descend: + /* If this point is a shortcut, then we need to duplicate it and + * advance the target cursor. 
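The GC iterator contract described above can be sketched as follows; "struct my_obj", its "dead" flag and its "usage" count are invented:

	static bool my_gc_keep(void *object, void *iterator_data)
	{
		struct my_obj *obj = object;

		if (obj->dead)
			return false;		/* discard: the GC drops this leaf */

		atomic_inc(&obj->usage);	/* keep: take our own reference */
		return true;
	}

	/* Caller, holding the update lock:
	 *	ret = assoc_array_gc(&my_array, &my_index_ops, my_gc_keep, NULL);
	 */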
+ */ + if (assoc_array_ptr_is_shortcut(cursor)) { + shortcut = assoc_array_ptr_to_shortcut(cursor); + keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE); + keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; + new_s = kmalloc(sizeof(struct assoc_array_shortcut) + + keylen * sizeof(unsigned long), GFP_KERNEL); + if (!new_s) + goto enomem; + pr_devel("dup shortcut %p -> %p\n", shortcut, new_s); + memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) + + keylen * sizeof(unsigned long))); + new_s->back_pointer = new_parent; + new_s->parent_slot = shortcut->parent_slot; + *new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s); + new_ptr_pp = &new_s->next_node; + cursor = shortcut->next_node; + } + + /* Duplicate the node at this position */ + node = assoc_array_ptr_to_node(cursor); + new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL); + if (!new_n) + goto enomem; + pr_devel("dup node %p -> %p\n", node, new_n); + new_n->back_pointer = new_parent; + new_n->parent_slot = node->parent_slot; + *new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n); + new_ptr_pp = NULL; + slot = 0; + +continue_node: + /* Filter across any leaves and gc any subtrees */ + for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = node->slots[slot]; + if (!ptr) + continue; + + if (assoc_array_ptr_is_leaf(ptr)) { + if (iterator(assoc_array_ptr_to_leaf(ptr), + iterator_data)) + /* The iterator will have done any reference + * counting on the object for us. + */ + new_n->slots[slot] = ptr; + continue; + } + + new_ptr_pp = &new_n->slots[slot]; + cursor = ptr; + goto descend; + } + + pr_devel("-- compress node %p --\n", new_n); + + /* Count up the number of empty slots in this node and work out the + * subtree leaf count. + */ + new_n->nr_leaves_on_branch = 0; + nr_free = 0; + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = new_n->slots[slot]; + if (!ptr) + nr_free++; + else if (assoc_array_ptr_is_leaf(ptr)) + new_n->nr_leaves_on_branch++; + } + pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch); + + /* See what we can fold in */ + next_slot = 0; + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + struct assoc_array_shortcut *s; + struct assoc_array_node *child; + + ptr = new_n->slots[slot]; + if (!ptr || assoc_array_ptr_is_leaf(ptr)) + continue; + + s = NULL; + if (assoc_array_ptr_is_shortcut(ptr)) { + s = assoc_array_ptr_to_shortcut(ptr); + ptr = s->next_node; + } + + child = assoc_array_ptr_to_node(ptr); + new_n->nr_leaves_on_branch += child->nr_leaves_on_branch; + + if (child->nr_leaves_on_branch <= nr_free + 1) { + /* Fold the child node into this one */ + pr_devel("[%d] fold node %lu/%d [nx %d]\n", + slot, child->nr_leaves_on_branch, nr_free + 1, + next_slot); + + /* We would already have reaped an intervening shortcut + * on the way back up the tree. 
+ */ + BUG_ON(s); + + new_n->slots[slot] = NULL; + nr_free++; + if (slot < next_slot) + next_slot = slot; + for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { + struct assoc_array_ptr *p = child->slots[i]; + if (!p) + continue; + BUG_ON(assoc_array_ptr_is_meta(p)); + while (new_n->slots[next_slot]) + next_slot++; + BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT); + new_n->slots[next_slot++] = p; + nr_free--; + } + kfree(child); + } else { + pr_devel("[%d] retain node %lu/%d [nx %d]\n", + slot, child->nr_leaves_on_branch, nr_free + 1, + next_slot); + } + } + + pr_devel("after: %lu\n", new_n->nr_leaves_on_branch); + + nr_leaves_on_tree = new_n->nr_leaves_on_branch; + + /* Excise this node if it is singly occupied by a shortcut */ + if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) { + for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) + if ((ptr = new_n->slots[slot])) + break; + + if (assoc_array_ptr_is_meta(ptr) && + assoc_array_ptr_is_shortcut(ptr)) { + pr_devel("excise node %p with 1 shortcut\n", new_n); + new_s = assoc_array_ptr_to_shortcut(ptr); + new_parent = new_n->back_pointer; + slot = new_n->parent_slot; + kfree(new_n); + if (!new_parent) { + new_s->back_pointer = NULL; + new_s->parent_slot = 0; + new_root = ptr; + goto gc_complete; + } + + if (assoc_array_ptr_is_shortcut(new_parent)) { + /* We can discard any preceding shortcut also */ + struct assoc_array_shortcut *s = + assoc_array_ptr_to_shortcut(new_parent); + + pr_devel("excise preceding shortcut\n"); + + new_parent = new_s->back_pointer = s->back_pointer; + slot = new_s->parent_slot = s->parent_slot; + kfree(s); + if (!new_parent) { + new_s->back_pointer = NULL; + new_s->parent_slot = 0; + new_root = ptr; + goto gc_complete; + } + } + + new_s->back_pointer = new_parent; + new_s->parent_slot = slot; + new_n = assoc_array_ptr_to_node(new_parent); + new_n->slots[slot] = ptr; + goto ascend_old_tree; + } + } + + /* Excise any shortcuts we might encounter that point to nodes that + * only contain leaves. 
+ */ + ptr = new_n->back_pointer; + if (!ptr) + goto gc_complete; + + if (assoc_array_ptr_is_shortcut(ptr)) { + new_s = assoc_array_ptr_to_shortcut(ptr); + new_parent = new_s->back_pointer; + slot = new_s->parent_slot; + + if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) { + struct assoc_array_node *n; + + pr_devel("excise shortcut\n"); + new_n->back_pointer = new_parent; + new_n->parent_slot = slot; + kfree(new_s); + if (!new_parent) { + new_root = assoc_array_node_to_ptr(new_n); + goto gc_complete; + } + + n = assoc_array_ptr_to_node(new_parent); + n->slots[slot] = assoc_array_node_to_ptr(new_n); + } + } else { + new_parent = ptr; + } + new_n = assoc_array_ptr_to_node(new_parent); + +ascend_old_tree: + ptr = node->back_pointer; + if (assoc_array_ptr_is_shortcut(ptr)) { + shortcut = assoc_array_ptr_to_shortcut(ptr); + slot = shortcut->parent_slot; + cursor = shortcut->back_pointer; + } else { + slot = node->parent_slot; + cursor = ptr; + } + BUG_ON(!ptr); + node = assoc_array_ptr_to_node(cursor); + slot++; + goto continue_node; + +gc_complete: + edit->set[0].to = new_root; + assoc_array_apply_edit(edit); + edit->array->nr_leaves_on_tree = nr_leaves_on_tree; + return 0; + +enomem: + pr_devel("enomem\n"); + assoc_array_destroy_subtree(new_root, edit->ops); + kfree(edit); + return -ENOMEM; +} diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c index 657979f71be..bf076d281d4 100644 --- a/lib/mpi/mpiutil.c +++ b/lib/mpi/mpiutil.c @@ -121,3 +121,6 @@ void mpi_free(MPI a) kfree(a); } EXPORT_SYMBOL_GPL(mpi_free); + +MODULE_DESCRIPTION("Multiprecision maths library"); +MODULE_LICENSE("GPL"); diff --git a/lib/random32.c b/lib/random32.c index 82da4f4c348..1e5b2df4429 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -214,18 +214,22 @@ static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); static void __prandom_timer(unsigned long dontcare) { u32 entropy; + unsigned long expires; get_random_bytes(&entropy, sizeof(entropy)); prandom_seed(entropy); + /* reseed every ~60 seconds, in [40 .. 
80) interval with slack */ - seed_timer.expires = jiffies + (40 * HZ + (prandom_u32() % (40 * HZ))); + expires = 40 + (prandom_u32() % 40); + seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); + add_timer(&seed_timer); } -static void prandom_start_seed_timer(void) +static void __init __prandom_start_seed_timer(void) { set_timer_slack(&seed_timer, HZ); - seed_timer.expires = jiffies + 40 * HZ; + seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); add_timer(&seed_timer); } @@ -270,7 +274,7 @@ void prandom_reseed_late(void) static int __init prandom_reseed(void) { __prandom_reseed(false); - prandom_start_seed_timer(); + __prandom_start_seed_timer(); return 0; } late_initcall(prandom_reseed); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7d57af21f49..dee6cf4e6d3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg) return 0; } -static void copy_gigantic_page(struct page *dst, struct page *src) -{ - int i; - struct hstate *h = page_hstate(src); - struct page *dst_base = dst; - struct page *src_base = src; - - for (i = 0; i < pages_per_huge_page(h); ) { - cond_resched(); - copy_highpage(dst, src); - - i++; - dst = mem_map_next(dst, dst_base, i); - src = mem_map_next(src, src_base, i); - } -} - -void copy_huge_page(struct page *dst, struct page *src) -{ - int i; - struct hstate *h = page_hstate(src); - - if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { - copy_gigantic_page(dst, src); - return; - } - - might_sleep(); - for (i = 0; i < pages_per_huge_page(h); i++) { - cond_resched(); - copy_highpage(dst + i, src + i); - } -} - static void enqueue_huge_page(struct hstate *h, struct page *page) { int nid = page_to_nid(page); @@ -736,6 +702,23 @@ int PageHuge(struct page *page) } EXPORT_SYMBOL_GPL(PageHuge); +/* + * PageHeadHuge() only returns true for hugetlbfs head page, but not for + * normal or transparent huge pages. 
+ */ +int PageHeadHuge(struct page *page_head) +{ + compound_page_dtor *dtor; + + if (!PageHead(page_head)) + return 0; + + dtor = get_compound_page_dtor(page_head); + + return dtor == free_huge_page; +} +EXPORT_SYMBOL_GPL(PageHeadHuge); + pgoff_t __basepage_index(struct page *page) { struct page *page_head = compound_head(page); diff --git a/mm/memory.c b/mm/memory.c index 0409e8f43fa..5d9025f3b3e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4272,13 +4272,6 @@ void copy_user_huge_page(struct page *dst, struct page *src, #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ #if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS -static struct kmem_cache *page_ptl_cachep; -void __init ptlock_cache_init(void) -{ - page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, - SLAB_PANIC, NULL); -} - bool ptlock_alloc(struct page *page) { spinlock_t *ptl; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index c4403cdf343..eca4a312912 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2950,7 +2950,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) return; } - p += snprintf(p, maxlen, policy_modes[mode]); + p += snprintf(p, maxlen, "%s", policy_modes[mode]); if (flags & MPOL_MODE_FLAGS) { p += snprintf(p, buffer + maxlen - p, "="); diff --git a/mm/migrate.c b/mm/migrate.c index 316e720a202..bb940045fe8 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -442,6 +442,54 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, } /* + * Gigantic pages are so large that we do not guarantee that page++ pointer + * arithmetic will work across the entire page. We need something more + * specialized. + */ +static void __copy_gigantic_page(struct page *dst, struct page *src, + int nr_pages) +{ + int i; + struct page *dst_base = dst; + struct page *src_base = src; + + for (i = 0; i < nr_pages; ) { + cond_resched(); + copy_highpage(dst, src); + + i++; + dst = mem_map_next(dst, dst_base, i); + src = mem_map_next(src, src_base, i); + } +} + +static void copy_huge_page(struct page *dst, struct page *src) +{ + int i; + int nr_pages; + + if (PageHuge(src)) { + /* hugetlbfs page */ + struct hstate *h = page_hstate(src); + nr_pages = pages_per_huge_page(h); + + if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) { + __copy_gigantic_page(dst, src, nr_pages); + return; + } + } else { + /* thp page */ + BUG_ON(!PageTransHuge(src)); + nr_pages = hpage_nr_pages(src); + } + + for (i = 0; i < nr_pages; i++) { + cond_resched(); + copy_highpage(dst + i, src + i); + } +} + +/* * Copy the page to its new location */ void migrate_page_copy(struct page *newpage, struct page *page) diff --git a/mm/slab.c b/mm/slab.c index 0c8967bb201..eb043bf05f4 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -164,72 +164,6 @@ static bool pfmemalloc_active __read_mostly; /* - * kmem_bufctl_t: - * - * Bufctl's are used for linking objs within a slab - * linked offsets. - * - * This implementation relies on "struct page" for locating the cache & - * slab an object belongs to. - * This allows the bufctl structure to be small (one int), but limits - * the number of objects a slab (not a cache) can contain when off-slab - * bufctls are used. The limit is the size of the largest general cache - * that does not use off-slab slabs. - * For 32bit archs with 4 kB pages, is this 56. - * This is not serious, as it is only for large objects, when it is unwise - * to have too many per slab. - * Note: This limit can be raised by introducing a general cache whose size - * is less than 512 (PAGE_SIZE<<3), but greater than 256. 
- */ - -typedef unsigned int kmem_bufctl_t; -#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) -#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) -#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) -#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) - -/* - * struct slab_rcu - * - * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to - * arrange for kmem_freepages to be called via RCU. This is useful if - * we need to approach a kernel structure obliquely, from its address - * obtained without the usual locking. We can lock the structure to - * stabilize it and check it's still at the given address, only if we - * can be sure that the memory has not been meanwhile reused for some - * other kind of object (which our subsystem's lock might corrupt). - * - * rcu_read_lock before reading the address, then rcu_read_unlock after - * taking the spinlock within the structure expected at that address. - */ -struct slab_rcu { - struct rcu_head head; - struct kmem_cache *cachep; - void *addr; -}; - -/* - * struct slab - * - * Manages the objs in a slab. Placed either at the beginning of mem allocated - * for a slab, or allocated from an general cache. - * Slabs are chained into three list: fully used, partial, fully free slabs. - */ -struct slab { - union { - struct { - struct list_head list; - unsigned long colouroff; - void *s_mem; /* including colour offset */ - unsigned int inuse; /* num of objs active in slab */ - kmem_bufctl_t free; - unsigned short nodeid; - }; - struct slab_rcu __slab_cover_slab_rcu; - }; -}; - -/* * struct array_cache * * Purpose: @@ -456,18 +390,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj) return page->slab_cache; } -static inline struct slab *virt_to_slab(const void *obj) -{ - struct page *page = virt_to_head_page(obj); - - VM_BUG_ON(!PageSlab(page)); - return page->slab_page; -} - -static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, +static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, unsigned int idx) { - return slab->s_mem + cache->size * idx; + return page->s_mem + cache->size * idx; } /* @@ -477,9 +403,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, * reciprocal_divide(offset, cache->reciprocal_buffer_size) */ static inline unsigned int obj_to_index(const struct kmem_cache *cache, - const struct slab *slab, void *obj) + const struct page *page, void *obj) { - u32 offset = (obj - slab->s_mem); + u32 offset = (obj - page->s_mem); return reciprocal_divide(offset, cache->reciprocal_buffer_size); } @@ -641,7 +567,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) static size_t slab_mgmt_size(size_t nr_objs, size_t align) { - return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); + return ALIGN(nr_objs * sizeof(unsigned int), align); } /* @@ -660,8 +586,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, * on it. For the latter case, the memory allocated for a * slab is used for: * - * - The struct slab - * - One kmem_bufctl_t for each object + * - One unsigned int for each object * - Padding to respect alignment of @align * - @buffer_size bytes for each object * @@ -674,8 +599,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, mgmt_size = 0; nr_objs = slab_size / buffer_size; - if (nr_objs > SLAB_LIMIT) - nr_objs = SLAB_LIMIT; } else { /* * Ignore padding for the initial guess. 
The padding @@ -685,8 +608,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, * into the memory allocation when taking the padding * into account. */ - nr_objs = (slab_size - sizeof(struct slab)) / - (buffer_size + sizeof(kmem_bufctl_t)); + nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int)); /* * This calculated number will be either the right @@ -696,9 +618,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, > slab_size) nr_objs--; - if (nr_objs > SLAB_LIMIT) - nr_objs = SLAB_LIMIT; - mgmt_size = slab_mgmt_size(nr_objs, align); } *num = nr_objs; @@ -829,10 +748,8 @@ static struct array_cache *alloc_arraycache(int node, int entries, return nc; } -static inline bool is_slab_pfmemalloc(struct slab *slabp) +static inline bool is_slab_pfmemalloc(struct page *page) { - struct page *page = virt_to_page(slabp->s_mem); - return PageSlabPfmemalloc(page); } @@ -841,23 +758,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep, struct array_cache *ac) { struct kmem_cache_node *n = cachep->node[numa_mem_id()]; - struct slab *slabp; + struct page *page; unsigned long flags; if (!pfmemalloc_active) return; spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(slabp, &n->slabs_full, list) - if (is_slab_pfmemalloc(slabp)) + list_for_each_entry(page, &n->slabs_full, lru) + if (is_slab_pfmemalloc(page)) goto out; - list_for_each_entry(slabp, &n->slabs_partial, list) - if (is_slab_pfmemalloc(slabp)) + list_for_each_entry(page, &n->slabs_partial, lru) + if (is_slab_pfmemalloc(page)) goto out; - list_for_each_entry(slabp, &n->slabs_free, list) - if (is_slab_pfmemalloc(slabp)) + list_for_each_entry(page, &n->slabs_free, lru) + if (is_slab_pfmemalloc(page)) goto out; pfmemalloc_active = false; @@ -897,8 +814,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, */ n = cachep->node[numa_mem_id()]; if (!list_empty(&n->slabs_free) && force_refill) { - struct slab *slabp = virt_to_slab(objp); - ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); + struct page *page = virt_to_head_page(objp); + ClearPageSlabPfmemalloc(page); clear_obj_pfmemalloc(&objp); recheck_pfmemalloc_active(cachep, ac); return objp; @@ -1099,8 +1016,7 @@ static void drain_alien_cache(struct kmem_cache *cachep, static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) { - struct slab *slabp = virt_to_slab(objp); - int nodeid = slabp->nodeid; + int nodeid = page_to_nid(virt_to_page(objp)); struct kmem_cache_node *n; struct array_cache *alien = NULL; int node; @@ -1111,7 +1027,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) * Make sure we are not freeing a object from another node to the array * cache on this cpu. 
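As a worked example of the new cache_estimate() arithmetic above (numbers invented: a 4 KiB slab, 256-byte objects, 8-byte alignment, the per-object index array kept on-slab):

	nr_objs = 4096 / (256 + sizeof(unsigned int))	= 15
	mgmt    = ALIGN(15 * sizeof(unsigned int), 8)	= 64 bytes
	15 * 256 + 64 = 3904 <= 4096, so 15 objects fit.

	The old layout additionally had to find room for a struct slab in the
	same space, alongside one kmem_bufctl_t per object.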
*/ - if (likely(slabp->nodeid == node)) + if (likely(nodeid == node)) return 0; n = cachep->node[node]; @@ -1512,6 +1428,8 @@ void __init kmem_cache_init(void) { int i; + BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < + sizeof(struct rcu_head)); kmem_cache = &kmem_cache_boot; setup_node_pointer(kmem_cache); @@ -1687,7 +1605,7 @@ static noinline void slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) { struct kmem_cache_node *n; - struct slab *slabp; + struct page *page; unsigned long flags; int node; @@ -1706,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) continue; spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(slabp, &n->slabs_full, list) { + list_for_each_entry(page, &n->slabs_full, lru) { active_objs += cachep->num; active_slabs++; } - list_for_each_entry(slabp, &n->slabs_partial, list) { - active_objs += slabp->inuse; + list_for_each_entry(page, &n->slabs_partial, lru) { + active_objs += page->active; active_slabs++; } - list_for_each_entry(slabp, &n->slabs_free, list) + list_for_each_entry(page, &n->slabs_free, lru) num_slabs++; free_objects += n->free_objects; @@ -1736,19 +1654,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) * did not request dmaable memory, we might get it, but that * would be relatively rare and ignorable. */ -static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) +static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, + int nodeid) { struct page *page; int nr_pages; - int i; - -#ifndef CONFIG_MMU - /* - * Nommu uses slab's for process anonymous memory allocations, and thus - * requires __GFP_COMP to properly refcount higher order allocations - */ - flags |= __GFP_COMP; -#endif flags |= cachep->allocflags; if (cachep->flags & SLAB_RECLAIM_ACCOUNT) @@ -1772,12 +1682,9 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) else add_zone_page_state(page_zone(page), NR_SLAB_UNRECLAIMABLE, nr_pages); - for (i = 0; i < nr_pages; i++) { - __SetPageSlab(page + i); - - if (page->pfmemalloc) - SetPageSlabPfmemalloc(page + i); - } + __SetPageSlab(page); + if (page->pfmemalloc) + SetPageSlabPfmemalloc(page); memcg_bind_pages(cachep, cachep->gfporder); if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { @@ -1789,17 +1696,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) kmemcheck_mark_unallocated_pages(page, nr_pages); } - return page_address(page); + return page; } /* * Interface to system's page release. 
*/ -static void kmem_freepages(struct kmem_cache *cachep, void *addr) +static void kmem_freepages(struct kmem_cache *cachep, struct page *page) { - unsigned long i = (1 << cachep->gfporder); - struct page *page = virt_to_page(addr); - const unsigned long nr_freed = i; + const unsigned long nr_freed = (1 << cachep->gfporder); kmemcheck_free_shadow(page, cachep->gfporder); @@ -1809,27 +1714,28 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) else sub_zone_page_state(page_zone(page), NR_SLAB_UNRECLAIMABLE, nr_freed); - while (i--) { - BUG_ON(!PageSlab(page)); - __ClearPageSlabPfmemalloc(page); - __ClearPageSlab(page); - page++; - } + + BUG_ON(!PageSlab(page)); + __ClearPageSlabPfmemalloc(page); + __ClearPageSlab(page); + page_mapcount_reset(page); + page->mapping = NULL; memcg_release_pages(cachep, cachep->gfporder); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; - free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); + __free_memcg_kmem_pages(page, cachep->gfporder); } static void kmem_rcu_free(struct rcu_head *head) { - struct slab_rcu *slab_rcu = (struct slab_rcu *)head; - struct kmem_cache *cachep = slab_rcu->cachep; + struct kmem_cache *cachep; + struct page *page; - kmem_freepages(cachep, slab_rcu->addr); - if (OFF_SLAB(cachep)) - kmem_cache_free(cachep->slabp_cache, slab_rcu); + page = container_of(head, struct page, rcu_head); + cachep = page->slab_cache; + + kmem_freepages(cachep, page); } #if DEBUG @@ -1978,19 +1884,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) /* Print some data about the neighboring objects, if they * exist: */ - struct slab *slabp = virt_to_slab(objp); + struct page *page = virt_to_head_page(objp); unsigned int objnr; - objnr = obj_to_index(cachep, slabp, objp); + objnr = obj_to_index(cachep, page, objp); if (objnr) { - objp = index_to_obj(cachep, slabp, objnr - 1); + objp = index_to_obj(cachep, page, objnr - 1); realobj = (char *)objp + obj_offset(cachep); printk(KERN_ERR "Prev obj: start=%p, len=%d\n", realobj, size); print_objinfo(cachep, objp, 2); } if (objnr + 1 < cachep->num) { - objp = index_to_obj(cachep, slabp, objnr + 1); + objp = index_to_obj(cachep, page, objnr + 1); realobj = (char *)objp + obj_offset(cachep); printk(KERN_ERR "Next obj: start=%p, len=%d\n", realobj, size); @@ -2001,11 +1907,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) #endif #if DEBUG -static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) +static void slab_destroy_debugcheck(struct kmem_cache *cachep, + struct page *page) { int i; for (i = 0; i < cachep->num; i++) { - void *objp = index_to_obj(cachep, slabp, i); + void *objp = index_to_obj(cachep, page, i); if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC @@ -2030,7 +1937,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab } } #else -static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) +static void slab_destroy_debugcheck(struct kmem_cache *cachep, + struct page *page) { } #endif @@ -2044,23 +1952,34 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab * Before calling the slab must have been unlinked from the cache. The * cache-lock is not held/needed. 
*/ -static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) +static void slab_destroy(struct kmem_cache *cachep, struct page *page) { - void *addr = slabp->s_mem - slabp->colouroff; + void *freelist; - slab_destroy_debugcheck(cachep, slabp); + freelist = page->freelist; + slab_destroy_debugcheck(cachep, page); if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { - struct slab_rcu *slab_rcu; + struct rcu_head *head; + + /* + * RCU free overloads the RCU head over the LRU. + * slab_page has been overloeaded over the LRU, + * however it is not used from now on so that + * we can use it safely. + */ + head = (void *)&page->rcu_head; + call_rcu(head, kmem_rcu_free); - slab_rcu = (struct slab_rcu *)slabp; - slab_rcu->cachep = cachep; - slab_rcu->addr = addr; - call_rcu(&slab_rcu->head, kmem_rcu_free); } else { - kmem_freepages(cachep, addr); - if (OFF_SLAB(cachep)) - kmem_cache_free(cachep->slabp_cache, slabp); + kmem_freepages(cachep, page); } + + /* + * From now on, we don't use freelist + * although actual page can be freed in rcu context + */ + if (OFF_SLAB(cachep)) + kmem_cache_free(cachep->freelist_cache, freelist); } /** @@ -2097,8 +2016,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, * use off-slab slabs. Needed to avoid a possible * looping condition in cache_grow(). */ - offslab_limit = size - sizeof(struct slab); - offslab_limit /= sizeof(kmem_bufctl_t); + offslab_limit = size; + offslab_limit /= sizeof(unsigned int); if (num > offslab_limit) break; @@ -2220,7 +2139,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) int __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) { - size_t left_over, slab_size, ralign; + size_t left_over, freelist_size, ralign; gfp_t gfp; int err; size_t size = cachep->size; @@ -2339,22 +2258,21 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) if (!cachep->num) return -E2BIG; - slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) - + sizeof(struct slab), cachep->align); + freelist_size = + ALIGN(cachep->num * sizeof(unsigned int), cachep->align); /* * If the slab has been placed off-slab, and we have enough space then * move it on-slab. This is at the expense of any extra colouring. */ - if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { + if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) { flags &= ~CFLGS_OFF_SLAB; - left_over -= slab_size; + left_over -= freelist_size; } if (flags & CFLGS_OFF_SLAB) { /* really off slab. No need for manual alignment */ - slab_size = - cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); + freelist_size = cachep->num * sizeof(unsigned int); #ifdef CONFIG_PAGE_POISONING /* If we're going to use the generic kernel_map_pages() @@ -2371,16 +2289,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) if (cachep->colour_off < cachep->align) cachep->colour_off = cachep->align; cachep->colour = left_over / cachep->colour_off; - cachep->slab_size = slab_size; + cachep->freelist_size = freelist_size; cachep->flags = flags; - cachep->allocflags = 0; + cachep->allocflags = __GFP_COMP; if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) cachep->allocflags |= GFP_DMA; cachep->size = size; cachep->reciprocal_buffer_size = reciprocal_value(size); if (flags & CFLGS_OFF_SLAB) { - cachep->slabp_cache = kmalloc_slab(slab_size, 0u); + cachep->freelist_cache = kmalloc_slab(freelist_size, 0u); /* * This is a possibility for one of the malloc_sizes caches. 
* But since we go off slab only for object size greater than @@ -2388,7 +2306,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) * this should not happen at all. * But leave a BUG_ON for some lucky dude. */ - BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); + BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); } err = setup_cpu_cache(cachep, gfp); @@ -2494,7 +2412,7 @@ static int drain_freelist(struct kmem_cache *cache, { struct list_head *p; int nr_freed; - struct slab *slabp; + struct page *page; nr_freed = 0; while (nr_freed < tofree && !list_empty(&n->slabs_free)) { @@ -2506,18 +2424,18 @@ static int drain_freelist(struct kmem_cache *cache, goto out; } - slabp = list_entry(p, struct slab, list); + page = list_entry(p, struct page, lru); #if DEBUG - BUG_ON(slabp->inuse); + BUG_ON(page->active); #endif - list_del(&slabp->list); + list_del(&page->lru); /* * Safe to drop the lock. The slab is no longer linked * to the cache. */ n->free_objects -= cache->num; spin_unlock_irq(&n->list_lock); - slab_destroy(cache, slabp); + slab_destroy(cache, page); nr_freed++; } out: @@ -2600,52 +2518,42 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) * descriptors in kmem_cache_create, we search through the malloc_sizes array. * If we are creating a malloc_sizes cache here it would not be visible to * kmem_find_general_cachep till the initialization is complete. - * Hence we cannot have slabp_cache same as the original cache. + * Hence we cannot have freelist_cache same as the original cache. */ -static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, - int colour_off, gfp_t local_flags, - int nodeid) +static void *alloc_slabmgmt(struct kmem_cache *cachep, + struct page *page, int colour_off, + gfp_t local_flags, int nodeid) { - struct slab *slabp; + void *freelist; + void *addr = page_address(page); if (OFF_SLAB(cachep)) { /* Slab management obj is off-slab. */ - slabp = kmem_cache_alloc_node(cachep->slabp_cache, + freelist = kmem_cache_alloc_node(cachep->freelist_cache, local_flags, nodeid); - /* - * If the first object in the slab is leaked (it's allocated - * but no one has a reference to it), we want to make sure - * kmemleak does not treat the ->s_mem pointer as a reference - * to the object. Otherwise we will not report the leak. - */ - kmemleak_scan_area(&slabp->list, sizeof(struct list_head), - local_flags); - if (!slabp) + if (!freelist) return NULL; } else { - slabp = objp + colour_off; - colour_off += cachep->slab_size; + freelist = addr + colour_off; + colour_off += cachep->freelist_size; } - slabp->inuse = 0; - slabp->colouroff = colour_off; - slabp->s_mem = objp + colour_off; - slabp->nodeid = nodeid; - slabp->free = 0; - return slabp; + page->active = 0; + page->s_mem = addr + colour_off; + return freelist; } -static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) +static inline unsigned int *slab_freelist(struct page *page) { - return (kmem_bufctl_t *) (slabp + 1); + return (unsigned int *)(page->freelist); } static void cache_init_objs(struct kmem_cache *cachep, - struct slab *slabp) + struct page *page) { int i; for (i = 0; i < cachep->num; i++) { - void *objp = index_to_obj(cachep, slabp, i); + void *objp = index_to_obj(cachep, page, i); #if DEBUG /* need to poison the objs? 
*/ if (cachep->flags & SLAB_POISON) @@ -2681,9 +2589,8 @@ static void cache_init_objs(struct kmem_cache *cachep, if (cachep->ctor) cachep->ctor(objp); #endif - slab_bufctl(slabp)[i] = i + 1; + slab_freelist(page)[i] = i; } - slab_bufctl(slabp)[i - 1] = BUFCTL_END; } static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) @@ -2696,41 +2603,41 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) } } -static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, +static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, int nodeid) { - void *objp = index_to_obj(cachep, slabp, slabp->free); - kmem_bufctl_t next; + void *objp; - slabp->inuse++; - next = slab_bufctl(slabp)[slabp->free]; + objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]); + page->active++; #if DEBUG - slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; - WARN_ON(slabp->nodeid != nodeid); + WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); #endif - slabp->free = next; return objp; } -static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, +static void slab_put_obj(struct kmem_cache *cachep, struct page *page, void *objp, int nodeid) { - unsigned int objnr = obj_to_index(cachep, slabp, objp); - + unsigned int objnr = obj_to_index(cachep, page, objp); #if DEBUG + unsigned int i; + /* Verify that the slab belongs to the intended node */ - WARN_ON(slabp->nodeid != nodeid); + WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); - if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { - printk(KERN_ERR "slab: double free detected in cache " - "'%s', objp %p\n", cachep->name, objp); - BUG(); + /* Verify double free bug */ + for (i = page->active; i < cachep->num; i++) { + if (slab_freelist(page)[i] == objnr) { + printk(KERN_ERR "slab: double free detected in cache " + "'%s', objp %p\n", cachep->name, objp); + BUG(); + } } #endif - slab_bufctl(slabp)[objnr] = slabp->free; - slabp->free = objnr; - slabp->inuse--; + page->active--; + slab_freelist(page)[page->active] = objnr; } /* @@ -2738,23 +2645,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, * for the slab allocator to be able to lookup the cache and slab of a * virtual address for kfree, ksize, and slab debugging. */ -static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, - void *addr) +static void slab_map_pages(struct kmem_cache *cache, struct page *page, + void *freelist) { - int nr_pages; - struct page *page; - - page = virt_to_page(addr); - - nr_pages = 1; - if (likely(!PageCompound(page))) - nr_pages <<= cache->gfporder; - - do { - page->slab_cache = cache; - page->slab_page = slab; - page++; - } while (--nr_pages); + page->slab_cache = cache; + page->freelist = freelist; } /* @@ -2762,9 +2657,9 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, * kmem_cache_alloc() when there are no active objs left in a cache. */ static int cache_grow(struct kmem_cache *cachep, - gfp_t flags, int nodeid, void *objp) + gfp_t flags, int nodeid, struct page *page) { - struct slab *slabp; + void *freelist; size_t offset; gfp_t local_flags; struct kmem_cache_node *n; @@ -2805,20 +2700,20 @@ static int cache_grow(struct kmem_cache *cachep, * Get mem for the objs. Attempt to allocate a physical page from * 'nodeid'. */ - if (!objp) - objp = kmem_getpages(cachep, local_flags, nodeid); - if (!objp) + if (!page) + page = kmem_getpages(cachep, local_flags, nodeid); + if (!page) goto failed; /* Get slab management. 
*/ - slabp = alloc_slabmgmt(cachep, objp, offset, + freelist = alloc_slabmgmt(cachep, page, offset, local_flags & ~GFP_CONSTRAINT_MASK, nodeid); - if (!slabp) + if (!freelist) goto opps1; - slab_map_pages(cachep, slabp, objp); + slab_map_pages(cachep, page, freelist); - cache_init_objs(cachep, slabp); + cache_init_objs(cachep, page); if (local_flags & __GFP_WAIT) local_irq_disable(); @@ -2826,13 +2721,13 @@ static int cache_grow(struct kmem_cache *cachep, spin_lock(&n->list_lock); /* Make slab active. */ - list_add_tail(&slabp->list, &(n->slabs_free)); + list_add_tail(&page->lru, &(n->slabs_free)); STATS_INC_GROWN(cachep); n->free_objects += cachep->num; spin_unlock(&n->list_lock); return 1; opps1: - kmem_freepages(cachep, objp); + kmem_freepages(cachep, page); failed: if (local_flags & __GFP_WAIT) local_irq_disable(); @@ -2880,9 +2775,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, unsigned long caller) { - struct page *page; unsigned int objnr; - struct slab *slabp; + struct page *page; BUG_ON(virt_to_cache(objp) != cachep); @@ -2890,8 +2784,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, kfree_debugcheck(objp); page = virt_to_head_page(objp); - slabp = page->slab_page; - if (cachep->flags & SLAB_RED_ZONE) { verify_redzone_free(cachep, objp); *dbg_redzone1(cachep, objp) = RED_INACTIVE; @@ -2900,14 +2792,11 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, if (cachep->flags & SLAB_STORE_USER) *dbg_userword(cachep, objp) = (void *)caller; - objnr = obj_to_index(cachep, slabp, objp); + objnr = obj_to_index(cachep, page, objp); BUG_ON(objnr >= cachep->num); - BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); + BUG_ON(objp != index_to_obj(cachep, page, objnr)); -#ifdef CONFIG_DEBUG_SLAB_LEAK - slab_bufctl(slabp)[objnr] = BUFCTL_FREE; -#endif if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { @@ -2924,33 +2813,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, return objp; } -static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) -{ - kmem_bufctl_t i; - int entries = 0; - - /* Check slab's freelist to see if this obj is there. */ - for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { - entries++; - if (entries > cachep->num || i >= cachep->num) - goto bad; - } - if (entries != cachep->num - slabp->inuse) { -bad: - printk(KERN_ERR "slab: Internal list corruption detected in " - "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n", - cachep->name, cachep->num, slabp, slabp->inuse, - print_tainted()); - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp, - sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t), - 1); - BUG(); - } -} #else #define kfree_debugcheck(x) do { } while(0) #define cache_free_debugcheck(x,objp,z) (objp) -#define check_slabp(x,y) do { } while(0) #endif static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, @@ -2989,7 +2854,7 @@ retry: while (batchcount > 0) { struct list_head *entry; - struct slab *slabp; + struct page *page; /* Get slab alloc is to come from. 
*/ entry = n->slabs_partial.next; if (entry == &n->slabs_partial) { @@ -2999,8 +2864,7 @@ retry: goto must_grow; } - slabp = list_entry(entry, struct slab, list); - check_slabp(cachep, slabp); + page = list_entry(entry, struct page, lru); check_spinlock_acquired(cachep); /* @@ -3008,24 +2872,23 @@ retry: * there must be at least one object available for * allocation. */ - BUG_ON(slabp->inuse >= cachep->num); + BUG_ON(page->active >= cachep->num); - while (slabp->inuse < cachep->num && batchcount--) { + while (page->active < cachep->num && batchcount--) { STATS_INC_ALLOCED(cachep); STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); - ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp, + ac_put_obj(cachep, ac, slab_get_obj(cachep, page, node)); } - check_slabp(cachep, slabp); /* move slabp to correct slabp list: */ - list_del(&slabp->list); - if (slabp->free == BUFCTL_END) - list_add(&slabp->list, &n->slabs_full); + list_del(&page->lru); + if (page->active == cachep->num) + list_add(&page->list, &n->slabs_full); else - list_add(&slabp->list, &n->slabs_partial); + list_add(&page->list, &n->slabs_partial); } must_grow: @@ -3097,16 +2960,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, *dbg_redzone1(cachep, objp) = RED_ACTIVE; *dbg_redzone2(cachep, objp) = RED_ACTIVE; } -#ifdef CONFIG_DEBUG_SLAB_LEAK - { - struct slab *slabp; - unsigned objnr; - - slabp = virt_to_head_page(objp)->slab_page; - objnr = (unsigned)(objp - slabp->s_mem) / cachep->size; - slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; - } -#endif objp += obj_offset(cachep); if (cachep->ctor && cachep->flags & SLAB_POISON) cachep->ctor(objp); @@ -3248,18 +3101,20 @@ retry: * We may trigger various forms of reclaim on the allowed * set and go into memory reserves if necessary. 
*/ + struct page *page; + if (local_flags & __GFP_WAIT) local_irq_enable(); kmem_flagcheck(cache, flags); - obj = kmem_getpages(cache, local_flags, numa_mem_id()); + page = kmem_getpages(cache, local_flags, numa_mem_id()); if (local_flags & __GFP_WAIT) local_irq_disable(); - if (obj) { + if (page) { /* * Insert into the appropriate per node queues */ - nid = page_to_nid(virt_to_page(obj)); - if (cache_grow(cache, flags, nid, obj)) { + nid = page_to_nid(page); + if (cache_grow(cache, flags, nid, page)) { obj = ____cache_alloc_node(cache, flags | GFP_THISNODE, nid); if (!obj) @@ -3288,7 +3143,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { struct list_head *entry; - struct slab *slabp; + struct page *page; struct kmem_cache_node *n; void *obj; int x; @@ -3308,26 +3163,24 @@ retry: goto must_grow; } - slabp = list_entry(entry, struct slab, list); + page = list_entry(entry, struct page, lru); check_spinlock_acquired_node(cachep, nodeid); - check_slabp(cachep, slabp); STATS_INC_NODEALLOCS(cachep); STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); - BUG_ON(slabp->inuse == cachep->num); + BUG_ON(page->active == cachep->num); - obj = slab_get_obj(cachep, slabp, nodeid); - check_slabp(cachep, slabp); + obj = slab_get_obj(cachep, page, nodeid); n->free_objects--; /* move slabp to correct slabp list: */ - list_del(&slabp->list); + list_del(&page->lru); - if (slabp->free == BUFCTL_END) - list_add(&slabp->list, &n->slabs_full); + if (page->active == cachep->num) + list_add(&page->lru, &n->slabs_full); else - list_add(&slabp->list, &n->slabs_partial); + list_add(&page->lru, &n->slabs_partial); spin_unlock(&n->list_lock); goto done; @@ -3477,23 +3330,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, for (i = 0; i < nr_objects; i++) { void *objp; - struct slab *slabp; + struct page *page; clear_obj_pfmemalloc(&objpp[i]); objp = objpp[i]; - slabp = virt_to_slab(objp); + page = virt_to_head_page(objp); n = cachep->node[node]; - list_del(&slabp->list); + list_del(&page->lru); check_spinlock_acquired_node(cachep, node); - check_slabp(cachep, slabp); - slab_put_obj(cachep, slabp, objp, node); + slab_put_obj(cachep, page, objp, node); STATS_DEC_ACTIVE(cachep); n->free_objects++; - check_slabp(cachep, slabp); /* fixup slab chains */ - if (slabp->inuse == 0) { + if (page->active == 0) { if (n->free_objects > n->free_limit) { n->free_objects -= cachep->num; /* No need to drop any previously held @@ -3502,16 +3353,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, * a different cache, refer to comments before * alloc_slabmgmt. */ - slab_destroy(cachep, slabp); + slab_destroy(cachep, page); } else { - list_add(&slabp->list, &n->slabs_free); + list_add(&page->lru, &n->slabs_free); } } else { /* Unconditionally move a slab to the end of the * partial list on free - maximum time for the * other objects to be freed, too. 
*/ - list_add_tail(&slabp->list, &n->slabs_partial); + list_add_tail(&page->lru, &n->slabs_partial); } } } @@ -3551,10 +3402,10 @@ free_done: p = n->slabs_free.next; while (p != &(n->slabs_free)) { - struct slab *slabp; + struct page *page; - slabp = list_entry(p, struct slab, list); - BUG_ON(slabp->inuse); + page = list_entry(p, struct page, lru); + BUG_ON(page->active); i++; p = p->next; @@ -4158,7 +4009,7 @@ out: #ifdef CONFIG_SLABINFO void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) { - struct slab *slabp; + struct page *page; unsigned long active_objs; unsigned long num_objs; unsigned long active_slabs = 0; @@ -4178,23 +4029,23 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) check_irq_on(); spin_lock_irq(&n->list_lock); - list_for_each_entry(slabp, &n->slabs_full, list) { - if (slabp->inuse != cachep->num && !error) + list_for_each_entry(page, &n->slabs_full, lru) { + if (page->active != cachep->num && !error) error = "slabs_full accounting error"; active_objs += cachep->num; active_slabs++; } - list_for_each_entry(slabp, &n->slabs_partial, list) { - if (slabp->inuse == cachep->num && !error) - error = "slabs_partial inuse accounting error"; - if (!slabp->inuse && !error) - error = "slabs_partial/inuse accounting error"; - active_objs += slabp->inuse; + list_for_each_entry(page, &n->slabs_partial, lru) { + if (page->active == cachep->num && !error) + error = "slabs_partial accounting error"; + if (!page->active && !error) + error = "slabs_partial accounting error"; + active_objs += page->active; active_slabs++; } - list_for_each_entry(slabp, &n->slabs_free, list) { - if (slabp->inuse && !error) - error = "slabs_free/inuse accounting error"; + list_for_each_entry(page, &n->slabs_free, lru) { + if (page->active && !error) + error = "slabs_free accounting error"; num_slabs++; } free_objects += n->free_objects; @@ -4346,15 +4197,27 @@ static inline int add_caller(unsigned long *n, unsigned long v) return 1; } -static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) +static void handle_slab(unsigned long *n, struct kmem_cache *c, + struct page *page) { void *p; - int i; + int i, j; + if (n[0] == n[1]) return; - for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { - if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) + for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { + bool active = true; + + for (j = page->active; j < c->num; j++) { + /* Skip freed item */ + if (slab_freelist(page)[j] == i) { + active = false; + break; + } + } + if (!active) continue; + if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) return; } @@ -4379,7 +4242,7 @@ static void show_symbol(struct seq_file *m, unsigned long address) static int leaks_show(struct seq_file *m, void *p) { struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); - struct slab *slabp; + struct page *page; struct kmem_cache_node *n; const char *name; unsigned long *x = m->private; @@ -4403,10 +4266,10 @@ static int leaks_show(struct seq_file *m, void *p) check_irq_on(); spin_lock_irq(&n->list_lock); - list_for_each_entry(slabp, &n->slabs_full, list) - handle_slab(x, cachep, slabp); - list_for_each_entry(slabp, &n->slabs_partial, list) - handle_slab(x, cachep, slabp); + list_for_each_entry(page, &n->slabs_full, lru) + handle_slab(x, cachep, page); + list_for_each_entry(page, &n->slabs_partial, lru) + handle_slab(x, cachep, page); spin_unlock_irq(&n->list_lock); } name = cachep->name; diff --git a/mm/slub.c b/mm/slub.c index 7e8bd8d828b..545a170ebf9 100644 
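An aside on the mm/slab.c conversion above: the patch drops struct slab and the kmem_bufctl_t chain and instead keeps a bare array of free object indices on the page itself. page->freelist points at that array and page->active counts the objects currently handed out, so slab_get_obj() returns the object named by freelist[active] and bumps the counter, while slab_put_obj() decrements it and stores the returned index at the new position. The stand-alone C sketch below models only that index-array bookkeeping in userspace; struct toy_slab, NUM_OBJS and the toy_* helpers are invented names for illustration and are not kernel code.

#include <assert.h>
#include <stdio.h>

#define NUM_OBJS 4

struct toy_slab {
	unsigned int active;             /* objects currently handed out */
	unsigned int freelist[NUM_OBJS]; /* object indices; free part starts at 'active' */
};

static void toy_init(struct toy_slab *s)
{
	s->active = 0;
	for (unsigned int i = 0; i < NUM_OBJS; i++)
		s->freelist[i] = i;          /* same initial layout as cache_init_objs() */
}

static unsigned int toy_get(struct toy_slab *s)
{
	assert(s->active < NUM_OBJS);
	return s->freelist[s->active++];     /* like slab_get_obj(): take freelist[active] */
}

static void toy_put(struct toy_slab *s, unsigned int objnr)
{
	assert(s->active > 0);
	s->freelist[--s->active] = objnr;    /* like slab_put_obj(): record index back */
}

int main(void)
{
	struct toy_slab s;
	unsigned int a, b;

	toy_init(&s);
	a = toy_get(&s);
	b = toy_get(&s);
	toy_put(&s, a);
	printf("in use: %u, next free index: %u\n", s.active, s.freelist[s.active]);
	toy_put(&s, b);
	return 0;
}

The DEBUG double-free check added in slab_put_obj() falls out of the same layout: an index being freed must not already appear in freelist[active..num).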
--- a/mm/slub.c +++ b/mm/slub.c @@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) /* * Maximum number of desirable partial slabs. * The existence of more partial slabs makes kmem_cache_shrink - * sort the partial list by the number of objects in the. + * sort the partial list by the number of objects in use. */ #define MAX_PARTIAL 10 @@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, * Hooks for other subsystems that check memory allocations. In a typical * production configuration these hooks all should produce no code at all. */ +static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) +{ + kmemleak_alloc(ptr, size, 1, flags); +} + +static inline void kfree_hook(const void *x) +{ + kmemleak_free(x); +} + static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) { flags &= gfp_allowed_mask; @@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size, /* * Enable debugging if selected on the kernel commandline. */ - if (slub_debug && (!slub_debug_slabs || - !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))) + if (slub_debug && (!slub_debug_slabs || (name && + !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))) flags |= slub_debug; return flags; @@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} +static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) +{ + kmemleak_alloc(ptr, size, 1, flags); +} + +static inline void kfree_hook(const void *x) +{ + kmemleak_free(x); +} + static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) { return 0; } static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, - void *object) {} + void *object) +{ + kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, + flags & gfp_allowed_mask); +} -static inline void slab_free_hook(struct kmem_cache *s, void *x) {} +static inline void slab_free_hook(struct kmem_cache *s, void *x) +{ + kmemleak_free_recursive(x, s->flags); +} #endif /* CONFIG_SLUB_DEBUG */ @@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node; * slab on the node for this slabcache. There are no concurrent accesses * possible. * - * Note that this function only works on the kmalloc_node_cache - * when allocating for the kmalloc_node_cache. This is used for bootstrapping + * Note that this function only works on the kmem_cache_node + * when allocating for the kmem_cache_node. This is used for bootstrapping * memory on a fresh node that has no slab structures yet. */ static void early_kmem_cache_node_alloc(int node) @@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) if (page) ptr = page_address(page); - kmemleak_alloc(ptr, size, 1, flags); + kmalloc_large_node_hook(ptr, size, flags); return ptr; } @@ -3336,7 +3363,7 @@ void kfree(const void *x) page = virt_to_head_page(x); if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); - kmemleak_free(x); + kfree_hook(x); __free_memcg_kmem_pages(page, compound_order(page)); return; } diff --git a/mm/swap.c b/mm/swap.c index 7a9f80d451f..84b26aaabd0 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page) static void put_compound_page(struct page *page) { - /* - * hugetlbfs pages cannot be split from under us. 
If this is a - * hugetlbfs page, check refcount on head page and release the page if - * the refcount becomes zero. - */ - if (PageHuge(page)) { - page = compound_head(page); - if (put_page_testzero(page)) - __put_compound_page(page); - - return; - } - if (unlikely(PageTail(page))) { /* __split_huge_page_refcount can run under us */ struct page *page_head = compound_trans_head(page); @@ -111,14 +98,31 @@ static void put_compound_page(struct page *page) * still hot on arches that do not support * this_cpu_cmpxchg_double(). */ - if (PageSlab(page_head)) { - if (PageTail(page)) { + if (PageSlab(page_head) || PageHeadHuge(page_head)) { + if (likely(PageTail(page))) { + /* + * __split_huge_page_refcount + * cannot race here. + */ + VM_BUG_ON(!PageHead(page_head)); + atomic_dec(&page->_mapcount); if (put_page_testzero(page_head)) VM_BUG_ON(1); - - atomic_dec(&page->_mapcount); - goto skip_lock_tail; + if (put_page_testzero(page_head)) + __put_compound_page(page_head); + return; } else + /* + * __split_huge_page_refcount + * run before us, "page" was a + * THP tail. The split + * page_head has been freed + * and reallocated as slab or + * hugetlbfs page of smaller + * order (only possible if + * reallocated as slab on + * x86). + */ goto skip_lock; } /* @@ -132,8 +136,27 @@ static void put_compound_page(struct page *page) /* __split_huge_page_refcount run before us */ compound_unlock_irqrestore(page_head, flags); skip_lock: - if (put_page_testzero(page_head)) - __put_single_page(page_head); + if (put_page_testzero(page_head)) { + /* + * The head page may have been + * freed and reallocated as a + * compound page of smaller + * order and then freed again. + * All we know is that it + * cannot have become: a THP + * page, a compound page of + * higher order, a tail page. + * That is because we still + * hold the refcount of the + * split THP tail and + * page_head was the THP head + * before the split. + */ + if (PageHead(page_head)) + __put_compound_page(page_head); + else + __put_single_page(page_head); + } out_put_single: if (put_page_testzero(page)) __put_single_page(page); @@ -155,7 +178,6 @@ out_put_single: VM_BUG_ON(atomic_read(&page->_count) != 0); compound_unlock_irqrestore(page_head, flags); -skip_lock_tail: if (put_page_testzero(page_head)) { if (PageHead(page_head)) __put_compound_page(page_head); @@ -198,51 +220,52 @@ bool __get_page_tail(struct page *page) * proper PT lock that already serializes against * split_huge_page(). */ + unsigned long flags; bool got = false; - struct page *page_head; - - /* - * If this is a hugetlbfs page it cannot be split under us. Simply - * increment refcount for the head page. - */ - if (PageHuge(page)) { - page_head = compound_head(page); - atomic_inc(&page_head->_count); - got = true; - } else { - unsigned long flags; + struct page *page_head = compound_trans_head(page); - page_head = compound_trans_head(page); - if (likely(page != page_head && - get_page_unless_zero(page_head))) { - - /* Ref to put_compound_page() comment. */ - if (PageSlab(page_head)) { - if (likely(PageTail(page))) { - __get_page_tail_foll(page, false); - return true; - } else { - put_page(page_head); - return false; - } - } - - /* - * page_head wasn't a dangling pointer but it - * may not be a head page anymore by the time - * we obtain the lock. That is ok as long as it - * can't be freed from under us. 
- */ - flags = compound_lock_irqsave(page_head); - /* here __split_huge_page_refcount won't run anymore */ + if (likely(page != page_head && get_page_unless_zero(page_head))) { + /* Ref to put_compound_page() comment. */ + if (PageSlab(page_head) || PageHeadHuge(page_head)) { if (likely(PageTail(page))) { + /* + * This is a hugetlbfs page or a slab + * page. __split_huge_page_refcount + * cannot race here. + */ + VM_BUG_ON(!PageHead(page_head)); __get_page_tail_foll(page, false); - got = true; - } - compound_unlock_irqrestore(page_head, flags); - if (unlikely(!got)) + return true; + } else { + /* + * __split_huge_page_refcount run + * before us, "page" was a THP + * tail. The split page_head has been + * freed and reallocated as slab or + * hugetlbfs page of smaller order + * (only possible if reallocated as + * slab on x86). + */ put_page(page_head); + return false; + } + } + + /* + * page_head wasn't a dangling pointer but it + * may not be a head page anymore by the time + * we obtain the lock. That is ok as long as it + * can't be freed from under us. + */ + flags = compound_lock_irqsave(page_head); + /* here __split_huge_page_refcount won't run anymore */ + if (likely(PageTail(page))) { + __get_page_tail_foll(page, false); + got = true; } + compound_unlock_irqrestore(page_head, flags); + if (unlikely(!got)) + put_page(page_head); } return got; } diff --git a/net/Kconfig b/net/Kconfig index 0715db64a5c..d334678c0bd 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -224,7 +224,7 @@ source "net/hsr/Kconfig" config RPS boolean - depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS + depends on SMP && SYSFS default y config RFS_ACCEL @@ -235,7 +235,7 @@ config RFS_ACCEL config XPS boolean - depends on SMP && USE_GENERIC_SMP_HELPERS + depends on SMP default y config NETPRIO_CGROUP diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index c41d5fbb91d..6e6194fcd88 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -172,6 +172,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head) del_nbp(p); } + br_vlan_flush(br); del_timer_sync(&br->gc_timer); br_sysfs_delbr(br->dev); diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 53f0990eab5..af5ebd18d70 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -34,7 +34,6 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags) static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) { - const struct net_device_ops *ops; struct net_bridge_port *p = NULL; struct net_bridge *br; struct net_device *dev; @@ -53,17 +52,15 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) br = v->parent.br; dev = br->dev; } - ops = dev->netdev_ops; - if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { + if (p) { /* Add VLAN to the device filter if it is supported. * Stricly speaking, this is not necessary now, since * devices are made promiscuous by the bridge, but if * that ever changes this code will allow tagged * traffic to enter the bridge. 
*/ - err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), - vid); + err = vlan_vid_add(dev, htons(ETH_P_8021Q), vid); if (err) return err; } @@ -82,8 +79,8 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) return 0; out_filt: - if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) - ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid); + if (p) + vlan_vid_del(dev, htons(ETH_P_8021Q), vid); return err; } @@ -95,13 +92,8 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid) __vlan_delete_pvid(v, vid); clear_bit(vid, v->untagged_bitmap); - if (v->port_idx) { - struct net_device *dev = v->parent.port->dev; - const struct net_device_ops *ops = dev->netdev_ops; - - if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) - ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid); - } + if (v->port_idx) + vlan_vid_del(v->parent.port->dev, htons(ETH_P_8021Q), vid); clear_bit(vid, v->vlan_bitmap); v->num_vlans--; @@ -398,6 +390,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) void nbp_vlan_flush(struct net_bridge_port *port) { struct net_port_vlans *pv; + u16 vid; ASSERT_RTNL(); @@ -405,6 +398,9 @@ void nbp_vlan_flush(struct net_bridge_port *port) if (!pv) return; + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) + vlan_vid_del(port->dev, htons(ETH_P_8021Q), vid); + __vlan_flush(pv); } diff --git a/net/core/dev.c b/net/core/dev.c index 8ffc52e01ec..7e00a7342ee 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -131,6 +131,7 @@ #include <linux/static_key.h> #include <linux/hashtable.h> #include <linux/vmalloc.h> +#include <linux/if_macvlan.h> #include "net-sysfs.h" @@ -1424,6 +1425,10 @@ void dev_disable_lro(struct net_device *dev) if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); + /* the same for macvlan devices */ + if (netif_is_macvlan(dev)) + dev = macvlan_dev_real_dev(dev); + dev->wanted_features &= ~NETIF_F_LRO; netdev_update_features(dev); @@ -1690,13 +1695,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) kfree_skb(skb); return NET_RX_DROP; } - skb->protocol = eth_type_trans(skb, dev); - /* eth_type_trans() can set pkt_type. - * call skb_scrub_packet() after it to clear pkt_type _after_ calling - * eth_type_trans(). 
- */ skb_scrub_packet(skb, true); + skb->protocol = eth_type_trans(skb, dev); return netif_rx(skb); } diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 5e78d44333b..95897183226 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -106,6 +106,10 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) return skb; } +static struct genl_multicast_group dropmon_mcgrps[] = { + { .name = "events", }, +}; + static void send_dm_alert(struct work_struct *work) { struct sk_buff *skb; @@ -116,7 +120,8 @@ static void send_dm_alert(struct work_struct *work) skb = reset_per_cpu_data(data); if (skb) - genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); + genlmsg_multicast(&net_drop_monitor_family, skb, 0, + 0, GFP_KERNEL); } /* @@ -333,7 +338,7 @@ out: return NOTIFY_DONE; } -static struct genl_ops dropmon_ops[] = { +static const struct genl_ops dropmon_ops[] = { { .cmd = NET_DM_CMD_CONFIG, .doit = net_dm_cmd_config, @@ -364,13 +369,13 @@ static int __init init_net_drop_monitor(void) return -ENOSPC; } - rc = genl_register_family_with_ops(&net_drop_monitor_family, - dropmon_ops, - ARRAY_SIZE(dropmon_ops)); + rc = genl_register_family_with_ops_groups(&net_drop_monitor_family, + dropmon_ops, dropmon_mcgrps); if (rc) { pr_err("Could not create drop monitor netlink family\n"); return rc; } + WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT); rc = register_netdevice_notifier(&dropmon_net_notifier); if (rc < 0) { diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 4e66bf61f58..5325af85eea 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -90,8 +90,8 @@ static struct genl_family hsr_genl_family = { .maxattr = HSR_A_MAX, }; -static struct genl_multicast_group hsr_network_genl_mcgrp = { - .name = "hsr-network", +static const struct genl_multicast_group hsr_mcgrps[] = { + { .name = "hsr-network", }, }; @@ -129,7 +129,7 @@ void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN], goto nla_put_failure; genlmsg_end(skb, msg_head); - genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC); + genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC); return; @@ -163,7 +163,7 @@ void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN]) goto nla_put_failure; genlmsg_end(skb, msg_head); - genlmsg_multicast(skb, 0, hsr_network_genl_mcgrp.id, GFP_ATOMIC); + genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC); return; @@ -249,7 +249,7 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) &hsr_node_if2_age, &hsr_node_if2_seq); if (res < 0) - goto fail; + goto nla_put_failure; res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, nla_data(info->attrs[HSR_A_NODE_ADDR])); @@ -306,15 +306,6 @@ fail: return res; } -static struct genl_ops hsr_ops_get_node_status = { - .cmd = HSR_C_GET_NODE_STATUS, - .flags = 0, - .policy = hsr_genl_policy, - .doit = hsr_get_node_status, - .dumpit = NULL, -}; - - /* Get a list of MacAddressA of all nodes known to this node (other than self). 
*/ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) @@ -398,12 +389,21 @@ fail: } -static struct genl_ops hsr_ops_get_node_list = { - .cmd = HSR_C_GET_NODE_LIST, - .flags = 0, - .policy = hsr_genl_policy, - .doit = hsr_get_node_list, - .dumpit = NULL, +static const struct genl_ops hsr_ops[] = { + { + .cmd = HSR_C_GET_NODE_STATUS, + .flags = 0, + .policy = hsr_genl_policy, + .doit = hsr_get_node_status, + .dumpit = NULL, + }, + { + .cmd = HSR_C_GET_NODE_LIST, + .flags = 0, + .policy = hsr_genl_policy, + .doit = hsr_get_node_list, + .dumpit = NULL, + }, }; int __init hsr_netlink_init(void) @@ -414,30 +414,13 @@ int __init hsr_netlink_init(void) if (rc) goto fail_rtnl_link_register; - rc = genl_register_family(&hsr_genl_family); + rc = genl_register_family_with_ops_groups(&hsr_genl_family, hsr_ops, + hsr_mcgrps); if (rc) goto fail_genl_register_family; - rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_status); - if (rc) - goto fail_genl_register_ops; - - rc = genl_register_ops(&hsr_genl_family, &hsr_ops_get_node_list); - if (rc) - goto fail_genl_register_ops_node_list; - - rc = genl_register_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp); - if (rc) - goto fail_genl_register_mc_group; - return 0; -fail_genl_register_mc_group: - genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_list); -fail_genl_register_ops_node_list: - genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status); -fail_genl_register_ops: - genl_unregister_family(&hsr_genl_family); fail_genl_register_family: rtnl_link_unregister(&hsr_link_ops); fail_rtnl_link_register: @@ -447,10 +430,7 @@ fail_rtnl_link_register: void __exit hsr_netlink_exit(void) { - genl_unregister_mc_group(&hsr_genl_family, &hsr_network_genl_mcgrp); - genl_unregister_ops(&hsr_genl_family, &hsr_ops_get_node_status); genl_unregister_family(&hsr_genl_family); - rtnl_link_unregister(&hsr_link_ops); } diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 426b5df1c98..459e200c08a 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -956,7 +956,7 @@ lowpan_process_data(struct sk_buff *skb) * Traffic class carried in-line * ECN + DSCP (1 byte), Flow Label is elided */ - case 1: /* 10b */ + case 2: /* 10b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop; @@ -967,7 +967,7 @@ lowpan_process_data(struct sk_buff *skb) * Flow Label carried in-line * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided */ - case 2: /* 01b */ + case 1: /* 01b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop; diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index 581a59504bd..1865fdf5a5a 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -315,9 +315,8 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, if (saddr) { saddr->family = AF_IEEE802154; saddr->addr = mac_cb(skb)->sa; - } - if (addr_len) *addr_len = sizeof(*saddr); + } if (flags & MSG_TRUNC) copied = skb->len; diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h index aadec428e6e..cee4425b995 100644 --- a/net/ieee802154/ieee802154.h +++ b/net/ieee802154/ieee802154.h @@ -47,7 +47,24 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info, int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info); extern struct genl_family nl802154_family; -int nl802154_mac_register(void); -int nl802154_phy_register(void); + +/* genetlink ops/groups */ +int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info); +int ieee802154_dump_phy(struct sk_buff *skb, struct 
netlink_callback *cb); +int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info); +int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info); + +enum ieee802154_mcgrp_ids { + IEEE802154_COORD_MCGRP, + IEEE802154_BEACON_MCGRP, +}; + +int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info); +int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info); +int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info); +int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info); +int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info); +int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info); +int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb); #endif diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index 7e49bbcc696..43f1b2bf469 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c @@ -70,7 +70,7 @@ int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group) if (genlmsg_end(msg, hdr) < 0) goto out; - return genlmsg_multicast(msg, 0, group, GFP_ATOMIC); + return genlmsg_multicast(&nl802154_family, msg, 0, group, GFP_ATOMIC); out: nlmsg_free(msg); return -ENOBUFS; @@ -109,31 +109,36 @@ out: return -ENOBUFS; } -int __init ieee802154_nl_init(void) -{ - int rc; - - rc = genl_register_family(&nl802154_family); - if (rc) - goto fail; - - rc = nl802154_mac_register(); - if (rc) - goto fail; +static const struct genl_ops ieee8021154_ops[] = { + /* see nl-phy.c */ + IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy, + ieee802154_dump_phy), + IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface), + IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface), + /* see nl-mac.c */ + IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), + IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), + IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), + IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), + IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), + IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface, + ieee802154_dump_iface), +}; - rc = nl802154_phy_register(); - if (rc) - goto fail; +static const struct genl_multicast_group ieee802154_mcgrps[] = { + [IEEE802154_COORD_MCGRP] = { .name = IEEE802154_MCAST_COORD_NAME, }, + [IEEE802154_BEACON_MCGRP] = { .name = IEEE802154_MCAST_BEACON_NAME, }, +}; - return 0; -fail: - genl_unregister_family(&nl802154_family); - return rc; +int __init ieee802154_nl_init(void) +{ + return genl_register_family_with_ops_groups(&nl802154_family, + ieee8021154_ops, + ieee802154_mcgrps); } void __exit ieee802154_nl_exit(void) { genl_unregister_family(&nl802154_family); } - diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index b0bdd8c51e9..ba5c1e002f3 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -39,14 +39,6 @@ #include "ieee802154.h" -static struct genl_multicast_group ieee802154_coord_mcgrp = { - .name = IEEE802154_MCAST_COORD_NAME, -}; - -static struct genl_multicast_group ieee802154_beacon_mcgrp = { - .name = IEEE802154_MCAST_BEACON_NAME, -}; - int ieee802154_nl_assoc_indic(struct net_device *dev, struct ieee802154_addr *addr, u8 cap) { @@ -72,7 +64,7 @@ int ieee802154_nl_assoc_indic(struct net_device *dev, nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap)) goto nla_put_failure; - return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); + return ieee802154_nl_mcast(msg, 
IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); @@ -98,7 +90,7 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr, nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) goto nla_put_failure; - return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); + return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); @@ -133,7 +125,7 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev, } if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason)) goto nla_put_failure; - return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); + return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); @@ -157,7 +149,7 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) dev->dev_addr) || nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) goto nla_put_failure; - return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); + return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); @@ -183,7 +175,7 @@ int ieee802154_nl_beacon_indic(struct net_device *dev, nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) || nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid)) goto nla_put_failure; - return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); + return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); @@ -214,7 +206,7 @@ int ieee802154_nl_scan_confirm(struct net_device *dev, (edl && nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl))) goto nla_put_failure; - return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); + return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); @@ -238,7 +230,7 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) dev->dev_addr) || nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) goto nla_put_failure; - return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); + return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); @@ -309,8 +301,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) return dev; } -static int ieee802154_associate_req(struct sk_buff *skb, - struct genl_info *info) +int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; @@ -357,8 +348,7 @@ out: return ret; } -static int ieee802154_associate_resp(struct sk_buff *skb, - struct genl_info *info) +int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; @@ -390,8 +380,7 @@ out: return ret; } -static int ieee802154_disassociate_req(struct sk_buff *skb, - struct genl_info *info) +int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; @@ -433,7 +422,7 @@ out: * PAN_coordinator, battery_life_extension = 0, * coord_realignment = 0, security_enable = 0 */ -static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) +int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; @@ -492,7 +481,7 @@ out: return ret; } -static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) +int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; int ret = -EOPNOTSUPP; @@ -530,8 +519,7 @@ out: return ret; } -static int 
ieee802154_list_iface(struct sk_buff *skb, - struct genl_info *info) +int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info) { /* Request for interface name, index, type, IEEE address, PAN Id, short address */ @@ -565,8 +553,7 @@ out_dev: } -static int ieee802154_dump_iface(struct sk_buff *skb, - struct netlink_callback *cb) +int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct net_device *dev; @@ -590,41 +577,3 @@ cont: return skb->len; } - -static struct genl_ops ieee802154_coordinator_ops[] = { - IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), - IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), - IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), - IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), - IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), - IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface, - ieee802154_dump_iface), -}; - -/* - * No need to unregister as family unregistration will do it. - */ -int nl802154_mac_register(void) -{ - int i; - int rc; - - rc = genl_register_mc_group(&nl802154_family, - &ieee802154_coord_mcgrp); - if (rc) - return rc; - - rc = genl_register_mc_group(&nl802154_family, - &ieee802154_beacon_mcgrp); - if (rc) - return rc; - - for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) { - rc = genl_register_ops(&nl802154_family, - &ieee802154_coordinator_ops[i]); - if (rc) - return rc; - } - - return 0; -} diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 22b1a7058fd..d08c7a43dcd 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -77,8 +77,7 @@ out: return -EMSGSIZE; } -static int ieee802154_list_phy(struct sk_buff *skb, - struct genl_info *info) +int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info) { /* Request for interface name, index, type, IEEE address, PAN Id, short address */ @@ -151,8 +150,7 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data) return 0; } -static int ieee802154_dump_phy(struct sk_buff *skb, - struct netlink_callback *cb) +int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb) { struct dump_phy_data data = { .cb = cb, @@ -170,8 +168,7 @@ static int ieee802154_dump_phy(struct sk_buff *skb, return skb->len; } -static int ieee802154_add_iface(struct sk_buff *skb, - struct genl_info *info) +int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct wpan_phy *phy; @@ -273,8 +270,7 @@ out_dev: return rc; } -static int ieee802154_del_iface(struct sk_buff *skb, - struct genl_info *info) +int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct wpan_phy *phy; @@ -356,28 +352,3 @@ out_dev: return rc; } - -static struct genl_ops ieee802154_phy_ops[] = { - IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy, - ieee802154_dump_phy), - IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface), - IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface), -}; - -/* - * No need to unregister as family unregistration will do it. 
- */ -int nl802154_phy_register(void) -{ - int i; - int rc; - - for (i = 0; i < ARRAY_SIZE(ieee802154_phy_ops); i++) { - rc = genl_register_ops(&nl802154_family, - &ieee802154_phy_ops[i]); - if (rc) - return rc; - } - - return 0; -} diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index b28e863fe0a..19e36376d2a 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (IS_ERR(rt)) { err = PTR_ERR(rt); if (err == -ENETUNREACH) - IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); + IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); goto out; } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index caf01176a5e..90ff9570d7d 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -454,6 +454,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); + skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev))); + if (tunnel->dev->type == ARPHRD_ETHER) { skb->protocol = eth_type_trans(skb, tunnel->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -461,8 +463,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, skb->dev = tunnel->dev; } - skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev))); - gro_cells_receive(&tunnel->gro_cells, skb); return 0; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 5d9c845d288..52b802a0cd8 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -126,6 +126,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (!rt->dst.xfrm || rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) { dev->stats.tx_carrier_errors++; + ip_rt_put(rt); goto tx_error_icmp; } tdev = rt->dst.dev; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index cbc85f660d5..876c6ca2d8f 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -830,8 +830,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; - struct sockaddr_in *sin; - struct sockaddr_in6 *sin6; struct sk_buff *skb; int copied, err; @@ -841,13 +839,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto out; - if (addr_len) { - if (family == AF_INET) - *addr_len = sizeof(*sin); - else if (family == AF_INET6 && addr_len) - *addr_len = sizeof(*sin6); - } - if (flags & MSG_ERRQUEUE) { if (family == AF_INET) { return ip_recv_error(sk, msg, len); @@ -877,11 +868,15 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, /* Copy the address and add cmsg data. 
*/ if (family == AF_INET) { - sin = (struct sockaddr_in *) msg->msg_name; - sin->sin_family = AF_INET; - sin->sin_port = 0 /* skb->h.uh->source */; - sin->sin_addr.s_addr = ip_hdr(skb)->saddr; - memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; + + if (sin) { + sin->sin_family = AF_INET; + sin->sin_port = 0 /* skb->h.uh->source */; + sin->sin_addr.s_addr = ip_hdr(skb)->saddr; + memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + *addr_len = sizeof(*sin); + } if (isk->cmsg_flags) ip_cmsg_recv(msg, skb); @@ -890,17 +885,21 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } else if (family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *ip6 = ipv6_hdr(skb); - sin6 = (struct sockaddr_in6 *) msg->msg_name; - sin6->sin6_family = AF_INET6; - sin6->sin6_port = 0; - sin6->sin6_addr = ip6->saddr; - - sin6->sin6_flowinfo = 0; - if (np->sndflow) - sin6->sin6_flowinfo = ip6_flowinfo(ip6); - - sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, - IP6CB(skb)->iif); + struct sockaddr_in6 *sin6 = + (struct sockaddr_in6 *)msg->msg_name; + + if (sin6) { + sin6->sin6_family = AF_INET6; + sin6->sin6_port = 0; + sin6->sin6_addr = ip6->saddr; + sin6->sin6_flowinfo = 0; + if (np->sndflow) + sin6->sin6_flowinfo = ip6_flowinfo(ip6); + sin6->sin6_scope_id = + ipv6_iface_scope_id(&sin6->sin6_addr, + IP6CB(skb)->iif); + *addr_len = sizeof(*sin6); + } if (inet6_sk(sk)->rxopt.all) pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 41e1d2845c8..5cb8ddb505e 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -696,9 +696,6 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto out; - if (addr_len) - *addr_len = sizeof(*sin); - if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len); goto out; @@ -726,6 +723,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); + *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8e8529d3c8c..c4638e6f023 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -808,12 +808,6 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, xmit_size_goal = min_t(u32, gso_size, sk->sk_gso_max_size - 1 - hlen); - /* TSQ : try to have at least two segments in flight - * (one in NIC TX ring, another in Qdisc) - */ - xmit_size_goal = min_t(u32, xmit_size_goal, - sysctl_tcp_limit_output_bytes >> 1); - xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); /* We try hard to avoid divides here */ @@ -1431,7 +1425,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait) do { if (dma_async_is_tx_complete(tp->ucopy.dma_chan, last_issued, &done, - &used) == DMA_SUCCESS) { + &used) == DMA_COMPLETE) { /* Safe to free early-copied skbs now */ __skb_queue_purge(&sk->sk_async_wait_queue); break; @@ -1439,7 +1433,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait) struct sk_buff *skb; while ((skb = skb_peek(&sk->sk_async_wait_queue)) && (dma_async_is_complete(skb->dma_cookie, done, - used) == DMA_SUCCESS)) { + used) == DMA_COMPLETE)) { __skb_dequeue(&sk->sk_async_wait_queue); kfree_skb(skb); } diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 2ab09cbae74..06493736fbc 100644 --- a/net/ipv4/tcp_metrics.c +++ 
b/net/ipv4/tcp_metrics.c @@ -663,10 +663,13 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, void tcp_fastopen_cache_set(struct sock *sk, u16 mss, struct tcp_fastopen_cookie *cookie, bool syn_lost) { + struct dst_entry *dst = __sk_dst_get(sk); struct tcp_metrics_block *tm; + if (!dst) + return; rcu_read_lock(); - tm = tcp_get_metrics(sk, __sk_dst_get(sk), true); + tm = tcp_get_metrics(sk, dst, true); if (tm) { struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen; @@ -988,7 +991,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info) return 0; } -static struct genl_ops tcp_metrics_nl_ops[] = { +static const struct genl_ops tcp_metrics_nl_ops[] = { { .cmd = TCP_METRICS_CMD_GET, .doit = tcp_metrics_nl_cmd_get, @@ -1079,8 +1082,7 @@ void __init tcp_metrics_init(void) if (ret < 0) goto cleanup; ret = genl_register_family_with_ops(&tcp_metrics_nl_family, - tcp_metrics_nl_ops, - ARRAY_SIZE(tcp_metrics_nl_ops)); + tcp_metrics_nl_ops); if (ret < 0) goto cleanup_subsys; return; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 672854664ff..7820f3a7dd7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1875,8 +1875,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, * - better RTT estimation and ACK scheduling * - faster recovery * - high rates + * Alas, some drivers / subsystems require a fair amount + * of queued bytes to ensure line rate. + * One example is wifi aggregation (802.11 AMPDU) */ - limit = max(skb->truesize, sk->sk_pacing_rate >> 10); + limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes, + sk->sk_pacing_rate >> 10); if (atomic_read(&sk->sk_wmem_alloc) > limit) { set_bit(TSQ_THROTTLED, &tp->tsq_flags); @@ -3093,7 +3097,6 @@ void tcp_send_window_probe(struct sock *sk) { if (sk->sk_state == TCP_ESTABLISHED) { tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; - tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; tcp_xmit_probe_skb(sk, 0); } } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index de86e5bc446..5944d7d668d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1235,12 +1235,6 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, int is_udplite = IS_UDPLITE(sk); bool slow; - /* - * Check any passed addresses - */ - if (addr_len) - *addr_len = sizeof(*sin); - if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); @@ -1302,6 +1296,7 @@ try_again: sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 5658d9d5163..12c97d8aa6b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1996,23 +1996,6 @@ static void addrconf_add_mroute(struct net_device *dev) ip6_route_add(&cfg); } -#if IS_ENABLED(CONFIG_IPV6_SIT) -static void sit_route_add(struct net_device *dev) -{ - struct fib6_config cfg = { - .fc_table = RT6_TABLE_MAIN, - .fc_metric = IP6_RT_PRIO_ADDRCONF, - .fc_ifindex = dev->ifindex, - .fc_dst_len = 96, - .fc_flags = RTF_UP | RTF_NONEXTHOP, - .fc_nlinfo.nl_net = dev_net(dev), - }; - - /* prefix length - 96 bits "::d.d.d.d" */ - ip6_route_add(&cfg); -} -#endif - static struct inet6_dev *addrconf_add_dev(struct net_device *dev) { struct inet6_dev *idev; @@ -2542,7 +2525,8 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) struct in6_addr addr; struct net_device *dev; struct net *net = dev_net(idev->dev); - int scope; + int scope, plen; + 
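
A minimal sketch of the recvmsg() pattern the ping, raw and UDP hunks above converge on: the sockaddr and *addr_len are written only once a datagram has actually been received, and only when the caller supplied msg->msg_name. The function and parameter names below are illustrative, not taken from the patch; the IPv6 case is analogous.

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/string.h>

/* Illustrative helper: fill the caller's address buffer only when one
 * was passed in, and set *addr_len only for an address actually filled. */
static void fill_ipv4_source(struct msghdr *msg, int *addr_len, __be32 saddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

	if (!sin)
		return;		/* no msg_name: leave *addr_len untouched */

	sin->sin_family = AF_INET;
	sin->sin_port = 0;
	sin->sin_addr.s_addr = saddr;
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	*addr_len = sizeof(*sin);
}
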
u32 pflags = 0; ASSERT_RTNL(); @@ -2552,12 +2536,16 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) if (idev->dev->flags&IFF_POINTOPOINT) { addr.s6_addr32[0] = htonl(0xfe800000); scope = IFA_LINK; + plen = 64; } else { scope = IPV6_ADDR_COMPATv4; + plen = 96; + pflags |= RTF_NONEXTHOP; } if (addr.s6_addr32[3]) { - add_addr(idev, &addr, 128, scope); + add_addr(idev, &addr, plen, scope); + addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags); return; } @@ -2569,7 +2557,6 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) int flag = scope; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { - int plen; addr.s6_addr32[3] = ifa->ifa_local; @@ -2580,12 +2567,10 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) continue; flag |= IFA_HOST; } - if (idev->dev->flags&IFF_POINTOPOINT) - plen = 64; - else - plen = 96; add_addr(idev, &addr, plen, flag); + addrconf_prefix_route(&addr, plen, idev->dev, 0, + pflags); } } } @@ -2711,7 +2696,6 @@ static void addrconf_sit_config(struct net_device *dev) struct in6_addr addr; ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); - addrconf_prefix_route(&addr, 64, dev, 0, 0); if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) addrconf_add_linklocal(idev, &addr); return; @@ -2721,8 +2705,6 @@ static void addrconf_sit_config(struct net_device *dev) if (dev->flags&IFF_POINTOPOINT) addrconf_add_mroute(dev); - else - sit_route_add(dev); } #endif @@ -2740,8 +2722,6 @@ static void addrconf_gre_config(struct net_device *dev) } ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); - addrconf_prefix_route(&addr, 64, dev, 0, 0); - if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) addrconf_add_linklocal(idev, &addr); } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index ff75313f27a..4fbdb7046d2 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -972,10 +972,10 @@ out: #ifdef CONFIG_SYSCTL sysctl_fail: - ipv6_packet_cleanup(); + pingv6_exit(); #endif pingv6_fail: - pingv6_exit(); + ipv6_packet_cleanup(); ipv6_packet_fail: tcpv6_exit(); tcpv6_fail: diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index df1fa58528c..d6062325db0 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1642,6 +1642,15 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], return ip6_tnl_update(t, &p); } +static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head) +{ + struct net *net = dev_net(dev); + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + + if (dev != ip6n->fb_tnl_dev) + unregister_netdevice_queue(dev, head); +} + static size_t ip6_tnl_get_size(const struct net_device *dev) { return @@ -1706,6 +1715,7 @@ static struct rtnl_link_ops ip6_link_ops __read_mostly = { .validate = ip6_tnl_validate, .newlink = ip6_tnl_newlink, .changelink = ip6_tnl_changelink, + .dellink = ip6_tnl_dellink, .get_size = ip6_tnl_get_size, .fill_info = ip6_tnl_fill_info, }; @@ -1722,9 +1732,9 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = { .priority = 1, }; -static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) +static void __net_exit ip6_tnl_destroy_tunnels(struct net *net) { - struct net *net = dev_net(ip6n->fb_tnl_dev); + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); struct net_device *dev, *aux; int h; struct ip6_tnl *t; @@ -1792,10 +1802,8 @@ err_alloc_dev: static void __net_exit ip6_tnl_exit_net(struct net *net) { - struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); - rtnl_lock(); - ip6_tnl_destroy_tunnels(ip6n); + 
ip6_tnl_destroy_tunnels(net); rtnl_unlock(); } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index f8a55ff1971..3512177deb4 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1726,8 +1726,8 @@ int __init ndisc_init(void) &ndisc_ifinfo_sysctl_change); if (err) goto out_unregister_pernet; -#endif out: +#endif return err; #ifdef CONFIG_SYSCTL diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 3c00842b007..e24ff1df040 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -465,9 +465,6 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, if (flags & MSG_OOB) return -EOPNOTSUPP; - if (addr_len) - *addr_len=sizeof(*sin6); - if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); @@ -506,6 +503,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); + *addr_len = sizeof(*sin6); } sock_recv_ts_and_drops(msg, sk, skb); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index bfc6fcea384..1b4a4a95367 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1619,6 +1619,15 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = { #endif }; +static void ipip6_dellink(struct net_device *dev, struct list_head *head) +{ + struct net *net = dev_net(dev); + struct sit_net *sitn = net_generic(net, sit_net_id); + + if (dev != sitn->fb_tunnel_dev) + unregister_netdevice_queue(dev, head); +} + static struct rtnl_link_ops sit_link_ops __read_mostly = { .kind = "sit", .maxtype = IFLA_IPTUN_MAX, @@ -1630,6 +1639,7 @@ static struct rtnl_link_ops sit_link_ops __read_mostly = { .changelink = ipip6_changelink, .get_size = ipip6_get_size, .fill_info = ipip6_fill_info, + .dellink = ipip6_dellink, }; static struct xfrm_tunnel sit_handler __read_mostly = { @@ -1644,9 +1654,10 @@ static struct xfrm_tunnel ipip_handler __read_mostly = { .priority = 2, }; -static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) +static void __net_exit sit_destroy_tunnels(struct net *net, + struct list_head *head) { - struct net *net = dev_net(sitn->fb_tunnel_dev); + struct sit_net *sitn = net_generic(net, sit_net_id); struct net_device *dev, *aux; int prio; @@ -1721,11 +1732,10 @@ err_alloc_dev: static void __net_exit sit_exit_net(struct net *net) { - struct sit_net *sitn = net_generic(net, sit_net_id); LIST_HEAD(list); rtnl_lock(); - sit_destroy_tunnels(sitn, &list); + sit_destroy_tunnels(net, &list); unregister_netdevice_many(&list); rtnl_unlock(); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f3893e897f7..81eb8cf8389 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -392,9 +392,6 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, int is_udp4; bool slow; - if (addr_len) - *addr_len = sizeof(struct sockaddr_in6); - if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); @@ -480,7 +477,7 @@ try_again: ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); } - + *addr_len = sizeof(*sin6); } if (is_udp4) { if (inet->cmsg_flags) diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c index c3297126928..a37b81fe047 100644 --- a/net/irda/irnetlink.c +++ b/net/irda/irnetlink.c @@ -131,7 +131,7 @@ static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = { [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 }, }; -static struct genl_ops irda_nl_ops[] = { +static const struct genl_ops irda_nl_ops[] = { { .cmd = IRDA_NL_CMD_SET_MODE, .doit = irda_nl_set_mode, @@ -149,8 +149,7 @@ static struct genl_ops irda_nl_ops[] = { int irda_nl_register(void) { - 
return genl_register_family_with_ops(&irda_nl_family, - irda_nl_ops, ARRAY_SIZE(irda_nl_ops)); + return genl_register_family_with_ops(&irda_nl_family, irda_nl_ops); } void irda_nl_unregister(void) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 571db8dd229..da1a1cee1a0 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -518,9 +518,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m if (flags & MSG_OOB) goto out; - if (addr_len) - *addr_len = sizeof(*sin); - skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; @@ -543,6 +540,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); + *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index be446d517bc..4cfd722e915 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -793,7 +793,7 @@ static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { }, }; -static struct genl_ops l2tp_nl_ops[] = { +static const struct genl_ops l2tp_nl_ops[] = { { .cmd = L2TP_CMD_NOOP, .doit = l2tp_nl_cmd_noop, @@ -887,13 +887,8 @@ EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops); static int l2tp_nl_init(void) { - int err; - pr_info("L2TP netlink interface\n"); - err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, - ARRAY_SIZE(l2tp_nl_ops)); - - return err; + return genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops); } static void l2tp_nl_cleanup(void) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 1ded5c6d268..35be035ee0c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3580,7 +3580,7 @@ out: } -static struct genl_ops ip_vs_genl_ops[] __read_mostly = { +static const struct genl_ops ip_vs_genl_ops[] __read_mostly = { { .cmd = IPVS_CMD_NEW_SERVICE, .flags = GENL_ADMIN_PERM, @@ -3679,7 +3679,7 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = { static int __init ip_vs_genl_register(void) { return genl_register_family_with_ops(&ip_vs_genl_family, - ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops)); + ip_vs_genl_ops); } static void ip_vs_genl_unregister(void) diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index a1100640495..69345cebe3a 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -737,7 +737,7 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) * NetLabel Generic NETLINK Command Definitions */ -static struct genl_ops netlbl_cipsov4_ops[] = { +static const struct genl_ops netlbl_cipsov4_ops[] = { { .cmd = NLBL_CIPSOV4_C_ADD, .flags = GENL_ADMIN_PERM, @@ -783,5 +783,5 @@ static struct genl_ops netlbl_cipsov4_ops[] = { int __init netlbl_cipsov4_genl_init(void) { return genl_register_family_with_ops(&netlbl_cipsov4_gnl_family, - netlbl_cipsov4_ops, ARRAY_SIZE(netlbl_cipsov4_ops)); + netlbl_cipsov4_ops); } diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index dd1c37d7acb..8ef83ee97c6 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c @@ -705,7 +705,7 @@ version_failure: * NetLabel Generic NETLINK Command Definitions */ -static struct genl_ops netlbl_mgmt_genl_ops[] = { +static const struct genl_ops netlbl_mgmt_genl_ops[] = { { .cmd = NLBL_MGMT_C_ADD, .flags = GENL_ADMIN_PERM, @@ -779,5 +779,5 @@ static struct genl_ops 
netlbl_mgmt_genl_ops[] = { int __init netlbl_mgmt_genl_init(void) { return genl_register_family_with_ops(&netlbl_mgmt_gnl_family, - netlbl_mgmt_genl_ops, ARRAY_SIZE(netlbl_mgmt_genl_ops)); + netlbl_mgmt_genl_ops); } diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 8f0897407a2..43817d73ccf 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1323,7 +1323,7 @@ unlabel_staticlistdef_return: * NetLabel Generic NETLINK Command Definitions */ -static struct genl_ops netlbl_unlabel_genl_ops[] = { +static const struct genl_ops netlbl_unlabel_genl_ops[] = { { .cmd = NLBL_UNLABEL_C_STATICADD, .flags = GENL_ADMIN_PERM, @@ -1397,7 +1397,7 @@ static struct genl_ops netlbl_unlabel_genl_ops[] = { int __init netlbl_unlabel_genl_init(void) { return genl_register_family_with_ops(&netlbl_unlabel_gnl_family, - netlbl_unlabel_genl_ops, ARRAY_SIZE(netlbl_unlabel_genl_ops)); + netlbl_unlabel_genl_ops); } /* diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 8df7f64c6db..f0176e1a5a8 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2017,7 +2017,7 @@ out: * netlink_set_err - report error to broadcast listeners * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() * @portid: the PORTID of a process that we want to skip (if any) - * @groups: the broadcast group that will notice the error + * @group: the broadcast group that will notice the error * @code: error code, must be negative (as usual in kernelspace) * * This function returns the number of broadcast listeners that have set the diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 0c741cec4d0..7dbc4f732c7 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -65,12 +65,24 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE]; * To avoid an allocation at boot of just one unsigned long, * declare it global instead. * Bit 0 is marked as already used since group 0 is invalid. + * Bit 1 is marked as already used since the drop-monitor code + * abuses the API and thinks it can statically use group 1. + * That group will typically conflict with other groups that + * any proper users use. + * Bit 16 is marked as used since it's used for generic netlink + * and the code no longer marks pre-reserved IDs as used. + * Bit 17 is marked as already used since the VFS quota code + * also abused this API and relied on family == group ID, we + * cater to that by giving it a static family and group ID. 
*/ -static unsigned long mc_group_start = 0x1; +static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) | + BIT(GENL_ID_VFS_DQUOT); static unsigned long *mc_groups = &mc_group_start; static unsigned long mc_groups_longs = 1; -static int genl_ctrl_event(int event, void *data); +static int genl_ctrl_event(int event, struct genl_family *family, + const struct genl_multicast_group *grp, + int grp_id); static inline unsigned int genl_family_hash(unsigned int id) { @@ -106,13 +118,13 @@ static struct genl_family *genl_family_find_byname(char *name) return NULL; } -static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family) +static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family) { - struct genl_ops *ops; + int i; - list_for_each_entry(ops, &family->ops_list, ops_list) - if (ops->cmd == cmd) - return ops; + for (i = 0; i < family->n_ops; i++) + if (family->ops[i].cmd == cmd) + return &family->ops[i]; return NULL; } @@ -126,7 +138,8 @@ static u16 genl_generate_id(void) int i; for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { - if (!genl_family_find_byid(id_gen_idx)) + if (id_gen_idx != GENL_ID_VFS_DQUOT && + !genl_family_find_byid(id_gen_idx)) return id_gen_idx; if (++id_gen_idx > GENL_MAX_ID) id_gen_idx = GENL_MIN_ID; @@ -135,62 +148,110 @@ static u16 genl_generate_id(void) return 0; } -static struct genl_multicast_group notify_grp; - -/** - * genl_register_mc_group - register a multicast group - * - * Registers the specified multicast group and notifies userspace - * about the new group. - * - * Returns 0 on success or a negative error code. - * - * @family: The generic netlink family the group shall be registered for. - * @grp: The group to register, must have a name. - */ -int genl_register_mc_group(struct genl_family *family, - struct genl_multicast_group *grp) +static int genl_allocate_reserve_groups(int n_groups, int *first_id) { - int id; unsigned long *new_groups; - int err = 0; + int start = 0; + int i; + int id; + bool fits; + + do { + if (start == 0) + id = find_first_zero_bit(mc_groups, + mc_groups_longs * + BITS_PER_LONG); + else + id = find_next_zero_bit(mc_groups, + mc_groups_longs * BITS_PER_LONG, + start); + + fits = true; + for (i = id; + i < min_t(int, id + n_groups, + mc_groups_longs * BITS_PER_LONG); + i++) { + if (test_bit(i, mc_groups)) { + start = i; + fits = false; + break; + } + } - BUG_ON(grp->name[0] == '\0'); - BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL); + if (id >= mc_groups_longs * BITS_PER_LONG) { + unsigned long new_longs = mc_groups_longs + + BITS_TO_LONGS(n_groups); + size_t nlen = new_longs * sizeof(unsigned long); + + if (mc_groups == &mc_group_start) { + new_groups = kzalloc(nlen, GFP_KERNEL); + if (!new_groups) + return -ENOMEM; + mc_groups = new_groups; + *mc_groups = mc_group_start; + } else { + new_groups = krealloc(mc_groups, nlen, + GFP_KERNEL); + if (!new_groups) + return -ENOMEM; + mc_groups = new_groups; + for (i = 0; i < BITS_TO_LONGS(n_groups); i++) + mc_groups[mc_groups_longs + i] = 0; + } + mc_groups_longs = new_longs; + } + } while (!fits); - genl_lock_all(); + for (i = id; i < id + n_groups; i++) + set_bit(i, mc_groups); + *first_id = id; + return 0; +} - /* special-case our own group */ - if (grp == ¬ify_grp) - id = GENL_ID_CTRL; - else - id = find_first_zero_bit(mc_groups, - mc_groups_longs * BITS_PER_LONG); +static struct genl_family genl_ctrl; +static int genl_validate_assign_mc_groups(struct genl_family *family) +{ + int first_id; + int n_groups = family->n_mcgrps; + int err, i; + bool 
groups_allocated = false; - if (id >= mc_groups_longs * BITS_PER_LONG) { - size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long); + if (!n_groups) + return 0; - if (mc_groups == &mc_group_start) { - new_groups = kzalloc(nlen, GFP_KERNEL); - if (!new_groups) { - err = -ENOMEM; - goto out; - } - mc_groups = new_groups; - *mc_groups = mc_group_start; - } else { - new_groups = krealloc(mc_groups, nlen, GFP_KERNEL); - if (!new_groups) { - err = -ENOMEM; - goto out; - } - mc_groups = new_groups; - mc_groups[mc_groups_longs] = 0; - } - mc_groups_longs++; + for (i = 0; i < n_groups; i++) { + const struct genl_multicast_group *grp = &family->mcgrps[i]; + + if (WARN_ON(grp->name[0] == '\0')) + return -EINVAL; + if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL)) + return -EINVAL; + } + + /* special-case our own group and hacks */ + if (family == &genl_ctrl) { + first_id = GENL_ID_CTRL; + BUG_ON(n_groups != 1); + } else if (strcmp(family->name, "NET_DM") == 0) { + first_id = 1; + BUG_ON(n_groups != 1); + } else if (strcmp(family->name, "VFS_DQUOT") == 0) { + first_id = GENL_ID_VFS_DQUOT; + BUG_ON(n_groups != 1); + } else { + groups_allocated = true; + err = genl_allocate_reserve_groups(n_groups, &first_id); + if (err) + return err; } + family->mcgrp_offset = first_id; + + /* if still initializing, can't and don't need to to realloc bitmaps */ + if (!init_net.genl_sock) + return 0; + if (family->netnsok) { struct net *net; @@ -206,9 +267,7 @@ int genl_register_mc_group(struct genl_family *family, * number of _possible_ groups has been * increased on some sockets which is ok. */ - rcu_read_unlock(); - netlink_table_ungrab(); - goto out; + break; } } rcu_read_unlock(); @@ -216,152 +275,67 @@ int genl_register_mc_group(struct genl_family *family, } else { err = netlink_change_ngroups(init_net.genl_sock, mc_groups_longs * BITS_PER_LONG); - if (err) - goto out; } - grp->id = id; - set_bit(id, mc_groups); - list_add_tail(&grp->list, &family->mcast_groups); - grp->family = family; + if (groups_allocated && err) { + for (i = 0; i < family->n_mcgrps; i++) + clear_bit(family->mcgrp_offset + i, mc_groups); + } - genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp); - out: - genl_unlock_all(); return err; } -EXPORT_SYMBOL(genl_register_mc_group); -static void __genl_unregister_mc_group(struct genl_family *family, - struct genl_multicast_group *grp) +static void genl_unregister_mc_groups(struct genl_family *family) { struct net *net; - BUG_ON(grp->family != family); + int i; netlink_table_grab(); rcu_read_lock(); - for_each_net_rcu(net) - __netlink_clear_multicast_users(net->genl_sock, grp->id); + for_each_net_rcu(net) { + for (i = 0; i < family->n_mcgrps; i++) + __netlink_clear_multicast_users( + net->genl_sock, family->mcgrp_offset + i); + } rcu_read_unlock(); netlink_table_ungrab(); - clear_bit(grp->id, mc_groups); - list_del(&grp->list); - genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp); - grp->id = 0; - grp->family = NULL; -} + for (i = 0; i < family->n_mcgrps; i++) { + int grp_id = family->mcgrp_offset + i; -/** - * genl_unregister_mc_group - unregister a multicast group - * - * Unregisters the specified multicast group and notifies userspace - * about it. All current listeners on the group are removed. - * - * Note: It is not necessary to unregister all multicast groups before - * unregistering the family, unregistering the family will cause - * all assigned multicast groups to be unregistered automatically. - * - * @family: Generic netlink family the group belongs to. 
- * @grp: The group to unregister, must have been registered successfully - * previously. - */ -void genl_unregister_mc_group(struct genl_family *family, - struct genl_multicast_group *grp) -{ - genl_lock_all(); - __genl_unregister_mc_group(family, grp); - genl_unlock_all(); + if (grp_id != 1) + clear_bit(grp_id, mc_groups); + genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family, + &family->mcgrps[i], grp_id); + } } -EXPORT_SYMBOL(genl_unregister_mc_group); -static void genl_unregister_mc_groups(struct genl_family *family) +static int genl_validate_ops(struct genl_family *family) { - struct genl_multicast_group *grp, *tmp; + const struct genl_ops *ops = family->ops; + unsigned int n_ops = family->n_ops; + int i, j; - list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list) - __genl_unregister_mc_group(family, grp); -} - -/** - * genl_register_ops - register generic netlink operations - * @family: generic netlink family - * @ops: operations to be registered - * - * Registers the specified operations and assigns them to the specified - * family. Either a doit or dumpit callback must be specified or the - * operation will fail. Only one operation structure per command - * identifier may be registered. - * - * See include/net/genetlink.h for more documenation on the operations - * structure. - * - * Returns 0 on success or a negative error code. - */ -int genl_register_ops(struct genl_family *family, struct genl_ops *ops) -{ - int err = -EINVAL; + if (WARN_ON(n_ops && !ops)) + return -EINVAL; - if (ops->dumpit == NULL && ops->doit == NULL) - goto errout; + if (!n_ops) + return 0; - if (genl_get_cmd(ops->cmd, family)) { - err = -EEXIST; - goto errout; + for (i = 0; i < n_ops; i++) { + if (ops[i].dumpit == NULL && ops[i].doit == NULL) + return -EINVAL; + for (j = i + 1; j < n_ops; j++) + if (ops[i].cmd == ops[j].cmd) + return -EINVAL; } - if (ops->dumpit) - ops->flags |= GENL_CMD_CAP_DUMP; - if (ops->doit) - ops->flags |= GENL_CMD_CAP_DO; - if (ops->policy) - ops->flags |= GENL_CMD_CAP_HASPOL; + /* family is not registered yet, so no locking needed */ + family->ops = ops; + family->n_ops = n_ops; - genl_lock_all(); - list_add_tail(&ops->ops_list, &family->ops_list); - genl_unlock_all(); - - genl_ctrl_event(CTRL_CMD_NEWOPS, ops); - err = 0; -errout: - return err; -} -EXPORT_SYMBOL(genl_register_ops); - -/** - * genl_unregister_ops - unregister generic netlink operations - * @family: generic netlink family - * @ops: operations to be unregistered - * - * Unregisters the specified operations and unassigns them from the - * specified family. The operation blocks until the current message - * processing has finished and doesn't start again until the - * unregister process has finished. - * - * Note: It is not necessary to unregister all operations before - * unregistering the family, unregistering the family will cause - * all assigned operations to be unregistered automatically. - * - * Returns 0 on success or a negative error code. 
- */ -int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops) -{ - struct genl_ops *rc; - - genl_lock_all(); - list_for_each_entry(rc, &family->ops_list, ops_list) { - if (rc == ops) { - list_del(&ops->ops_list); - genl_unlock_all(); - genl_ctrl_event(CTRL_CMD_DELOPS, ops); - return 0; - } - } - genl_unlock_all(); - - return -ENOENT; + return 0; } -EXPORT_SYMBOL(genl_unregister_ops); /** * __genl_register_family - register a generic netlink family @@ -372,11 +346,14 @@ EXPORT_SYMBOL(genl_unregister_ops); * The family id may equal GENL_ID_GENERATE causing an unique id to * be automatically generated and assigned. * + * The family's ops array must already be assigned, you can use the + * genl_register_family_with_ops() helper function. + * * Return 0 on success or a negative error code. */ int __genl_register_family(struct genl_family *family) { - int err = -EINVAL; + int err = -EINVAL, i; if (family->id && family->id < GENL_MIN_ID) goto errout; @@ -384,8 +361,9 @@ int __genl_register_family(struct genl_family *family) if (family->id > GENL_MAX_ID) goto errout; - INIT_LIST_HEAD(&family->ops_list); - INIT_LIST_HEAD(&family->mcast_groups); + err = genl_validate_ops(family); + if (err) + return err; genl_lock_all(); @@ -418,10 +396,18 @@ int __genl_register_family(struct genl_family *family) } else family->attrbuf = NULL; + err = genl_validate_assign_mc_groups(family); + if (err) + goto errout_locked; + list_add_tail(&family->family_list, genl_family_chain(family->id)); genl_unlock_all(); - genl_ctrl_event(CTRL_CMD_NEWFAMILY, family); + /* send all events */ + genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0); + for (i = 0; i < family->n_mcgrps; i++) + genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family, + &family->mcgrps[i], family->mcgrp_offset + i); return 0; @@ -433,52 +419,6 @@ errout: EXPORT_SYMBOL(__genl_register_family); /** - * __genl_register_family_with_ops - register a generic netlink family - * @family: generic netlink family - * @ops: operations to be registered - * @n_ops: number of elements to register - * - * Registers the specified family and operations from the specified table. - * Only one family may be registered with the same family name or identifier. - * - * The family id may equal GENL_ID_GENERATE causing an unique id to - * be automatically generated and assigned. - * - * Either a doit or dumpit callback must be specified for every registered - * operation or the function will fail. Only one operation structure per - * command identifier may be registered. - * - * See include/net/genetlink.h for more documenation on the operations - * structure. - * - * This is equivalent to calling genl_register_family() followed by - * genl_register_ops() for every operation entry in the table taking - * care to unregister the family on error path. - * - * Return 0 on success or a negative error code. 
- */ -int __genl_register_family_with_ops(struct genl_family *family, - struct genl_ops *ops, size_t n_ops) -{ - int err, i; - - err = __genl_register_family(family); - if (err) - return err; - - for (i = 0; i < n_ops; ++i, ++ops) { - err = genl_register_ops(family, ops); - if (err) - goto err_out; - } - return 0; -err_out: - genl_unregister_family(family); - return err; -} -EXPORT_SYMBOL(__genl_register_family_with_ops); - -/** * genl_unregister_family - unregister generic netlink family * @family: generic netlink family * @@ -499,11 +439,11 @@ int genl_unregister_family(struct genl_family *family) continue; list_del(&rc->family_list); - INIT_LIST_HEAD(&family->ops_list); + family->n_ops = 0; genl_unlock_all(); kfree(family->attrbuf); - genl_ctrl_event(CTRL_CMD_DELFAMILY, family); + genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0); return 0; } @@ -546,7 +486,8 @@ EXPORT_SYMBOL(genlmsg_put); static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { - struct genl_ops *ops = cb->data; + /* our ops are always const - netlink API doesn't propagate that */ + const struct genl_ops *ops = cb->data; int rc; genl_lock(); @@ -557,7 +498,8 @@ static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) static int genl_lock_done(struct netlink_callback *cb) { - struct genl_ops *ops = cb->data; + /* our ops are always const - netlink API doesn't propagate that */ + const struct genl_ops *ops = cb->data; int rc = 0; if (ops->done) { @@ -572,7 +514,7 @@ static int genl_family_rcv_msg(struct genl_family *family, struct sk_buff *skb, struct nlmsghdr *nlh) { - struct genl_ops *ops; + const struct genl_ops *ops; struct net *net = sock_net(skb->sk); struct genl_info info; struct genlmsghdr *hdr = nlmsg_data(nlh); @@ -604,7 +546,8 @@ static int genl_family_rcv_msg(struct genl_family *family, if (!family->parallel_ops) { struct netlink_dump_control c = { .module = family->module, - .data = ops, + /* we have const, but the netlink API doesn't */ + .data = (void *)ops, .dump = genl_lock_dumpit, .done = genl_lock_done, }; @@ -726,24 +669,32 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq, nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr)) goto nla_put_failure; - if (!list_empty(&family->ops_list)) { + if (family->n_ops) { struct nlattr *nla_ops; - struct genl_ops *ops; - int idx = 1; + int i; nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS); if (nla_ops == NULL) goto nla_put_failure; - list_for_each_entry(ops, &family->ops_list, ops_list) { + for (i = 0; i < family->n_ops; i++) { struct nlattr *nest; + const struct genl_ops *ops = &family->ops[i]; + u32 op_flags = ops->flags; - nest = nla_nest_start(skb, idx++); + if (ops->dumpit) + op_flags |= GENL_CMD_CAP_DUMP; + if (ops->doit) + op_flags |= GENL_CMD_CAP_DO; + if (ops->policy) + op_flags |= GENL_CMD_CAP_HASPOL; + + nest = nla_nest_start(skb, i + 1); if (nest == NULL) goto nla_put_failure; if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) || - nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags)) + nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags)) goto nla_put_failure; nla_nest_end(skb, nest); @@ -752,23 +703,26 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq, nla_nest_end(skb, nla_ops); } - if (!list_empty(&family->mcast_groups)) { - struct genl_multicast_group *grp; + if (family->n_mcgrps) { struct nlattr *nla_grps; - int idx = 1; + int i; nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); if (nla_grps == NULL) goto nla_put_failure; - list_for_each_entry(grp, 
&family->mcast_groups, list) { + for (i = 0; i < family->n_mcgrps; i++) { struct nlattr *nest; + const struct genl_multicast_group *grp; + + grp = &family->mcgrps[i]; - nest = nla_nest_start(skb, idx++); + nest = nla_nest_start(skb, i + 1); if (nest == NULL) goto nla_put_failure; - if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || + if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, + family->mcgrp_offset + i) || nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, grp->name)) goto nla_put_failure; @@ -785,9 +739,10 @@ nla_put_failure: return -EMSGSIZE; } -static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid, - u32 seq, u32 flags, struct sk_buff *skb, - u8 cmd) +static int ctrl_fill_mcgrp_info(struct genl_family *family, + const struct genl_multicast_group *grp, + int grp_id, u32 portid, u32 seq, u32 flags, + struct sk_buff *skb, u8 cmd) { void *hdr; struct nlattr *nla_grps; @@ -797,8 +752,8 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid, if (hdr == NULL) return -1; - if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) || - nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id)) + if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) || + nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id)) goto nla_put_failure; nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); @@ -809,7 +764,7 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid, if (nest == NULL) goto nla_put_failure; - if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || + if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) || nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, grp->name)) goto nla_put_failure; @@ -875,8 +830,10 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family, return skb; } -static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp, - u32 portid, int seq, u8 cmd) +static struct sk_buff * +ctrl_build_mcgrp_msg(struct genl_family *family, + const struct genl_multicast_group *grp, + int grp_id, u32 portid, int seq, u8 cmd) { struct sk_buff *skb; int err; @@ -885,7 +842,8 @@ static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp, if (skb == NULL) return ERR_PTR(-ENOBUFS); - err = ctrl_fill_mcgrp_info(grp, portid, seq, 0, skb, cmd); + err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid, + seq, 0, skb, cmd); if (err < 0) { nlmsg_free(skb); return ERR_PTR(err); @@ -947,11 +905,11 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info) return genlmsg_reply(msg, info); } -static int genl_ctrl_event(int event, void *data) +static int genl_ctrl_event(int event, struct genl_family *family, + const struct genl_multicast_group *grp, + int grp_id) { struct sk_buff *msg; - struct genl_family *family; - struct genl_multicast_group *grp; /* genl is still initialising */ if (!init_net.genl_sock) @@ -960,14 +918,13 @@ static int genl_ctrl_event(int event, void *data) switch (event) { case CTRL_CMD_NEWFAMILY: case CTRL_CMD_DELFAMILY: - family = data; + WARN_ON(grp); msg = ctrl_build_family_msg(family, 0, 0, event); break; case CTRL_CMD_NEWMCAST_GRP: case CTRL_CMD_DELMCAST_GRP: - grp = data; - family = grp->family; - msg = ctrl_build_mcgrp_msg(data, 0, 0, event); + BUG_ON(!grp); + msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event); break; default: return -EINVAL; @@ -977,26 +934,29 @@ static int genl_ctrl_event(int event, void *data) return PTR_ERR(msg); if (!family->netnsok) { - genlmsg_multicast_netns(&init_net, msg, 0, - GENL_ID_CTRL, 
GFP_KERNEL); + genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0, + 0, GFP_KERNEL); } else { rcu_read_lock(); - genlmsg_multicast_allns(msg, 0, GENL_ID_CTRL, GFP_ATOMIC); + genlmsg_multicast_allns(&genl_ctrl, msg, 0, + 0, GFP_ATOMIC); rcu_read_unlock(); } return 0; } -static struct genl_ops genl_ctrl_ops = { - .cmd = CTRL_CMD_GETFAMILY, - .doit = ctrl_getfamily, - .dumpit = ctrl_dumpfamily, - .policy = ctrl_policy, +static struct genl_ops genl_ctrl_ops[] = { + { + .cmd = CTRL_CMD_GETFAMILY, + .doit = ctrl_getfamily, + .dumpit = ctrl_dumpfamily, + .policy = ctrl_policy, + }, }; -static struct genl_multicast_group notify_grp = { - .name = "notify", +static struct genl_multicast_group genl_ctrl_groups[] = { + { .name = "notify", }, }; static int __net_init genl_pernet_init(struct net *net) @@ -1036,7 +996,8 @@ static int __init genl_init(void) for (i = 0; i < GENL_FAM_TAB_SIZE; i++) INIT_LIST_HEAD(&family_ht[i]); - err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1); + err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops, + genl_ctrl_groups); if (err < 0) goto problem; @@ -1044,10 +1005,6 @@ static int __init genl_init(void) if (err) goto problem; - err = genl_register_mc_group(&genl_ctrl, ¬ify_grp); - if (err < 0) - goto problem; - return 0; problem: @@ -1085,14 +1042,18 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group, return err; } -int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, unsigned int group, - gfp_t flags) +int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb, + u32 portid, unsigned int group, gfp_t flags) { + if (group >= family->n_mcgrps) + return -EINVAL; + group = family->mcgrp_offset + group; return genlmsg_mcast(skb, portid, group, flags); } EXPORT_SYMBOL(genlmsg_multicast_allns); -void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group, +void genl_notify(struct genl_family *family, + struct sk_buff *skb, struct net *net, u32 portid, u32 group, struct nlmsghdr *nlh, gfp_t flags) { struct sock *sk = net->genl_sock; @@ -1101,6 +1062,9 @@ void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group, if (nlh) report = nlmsg_report(nlh); + if (group >= family->n_mcgrps) + return; + group = family->mcgrp_offset + group; nlmsg_notify(sk, skb, portid, group, report, flags); } EXPORT_SYMBOL(genl_notify); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 84b7e3ea7b7..a9b2342d525 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -30,8 +30,8 @@ #include "nfc.h" #include "llcp.h" -static struct genl_multicast_group nfc_genl_event_mcgrp = { - .name = NFC_GENL_MCAST_EVENT_NAME, +static const struct genl_multicast_group nfc_genl_mcgrps[] = { + { .name = NFC_GENL_MCAST_EVENT_NAME, }, }; static struct genl_family nfc_genl_family = { @@ -194,7 +194,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev) genlmsg_end(msg, hdr); - return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); + return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: genlmsg_cancel(msg, hdr); @@ -223,7 +223,7 @@ int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -255,7 +255,7 @@ int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + 
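
After this series a generic netlink user declares its ops and multicast groups as const arrays, hands both to the registration helper, and addresses groups by index relative to the family rather than by a global ID. A minimal sketch under those assumptions; the family name, command number and group name are invented for illustration:

#include <linux/init.h>
#include <net/genetlink.h>

static struct genl_family example_family = {
	.id = GENL_ID_GENERATE,
	.name = "EXAMPLE_FAM",	/* illustrative family name */
	.version = 1,
	.maxattr = 0,
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_ops example_ops[] = {
	{
		.cmd = 1,	/* illustrative command number */
		.doit = example_doit,
	},
};

static const struct genl_multicast_group example_mcgrps[] = {
	{ .name = "events", },
};

static int __init example_init(void)
{
	/* ops and groups are validated and bound in a single call */
	return genl_register_family_with_ops_groups(&example_family,
						    example_ops,
						    example_mcgrps);
}

/* Senders now pass the family and an index into example_mcgrps (0 here),
 * not a global multicast group ID. */
static int example_notify(struct sk_buff *msg)
{
	return genlmsg_multicast(&example_family, msg, 0, 0, GFP_KERNEL);
}
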
genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -285,7 +285,7 @@ int nfc_genl_tm_deactivated(struct nfc_dev *dev) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -318,7 +318,7 @@ int nfc_genl_device_added(struct nfc_dev *dev) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -348,7 +348,7 @@ int nfc_genl_device_removed(struct nfc_dev *dev) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -414,7 +414,7 @@ int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list) genlmsg_end(msg, hdr); - return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); + return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: genlmsg_cancel(msg, hdr); @@ -448,7 +448,7 @@ int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -479,7 +479,7 @@ int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -600,7 +600,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, dev->dep_link_up = true; - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; @@ -632,7 +632,7 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; @@ -1137,7 +1137,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; @@ -1308,7 +1308,7 @@ static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err) genlmsg_end(msg, hdr); - genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); kfree(ctx); @@ -1364,7 +1364,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) return dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); } -static struct genl_ops nfc_genl_ops[] = { +static const struct genl_ops nfc_genl_ops[] = { { .cmd = NFC_CMD_GET_DEVICE, .doit = nfc_genl_get_device, @@ -1536,16 +1536,15 @@ int __init nfc_genl_init(void) { int rc; - rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops, - ARRAY_SIZE(nfc_genl_ops)); + rc = genl_register_family_with_ops_groups(&nfc_genl_family, + nfc_genl_ops, + nfc_genl_mcgrps); if (rc) return rc; - rc = genl_register_mc_group(&nfc_genl_family, &nfc_genl_event_mcgrp); - netlink_register_notifier(&nl_notifier); - return rc; + return 0; } /** diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 449e0776a2c..6f5e1dd3be2 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -61,11 +61,11 @@ int ovs_net_id __read_mostly; -static void ovs_notify(struct sk_buff 
*skb, struct genl_info *info, - struct genl_multicast_group *grp) +static void ovs_notify(struct genl_family *family, + struct sk_buff *skb, struct genl_info *info) { - genl_notify(skb, genl_info_net(info), info->snd_portid, - grp->id, info->nlhdr, GFP_KERNEL); + genl_notify(family, skb, genl_info_net(info), info->snd_portid, + 0, info->nlhdr, GFP_KERNEL); } /** @@ -557,7 +557,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, }; -static struct genl_ops dp_packet_genl_ops[] = { +static const struct genl_ops dp_packet_genl_ops[] = { { .cmd = OVS_PACKET_CMD_EXECUTE, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = packet_policy, @@ -877,10 +877,10 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) ovs_unlock(); if (!IS_ERR(reply)) - ovs_notify(reply, info, &ovs_dp_flow_multicast_group); + ovs_notify(&dp_flow_genl_family, reply, info); else - netlink_set_err(sock_net(skb->sk)->genl_sock, 0, - ovs_dp_flow_multicast_group.id, PTR_ERR(reply)); + genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0, + 0, PTR_ERR(reply)); return 0; err_flow_free: @@ -990,7 +990,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) ovs_flow_free(flow, true); ovs_unlock(); - ovs_notify(reply, info, &ovs_dp_flow_multicast_group); + ovs_notify(&dp_flow_genl_family, reply, info); return 0; unlock: ovs_unlock(); @@ -1034,7 +1034,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -static struct genl_ops dp_flow_genl_ops[] = { +static const struct genl_ops dp_flow_genl_ops[] = { { .cmd = OVS_FLOW_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, @@ -1243,7 +1243,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_unlock(); - ovs_notify(reply, info, &ovs_dp_datapath_multicast_group); + ovs_notify(&dp_datapath_genl_family, reply, info); return 0; err_destroy_local_port: @@ -1308,7 +1308,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) __dp_destroy(dp); ovs_unlock(); - ovs_notify(reply, info, &ovs_dp_datapath_multicast_group); + ovs_notify(&dp_datapath_genl_family, reply, info); return 0; unlock: @@ -1332,14 +1332,14 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) info->snd_seq, OVS_DP_CMD_NEW); if (IS_ERR(reply)) { err = PTR_ERR(reply); - netlink_set_err(sock_net(skb->sk)->genl_sock, 0, - ovs_dp_datapath_multicast_group.id, err); + genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0, + 0, err); err = 0; goto unlock; } ovs_unlock(); - ovs_notify(reply, info, &ovs_dp_datapath_multicast_group); + ovs_notify(&dp_datapath_genl_family, reply, info); return 0; unlock: @@ -1398,7 +1398,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -static struct genl_ops dp_datapath_genl_ops[] = { +static const struct genl_ops dp_datapath_genl_ops[] = { { .cmd = OVS_DP_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. 
*/ .policy = datapath_policy, @@ -1431,7 +1431,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, }; -static struct genl_family dp_vport_genl_family = { +struct genl_family dp_vport_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_VPORT_FAMILY, @@ -1601,7 +1601,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) goto exit_unlock; } - ovs_notify(reply, info, &ovs_dp_vport_multicast_group); + ovs_notify(&dp_vport_genl_family, reply, info); exit_unlock: ovs_unlock(); @@ -1648,7 +1648,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) BUG_ON(err < 0); ovs_unlock(); - ovs_notify(reply, info, &ovs_dp_vport_multicast_group); + ovs_notify(&dp_vport_genl_family, reply, info); return 0; exit_free: @@ -1685,7 +1685,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) err = 0; ovs_dp_detach_port(vport); - ovs_notify(reply, info, &ovs_dp_vport_multicast_group); + ovs_notify(&dp_vport_genl_family, reply, info); exit_unlock: ovs_unlock(); @@ -1759,7 +1759,7 @@ out: return skb->len; } -static struct genl_ops dp_vport_genl_ops[] = { +static const struct genl_ops dp_vport_genl_ops[] = { { .cmd = OVS_VPORT_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = vport_policy, @@ -1785,9 +1785,9 @@ static struct genl_ops dp_vport_genl_ops[] = { struct genl_family_and_ops { struct genl_family *family; - struct genl_ops *ops; + const struct genl_ops *ops; int n_ops; - struct genl_multicast_group *group; + const struct genl_multicast_group *group; }; static const struct genl_family_and_ops dp_genl_families[] = { @@ -1823,17 +1823,14 @@ static int dp_register_genl(void) for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { const struct genl_family_and_ops *f = &dp_genl_families[i]; - err = genl_register_family_with_ops(f->family, f->ops, - f->n_ops); + f->family->ops = f->ops; + f->family->n_ops = f->n_ops; + f->family->mcgrps = f->group; + f->family->n_mcgrps = f->group ? 
1 : 0; + err = genl_register_family(f->family); if (err) goto error; n_registered++; - - if (f->group) { - err = genl_register_mc_group(f->family, f->group); - if (err) - goto error; - } } return 0; diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index d3d14a58aa9..4067ea41be2 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -177,6 +177,7 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n } extern struct notifier_block ovs_dp_device_notifier; +extern struct genl_family dp_vport_genl_family; extern struct genl_multicast_group ovs_dp_vport_multicast_group; void ovs_dp_process_received_packet(struct vport *, struct sk_buff *); diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c index 5c2dab27610..2c631fe76be 100644 --- a/net/openvswitch/dp_notify.c +++ b/net/openvswitch/dp_notify.c @@ -34,15 +34,14 @@ static void dp_detach_port_notify(struct vport *vport) OVS_VPORT_CMD_DEL); ovs_dp_detach_port(vport); if (IS_ERR(notify)) { - netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0, - ovs_dp_vport_multicast_group.id, - PTR_ERR(notify)); + genl_set_err(&dp_vport_genl_family, ovs_dp_get_net(dp), 0, + 0, PTR_ERR(notify)); return; } - genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0, - ovs_dp_vport_multicast_group.id, - GFP_KERNEL); + genlmsg_multicast_netns(&dp_vport_genl_family, + ovs_dp_get_net(dp), notify, 0, + 0, GFP_KERNEL); } void ovs_dp_notify_wq(struct work_struct *work) diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index 12c30f3e643..38946b26e47 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c @@ -139,9 +139,6 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk, MSG_CMSG_COMPAT)) goto out_nofree; - if (addr_len) - *addr_len = sizeof(sa); - skb = skb_recv_datagram(sk, flags, noblock, &rval); if (skb == NULL) goto out_nofree; @@ -162,8 +159,10 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk, rval = (flags & MSG_TRUNC) ? skb->len : copylen; - if (msg->msg_name != NULL) - memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn)); + if (msg->msg_name != NULL) { + memcpy(msg->msg_name, &sa, sizeof(sa)); + *addr_len = sizeof(sa); + } out: skb_free_datagram(sk, skb); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index fdc041c5785..95d84396190 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -88,7 +88,7 @@ struct fq_sched_data { struct fq_flow internal; /* for non classified or high prio packets */ u32 quantum; u32 initial_quantum; - u32 flow_default_rate;/* rate per flow : bytes per second */ + u32 flow_refill_delay; u32 flow_max_rate; /* optional max rate per flow */ u32 flow_plimit; /* max packets per flow */ struct rb_root *fq_root; @@ -115,6 +115,7 @@ static struct fq_flow detached, throttled; static void fq_flow_set_detached(struct fq_flow *f) { f->next = &detached; + f->age = jiffies; } static bool fq_flow_is_detached(const struct fq_flow *f) @@ -209,21 +210,15 @@ static void fq_gc(struct fq_sched_data *q, } } -static const u8 prio2band[TC_PRIO_MAX + 1] = { - 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 -}; - static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) { struct rb_node **p, *parent; struct sock *sk = skb->sk; struct rb_root *root; struct fq_flow *f; - int band; /* warning: no starvation prevention... 
*/ - band = prio2band[skb->priority & TC_PRIO_MAX]; - if (unlikely(band == 0)) + if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL)) return &q->internal; if (unlikely(!sk)) { @@ -373,17 +368,20 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } f->qlen++; - flow_queue_add(f, skb); if (skb_is_retransmit(skb)) q->stat_tcp_retrans++; sch->qstats.backlog += qdisc_pkt_len(skb); if (fq_flow_is_detached(f)) { fq_flow_add_tail(&q->new_flows, f); - if (q->quantum > f->credit) - f->credit = q->quantum; + if (time_after(jiffies, f->age + q->flow_refill_delay)) + f->credit = max_t(u32, f->credit, q->quantum); q->inactive_flows--; qdisc_unthrottled(sch); } + + /* Note: this overwrites f->age */ + flow_queue_add(f, skb); + if (unlikely(f == &q->internal)) { q->stat_internal_packets++; qdisc_unthrottled(sch); @@ -461,7 +459,6 @@ begin: fq_flow_add_tail(&q->old_flows, f); } else { fq_flow_set_detached(f); - f->age = jiffies; q->inactive_flows++; } goto begin; @@ -615,6 +612,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = { [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 }, [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 }, [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 }, + [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 }, }; static int fq_change(struct Qdisc *sch, struct nlattr *opt) @@ -656,7 +654,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) - q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); + pr_warn_ratelimited("sch_fq: defrate %u ignored.\n", + nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE])); if (tb[TCA_FQ_FLOW_MAX_RATE]) q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]); @@ -670,6 +669,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) err = -EINVAL; } + if (tb[TCA_FQ_FLOW_REFILL_DELAY]) { + u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ; + + q->flow_refill_delay = usecs_to_jiffies(usecs_delay); + } + if (!err) err = fq_resize(q, fq_log); @@ -705,7 +710,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt) q->flow_plimit = 100; q->quantum = 2 * psched_mtu(qdisc_dev(sch)); q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); - q->flow_default_rate = 0; + q->flow_refill_delay = msecs_to_jiffies(40); q->flow_max_rate = ~0U; q->rate_enable = 1; q->new_flows.first = NULL; @@ -732,15 +737,16 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) if (opts == NULL) goto nla_put_failure; - /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore, - * do not bother giving its value - */ + /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */ + if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || + nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY, + jiffies_to_usecs(q->flow_refill_delay)) || nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) goto nla_put_failure; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index c9b91cb1cb0..68a27f9796d 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -907,8 +907,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, if (!first || t->last_time_heard > first->last_time_heard) { second = first; first = t; - } - if (!second || t->last_time_heard > 
second->last_time_heard) + } else if (!second || + t->last_time_heard > second->last_time_heard) second = t; } @@ -929,6 +929,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, first = asoc->peer.primary_path; } + if (!second) + second = first; /* If we failed to find a usable transport, just camp on the * primary, even if it is inactive. */ diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index d0d14a04dce..bf04b30a788 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -471,15 +471,6 @@ struct rpc_filelist { umode_t mode; }; -static int rpc_delete_dentry(const struct dentry *dentry) -{ - return 1; -} - -static const struct dentry_operations rpc_dentry_operations = { - .d_delete = rpc_delete_dentry, -}; - static struct inode * rpc_get_inode(struct super_block *sb, umode_t mode) { @@ -1266,7 +1257,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = RPCAUTH_GSSMAGIC; sb->s_op = &s_ops; - sb->s_d_op = &rpc_dentry_operations; + sb->s_d_op = &simple_dentry_operations; sb->s_time_gran = 1; inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO); diff --git a/net/tipc/link.c b/net/tipc/link.c index cf465d66ccd..69cd9bf3f56 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2358,7 +2358,8 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail, *head = frag; skb_frag_list_init(*head); return 0; - } else if (skb_try_coalesce(*head, frag, &headstolen, &delta)) { + } else if (*head && + skb_try_coalesce(*head, frag, &headstolen, &delta)) { kfree_skb_partial(frag, headstolen); } else { if (!*head) diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 8bcd4985d0f..9f72a637636 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -76,9 +76,11 @@ static struct genl_family tipc_genl_family = { .maxattr = 0, }; -static struct genl_ops tipc_genl_ops = { - .cmd = TIPC_GENL_CMD, - .doit = handle_cmd, +static struct genl_ops tipc_genl_ops[] = { + { + .cmd = TIPC_GENL_CMD, + .doit = handle_cmd, + }, }; static int tipc_genl_family_registered; @@ -87,8 +89,7 @@ int tipc_netlink_start(void) { int res; - res = genl_register_family_with_ops(&tipc_genl_family, - &tipc_genl_ops, 1); + res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops); if (res) { pr_err("Failed to register netlink interface\n"); return res; diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index 0694d62e4db..c278b3356f7 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c @@ -279,7 +279,7 @@ int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size); d_dump(2, dev, msg, size); - genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); + genlmsg_multicast(&wimax_gnl_family, skb, 0, 0, GFP_KERNEL); d_printf(1, dev, "CTX: genl multicast done\n"); return 0; } @@ -321,17 +321,6 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name, } EXPORT_SYMBOL_GPL(wimax_msg); - -static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = { - [WIMAX_GNL_MSG_IFIDX] = { - .type = NLA_U32, - }, - [WIMAX_GNL_MSG_DATA] = { - .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */ - }, -}; - - /* * Relays a message from user space to the driver * @@ -340,7 +329,6 @@ static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = { * * This call will block while handling/relaying the message. 
*/ -static int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info) { int result, ifindex; @@ -418,16 +406,3 @@ error_no_wimax_dev: return result; } - -/* - * Generic Netlink glue - */ - -struct genl_ops wimax_gnl_msg_from_user = { - .cmd = WIMAX_GNL_OP_MSG_FROM_USER, - .flags = GENL_ADMIN_PERM, - .policy = wimax_gnl_msg_policy, - .doit = wimax_gnl_doit_msg_from_user, - .dumpit = NULL, -}; - diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c index 7ceffe39d70..eb4580784d9 100644 --- a/net/wimax/op-reset.c +++ b/net/wimax/op-reset.c @@ -92,13 +92,6 @@ int wimax_reset(struct wimax_dev *wimax_dev) EXPORT_SYMBOL(wimax_reset); -static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = { - [WIMAX_GNL_RESET_IFIDX] = { - .type = NLA_U32, - }, -}; - - /* * Exporting to user space over generic netlink * @@ -106,7 +99,6 @@ static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = * * No attributes. */ -static int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) { int result, ifindex; @@ -130,12 +122,3 @@ error_no_wimax_dev: d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); return result; } - - -struct genl_ops wimax_gnl_reset = { - .cmd = WIMAX_GNL_OP_RESET, - .flags = GENL_ADMIN_PERM, - .policy = wimax_gnl_reset_policy, - .doit = wimax_gnl_doit_reset, - .dumpit = NULL, -}; diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c index 7ab60babdd2..403078d670a 100644 --- a/net/wimax/op-rfkill.c +++ b/net/wimax/op-rfkill.c @@ -411,17 +411,6 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev) * just query). */ -static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = { - [WIMAX_GNL_RFKILL_IFIDX] = { - .type = NLA_U32, - }, - [WIMAX_GNL_RFKILL_STATE] = { - .type = NLA_U32 /* enum wimax_rf_state */ - }, -}; - - -static int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info) { int result, ifindex; @@ -457,13 +446,3 @@ error_no_wimax_dev: d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); return result; } - - -struct genl_ops wimax_gnl_rfkill = { - .cmd = WIMAX_GNL_OP_RFKILL, - .flags = GENL_ADMIN_PERM, - .policy = wimax_gnl_rfkill_policy, - .doit = wimax_gnl_doit_rfkill, - .dumpit = NULL, -}; - diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c index aff8776e2d4..995c08c827b 100644 --- a/net/wimax/op-state-get.c +++ b/net/wimax/op-state-get.c @@ -33,13 +33,6 @@ #include "debug-levels.h" -static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = { - [WIMAX_GNL_STGET_IFIDX] = { - .type = NLA_U32, - }, -}; - - /* * Exporting to user space over generic netlink * @@ -48,7 +41,6 @@ static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1 * * No attributes. 
*/ -static int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) { int result, ifindex; @@ -72,12 +64,3 @@ error_no_wimax_dev: d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); return result; } - - -struct genl_ops wimax_gnl_state_get = { - .cmd = WIMAX_GNL_OP_STATE_GET, - .flags = GENL_ADMIN_PERM, - .policy = wimax_gnl_state_get_policy, - .doit = wimax_gnl_doit_state_get, - .dumpit = NULL, -}; diff --git a/net/wimax/stack.c b/net/wimax/stack.c index a6470ac3949..ef2191b969a 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -116,8 +116,9 @@ struct sk_buff *wimax_gnl_re_state_change_alloc( dev_err(dev, "RE_STCH: can't create message\n"); goto error_new; } - data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family, - 0, WIMAX_GNL_RE_STATE_CHANGE); + /* FIXME: sending a group ID as the seq is wrong */ + data = genlmsg_put(report_skb, 0, wimax_gnl_family.mcgrp_offset, + &wimax_gnl_family, 0, WIMAX_GNL_RE_STATE_CHANGE); if (data == NULL) { dev_err(dev, "RE_STCH: can't put data into message\n"); goto error_put; @@ -177,7 +178,7 @@ int wimax_gnl_re_state_change_send( goto out; } genlmsg_end(report_skb, header); - genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); + genlmsg_multicast(&wimax_gnl_family, report_skb, 0, 0, GFP_KERNEL); out: d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n", wimax_dev, report_skb, result); @@ -402,22 +403,44 @@ void wimax_dev_init(struct wimax_dev *wimax_dev) } EXPORT_SYMBOL_GPL(wimax_dev_init); -/* - * This extern is declared here because it's easier to keep track -- - * both declarations are a list of the same - */ -extern struct genl_ops - wimax_gnl_msg_from_user, - wimax_gnl_reset, - wimax_gnl_rfkill, - wimax_gnl_state_get; +static const struct nla_policy wimax_gnl_policy[WIMAX_GNL_ATTR_MAX + 1] = { + [WIMAX_GNL_RESET_IFIDX] = { .type = NLA_U32, }, + [WIMAX_GNL_RFKILL_IFIDX] = { .type = NLA_U32, }, + [WIMAX_GNL_RFKILL_STATE] = { + .type = NLA_U32 /* enum wimax_rf_state */ + }, + [WIMAX_GNL_STGET_IFIDX] = { .type = NLA_U32, }, + [WIMAX_GNL_MSG_IFIDX] = { .type = NLA_U32, }, + [WIMAX_GNL_MSG_DATA] = { + .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */ + }, +}; -static -struct genl_ops *wimax_gnl_ops[] = { - &wimax_gnl_msg_from_user, - &wimax_gnl_reset, - &wimax_gnl_rfkill, - &wimax_gnl_state_get, +static const struct genl_ops wimax_gnl_ops[] = { + { + .cmd = WIMAX_GNL_OP_MSG_FROM_USER, + .flags = GENL_ADMIN_PERM, + .policy = wimax_gnl_policy, + .doit = wimax_gnl_doit_msg_from_user, + }, + { + .cmd = WIMAX_GNL_OP_RESET, + .flags = GENL_ADMIN_PERM, + .policy = wimax_gnl_policy, + .doit = wimax_gnl_doit_reset, + }, + { + .cmd = WIMAX_GNL_OP_RFKILL, + .flags = GENL_ADMIN_PERM, + .policy = wimax_gnl_policy, + .doit = wimax_gnl_doit_rfkill, + }, + { + .cmd = WIMAX_GNL_OP_STATE_GET, + .flags = GENL_ADMIN_PERM, + .policy = wimax_gnl_policy, + .doit = wimax_gnl_doit_state_get, + }, }; @@ -557,8 +580,8 @@ struct genl_family wimax_gnl_family = { .maxattr = WIMAX_GNL_ATTR_MAX, }; -struct genl_multicast_group wimax_gnl_mcg = { - .name = "msg", +static const struct genl_multicast_group wimax_gnl_mcgrps[] = { + { .name = "msg", }, }; @@ -567,7 +590,7 @@ struct genl_multicast_group wimax_gnl_mcg = { static int __init wimax_subsys_init(void) { - int result, cnt; + int result; d_fnstart(4, NULL, "()\n"); d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params, @@ -575,37 +598,18 @@ int __init wimax_subsys_init(void) snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name), "WiMAX"); - result = 
genl_register_family(&wimax_gnl_family); + result = genl_register_family_with_ops_groups(&wimax_gnl_family, + wimax_gnl_ops, + wimax_gnl_mcgrps); if (unlikely(result < 0)) { printk(KERN_ERR "cannot register generic netlink family: %d\n", result); goto error_register_family; } - for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) { - result = genl_register_ops(&wimax_gnl_family, - wimax_gnl_ops[cnt]); - d_printf(4, NULL, "registering generic netlink op code " - "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result); - if (unlikely(result < 0)) { - printk(KERN_ERR "cannot register generic netlink op " - "code %u: %d\n", - wimax_gnl_ops[cnt]->cmd, result); - goto error_register_ops; - } - } - - result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg); - if (result < 0) - goto error_mc_group; d_fnend(4, NULL, "() = 0\n"); return 0; -error_mc_group: -error_register_ops: - for (cnt--; cnt >= 0; cnt--) - genl_unregister_ops(&wimax_gnl_family, - wimax_gnl_ops[cnt]); genl_unregister_family(&wimax_gnl_family); error_register_family: d_fnend(4, NULL, "() = %d\n", result); @@ -619,12 +623,7 @@ module_init(wimax_subsys_init); static void __exit wimax_subsys_exit(void) { - int cnt; wimax_id_table_release(); - genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg); - for (cnt = ARRAY_SIZE(wimax_gnl_ops) - 1; cnt >= 0; cnt--) - genl_unregister_ops(&wimax_gnl_family, - wimax_gnl_ops[cnt]); genl_unregister_family(&wimax_gnl_family); } module_exit(wimax_subsys_exit); diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h index 5dcd9c067bf..b445b82020a 100644 --- a/net/wimax/wimax-internal.h +++ b/net/wimax/wimax-internal.h @@ -84,8 +84,14 @@ void wimax_id_table_release(void); int wimax_rfkill_add(struct wimax_dev *); void wimax_rfkill_rm(struct wimax_dev *); +/* generic netlink */ extern struct genl_family wimax_gnl_family; -extern struct genl_multicast_group wimax_gnl_mcg; + +/* ops */ +int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info); +int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info); +int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info); +int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info); #endif /* #ifdef __KERNEL__ */ #endif /* #ifndef __WIMAX_INTERNAL_H__ */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a7f4e790210..a1eb2107317 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -30,9 +30,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, struct cfg80211_crypto_settings *settings, int cipher_limit); -static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, +static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); -static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, +static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); /* the netlink family */ @@ -47,6 +47,25 @@ static struct genl_family nl80211_fam = { .post_doit = nl80211_post_doit, }; +/* multicast groups */ +enum nl80211_multicast_groups { + NL80211_MCGRP_CONFIG, + NL80211_MCGRP_SCAN, + NL80211_MCGRP_REGULATORY, + NL80211_MCGRP_MLME, + NL80211_MCGRP_TESTMODE /* keep last - ifdef! 
*/ +}; + +static const struct genl_multicast_group nl80211_mcgrps[] = { + [NL80211_MCGRP_CONFIG] = { .name = "config", }, + [NL80211_MCGRP_SCAN] = { .name = "scan", }, + [NL80211_MCGRP_REGULATORY] = { .name = "regulatory", }, + [NL80211_MCGRP_MLME] = { .name = "mlme", }, +#ifdef CONFIG_NL80211_TESTMODE + [NL80211_MCGRP_TESTMODE] = { .name = "testmode", } +#endif +}; + /* returns ERR_PTR values */ static struct wireless_dev * __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) @@ -6656,10 +6675,6 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) #ifdef CONFIG_NL80211_TESTMODE -static struct genl_multicast_group nl80211_testmode_mcgrp = { - .name = "testmode", -}; - static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; @@ -6868,8 +6883,8 @@ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) nla_nest_end(skb, data); genlmsg_end(skb, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0, - nl80211_testmode_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0, + NL80211_MCGRP_TESTMODE, gfp); } EXPORT_SYMBOL(cfg80211_testmode_event); #endif @@ -8851,7 +8866,7 @@ static int nl80211_crit_protocol_stop(struct sk_buff *skb, #define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\ NL80211_FLAG_CHECK_NETDEV_UP) -static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, +static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; @@ -8920,7 +8935,7 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, return 0; } -static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, +static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { if (info->user_ptr[1]) { @@ -8937,7 +8952,7 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, rtnl_unlock(); } -static struct genl_ops nl80211_ops[] = { +static const struct genl_ops nl80211_ops[] = { { .cmd = NL80211_CMD_GET_WIPHY, .doit = nl80211_get_wiphy, @@ -9566,21 +9581,6 @@ static struct genl_ops nl80211_ops[] = { }, }; -static struct genl_multicast_group nl80211_mlme_mcgrp = { - .name = "mlme", -}; - -/* multicast groups */ -static struct genl_multicast_group nl80211_config_mcgrp = { - .name = "config", -}; -static struct genl_multicast_group nl80211_scan_mcgrp = { - .name = "scan", -}; -static struct genl_multicast_group nl80211_regulatory_mcgrp = { - .name = "regulatory", -}; - /* notification functions */ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) @@ -9597,8 +9597,8 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) return; } - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_config_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_CONFIG, GFP_KERNEL); } static int nl80211_add_scan_req(struct sk_buff *msg, @@ -9707,8 +9707,8 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, return; } - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_scan_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_SCAN, GFP_KERNEL); } void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, @@ -9726,8 +9726,8 @@ void nl80211_send_scan_done(struct 
cfg80211_registered_device *rdev, return; } - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_scan_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_SCAN, GFP_KERNEL); } void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, @@ -9745,8 +9745,8 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, return; } - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_scan_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_SCAN, GFP_KERNEL); } void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, @@ -9764,8 +9764,8 @@ void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, return; } - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_scan_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_SCAN, GFP_KERNEL); } void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, @@ -9782,8 +9782,8 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, return; } - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_scan_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_SCAN, GFP_KERNEL); } /* @@ -9837,8 +9837,8 @@ void nl80211_send_reg_change_event(struct regulatory_request *request) genlmsg_end(msg, hdr); rcu_read_lock(); - genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, - GFP_ATOMIC); + genlmsg_multicast_allns(&nl80211_fam, msg, 0, + NL80211_MCGRP_REGULATORY, GFP_ATOMIC); rcu_read_unlock(); return; @@ -9873,8 +9873,8 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -9961,8 +9961,8 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10017,8 +10017,8 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10056,8 +10056,8 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10094,8 +10094,8 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, GFP_KERNEL); return; nla_put_failure: @@ -10128,8 +10128,8 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - 
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10169,8 +10169,8 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10208,8 +10208,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10261,8 +10261,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy, genlmsg_end(msg, hdr); rcu_read_lock(); - genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, - GFP_ATOMIC); + genlmsg_multicast_allns(&nl80211_fam, msg, 0, + NL80211_MCGRP_REGULATORY, GFP_ATOMIC); rcu_read_unlock(); return; @@ -10307,8 +10307,8 @@ static void nl80211_send_remain_on_chan_event( genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10362,8 +10362,8 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, return; } - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); } EXPORT_SYMBOL(cfg80211_new_sta); @@ -10392,8 +10392,8 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp) genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10428,8 +10428,8 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10590,8 +10590,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10639,8 +10639,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10684,8 +10684,8 @@ static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10742,8 +10742,8 @@ 
nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10789,8 +10789,8 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10866,8 +10866,8 @@ void cfg80211_cqm_txe_notify(struct net_device *dev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10915,8 +10915,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -10962,8 +10962,8 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -11002,8 +11002,8 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -11154,8 +11154,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; free_msg: @@ -11196,8 +11196,8 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); return; nla_put_failure: @@ -11279,8 +11279,8 @@ void cfg80211_ft_event(struct net_device *netdev, genlmsg_end(msg, hdr); - genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, - nl80211_mlme_mcgrp.id, GFP_KERNEL); + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, GFP_KERNEL); } EXPORT_SYMBOL(cfg80211_ft_event); @@ -11329,33 +11329,11 @@ int nl80211_init(void) { int err; - err = genl_register_family_with_ops(&nl80211_fam, - nl80211_ops, ARRAY_SIZE(nl80211_ops)); + err = genl_register_family_with_ops_groups(&nl80211_fam, nl80211_ops, + nl80211_mcgrps); if (err) return err; - err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp); - if (err) - goto err_out; - - err = genl_register_mc_group(&nl80211_fam, &nl80211_scan_mcgrp); - if (err) - goto err_out; - - err = genl_register_mc_group(&nl80211_fam, &nl80211_regulatory_mcgrp); - if (err) - goto err_out; - - err = genl_register_mc_group(&nl80211_fam, 
&nl80211_mlme_mcgrp); - if (err) - goto err_out; - -#ifdef CONFIG_NL80211_TESTMODE - err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp); - if (err) - goto err_out; -#endif - err = netlink_register_notifier(&nl80211_netlink_notifier); if (err) goto err_out; diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c index db0e5cd34c7..91c4117637a 100644 --- a/scripts/asn1_compiler.c +++ b/scripts/asn1_compiler.c @@ -1353,6 +1353,8 @@ static void render_out_of_line_list(FILE *out) render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act); render_opcode(out, "_jump_target(%u),\n", entry); break; + default: + break; } if (e->action) render_opcode(out, "_action(ACT_%s),\n", diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 61090e0ff61..9c981003037 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3289,6 +3289,7 @@ sub process { } } if (!defined $suppress_whiletrailers{$linenr} && + defined($stat) && defined($cond) && $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) { my ($s, $c) = ($stat, $cond); diff --git a/security/Makefile b/security/Makefile index c26c81e9257..a5918e01a4f 100644 --- a/security/Makefile +++ b/security/Makefile @@ -16,7 +16,6 @@ obj-$(CONFIG_MMU) += min_addr.o # Object file lists obj-$(CONFIG_SECURITY) += security.o capability.o obj-$(CONFIG_SECURITYFS) += inode.o -# Must precede capability.o in order to stack properly. obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o obj-$(CONFIG_AUDIT) += lsm_audit.o diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index 031d2d9dd69..89c78658031 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c @@ -111,7 +111,6 @@ static const char *const aa_audit_type[] = { static void audit_pre(struct audit_buffer *ab, void *ca) { struct common_audit_data *sa = ca; - struct task_struct *tsk = sa->aad->tsk ? sa->aad->tsk : current; if (aa_g_audit_header) { audit_log_format(ab, "apparmor="); @@ -132,11 +131,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca) if (sa->aad->profile) { struct aa_profile *profile = sa->aad->profile; - pid_t pid; - rcu_read_lock(); - pid = rcu_dereference(tsk->real_parent)->pid; - rcu_read_unlock(); - audit_log_format(ab, " parent=%d", pid); if (profile->ns != root_ns) { audit_log_format(ab, " namespace="); audit_log_untrustedstring(ab, profile->ns->base.hname); @@ -149,12 +143,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca) audit_log_format(ab, " name="); audit_log_untrustedstring(ab, sa->aad->name); } - - if (sa->aad->tsk) { - audit_log_format(ab, " pid=%d comm=", tsk->pid); - audit_log_untrustedstring(ab, tsk->comm); - } - } /** @@ -212,7 +200,7 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, if (sa->aad->type == AUDIT_APPARMOR_KILL) (void)send_sig_info(SIGKILL, NULL, - sa->aad->tsk ? sa->aad->tsk : current); + sa->u.tsk ? 
sa->u.tsk : current); if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) return complain_error(sa->aad->error); diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c index 84d1f5f5387..1101c6f64bb 100644 --- a/security/apparmor/capability.c +++ b/security/apparmor/capability.c @@ -53,8 +53,7 @@ static void audit_cb(struct audit_buffer *ab, void *va) /** * audit_caps - audit a capability - * @profile: profile confining task (NOT NULL) - * @task: task capability test was performed against (NOT NULL) + * @profile: profile being tested for confinement (NOT NULL) * @cap: capability tested * @error: error code returned by test * @@ -63,8 +62,7 @@ static void audit_cb(struct audit_buffer *ab, void *va) * * Returns: 0 or sa->error on success, error code on failure */ -static int audit_caps(struct aa_profile *profile, struct task_struct *task, - int cap, int error) +static int audit_caps(struct aa_profile *profile, int cap, int error) { struct audit_cache *ent; int type = AUDIT_APPARMOR_AUTO; @@ -73,7 +71,6 @@ static int audit_caps(struct aa_profile *profile, struct task_struct *task, sa.type = LSM_AUDIT_DATA_CAP; sa.aad = &aad; sa.u.cap = cap; - sa.aad->tsk = task; sa.aad->op = OP_CAPABLE; sa.aad->error = error; @@ -124,8 +121,7 @@ static int profile_capable(struct aa_profile *profile, int cap) /** * aa_capable - test permission to use capability - * @task: task doing capability test against (NOT NULL) - * @profile: profile confining @task (NOT NULL) + * @profile: profile being tested against (NOT NULL) * @cap: capability to be tested * @audit: whether an audit record should be generated * @@ -133,8 +129,7 @@ static int profile_capable(struct aa_profile *profile, int cap) * * Returns: 0 on success, or else an error code. */ -int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, - int audit) +int aa_capable(struct aa_profile *profile, int cap, int audit) { int error = profile_capable(profile, cap); @@ -144,5 +139,5 @@ int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, return error; } - return audit_caps(profile, task, cap, error); + return audit_caps(profile, cap, error); } diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 26c607c971f..452567d3a08 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -50,23 +50,21 @@ void aa_free_domain_entries(struct aa_domain *domain) /** * may_change_ptraced_domain - check if can change profile on ptraced task - * @task: task we want to change profile of (NOT NULL) * @to_profile: profile to change to (NOT NULL) * - * Check if the task is ptraced and if so if the tracing task is allowed + * Check if current is ptraced and if so if the tracing task is allowed * to trace the new domain * * Returns: %0 or error if change not allowed */ -static int may_change_ptraced_domain(struct task_struct *task, - struct aa_profile *to_profile) +static int may_change_ptraced_domain(struct aa_profile *to_profile) { struct task_struct *tracer; struct aa_profile *tracerp = NULL; int error = 0; rcu_read_lock(); - tracer = ptrace_parent(task); + tracer = ptrace_parent(current); if (tracer) /* released below */ tracerp = aa_get_task_profile(tracer); @@ -75,7 +73,7 @@ static int may_change_ptraced_domain(struct task_struct *task, if (!tracer || unconfined(tracerp)) goto out; - error = aa_may_ptrace(tracer, tracerp, to_profile, PTRACE_MODE_ATTACH); + error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH); out: rcu_read_unlock(); @@ -477,7 +475,7 @@ int 
apparmor_bprm_set_creds(struct linux_binprm *bprm) } if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { - error = may_change_ptraced_domain(current, new_profile); + error = may_change_ptraced_domain(new_profile); if (error) { aa_put_profile(new_profile); goto audit; @@ -690,7 +688,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) } } - error = may_change_ptraced_domain(current, hat); + error = may_change_ptraced_domain(hat); if (error) { info = "ptraced"; error = -EPERM; @@ -829,7 +827,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec, } /* check if tracing task is allowed to trace target domain */ - error = may_change_ptraced_domain(current, target); + error = may_change_ptraced_domain(target); if (error) { info = "ptrace prevents transition"; goto audit; diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h index 30e8d768725..ba3dfd17f23 100644 --- a/security/apparmor/include/audit.h +++ b/security/apparmor/include/audit.h @@ -109,7 +109,6 @@ struct apparmor_audit_data { void *profile; const char *name; const char *info; - struct task_struct *tsk; union { void *target; struct { diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h index 2e7c9d6a2f3..fc3fa381d85 100644 --- a/security/apparmor/include/capability.h +++ b/security/apparmor/include/capability.h @@ -4,7 +4,7 @@ * This file contains AppArmor capability mediation definitions. * * Copyright (C) 1998-2008 Novell/SUSE - * Copyright 2009-2010 Canonical Ltd. + * Copyright 2009-2013 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -38,8 +38,7 @@ struct aa_caps { extern struct aa_fs_entry aa_fs_entry_caps[]; -int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, - int audit); +int aa_capable(struct aa_profile *profile, int cap, int audit); static inline void aa_free_cap_rules(struct aa_caps *caps) { diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h index aeda0fbc8b2..288ca76e2fb 100644 --- a/security/apparmor/include/ipc.h +++ b/security/apparmor/include/ipc.h @@ -19,8 +19,8 @@ struct aa_profile; -int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, - struct aa_profile *tracee, unsigned int mode); +int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee, + unsigned int mode); int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee, unsigned int mode); diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c index c51d2266587..777ac1c4725 100644 --- a/security/apparmor/ipc.c +++ b/security/apparmor/ipc.c @@ -54,15 +54,14 @@ static int aa_audit_ptrace(struct aa_profile *profile, /** * aa_may_ptrace - test if tracer task can trace the tracee - * @tracer_task: task who will do the tracing (NOT NULL) * @tracer: profile of the task doing the tracing (NOT NULL) * @tracee: task to be traced * @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH * * Returns: %0 else error code if permission denied or error */ -int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, - struct aa_profile *tracee, unsigned int mode) +int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee, + unsigned int mode) { /* TODO: currently only based on capability, not extended ptrace * rules, @@ -72,7 +71,7 @@ int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, 
if (unconfined(tracer) || tracer == tracee) return 0; /* log this capability request */ - return aa_capable(tracer_task, tracer, CAP_SYS_PTRACE, 1); + return aa_capable(tracer, CAP_SYS_PTRACE, 1); } /** @@ -101,7 +100,7 @@ int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee, if (!unconfined(tracer_p)) { struct aa_profile *tracee_p = aa_get_task_profile(tracee); - error = aa_may_ptrace(tracer, tracer_p, tracee_p, mode); + error = aa_may_ptrace(tracer_p, tracee_p, mode); error = aa_audit_ptrace(tracer_p, tracee_p, error); aa_put_profile(tracee_p); diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index fb99e18123b..4257b7e2796 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -145,7 +145,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns, if (!error) { profile = aa_cred_profile(cred); if (!unconfined(profile)) - error = aa_capable(current, profile, cap, audit); + error = aa_capable(profile, cap, audit); } return error; } diff --git a/security/capability.c b/security/capability.c index dbeb9bc27b2..8b4f24ae433 100644 --- a/security/capability.c +++ b/security/capability.c @@ -777,9 +777,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx) return 0; } -static int cap_xfrm_state_alloc_security(struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx, - u32 secid) +static int cap_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) +{ + return 0; +} + +static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, + u32 secid) { return 0; } @@ -1101,7 +1107,8 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, xfrm_policy_clone_security); set_to_cap_if_null(ops, xfrm_policy_free_security); set_to_cap_if_null(ops, xfrm_policy_delete_security); - set_to_cap_if_null(ops, xfrm_state_alloc_security); + set_to_cap_if_null(ops, xfrm_state_alloc); + set_to_cap_if_null(ops, xfrm_state_alloc_acquire); set_to_cap_if_null(ops, xfrm_state_free_security); set_to_cap_if_null(ops, xfrm_state_delete_security); set_to_cap_if_null(ops, xfrm_policy_lookup); diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c index 0b759e17a13..77ca965ab68 100644 --- a/security/integrity/digsig.c +++ b/security/integrity/digsig.c @@ -13,7 +13,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> +#include <linux/sched.h> #include <linux/rbtree.h> +#include <linux/cred.h> #include <linux/key-type.h> #include <linux/digsig.h> @@ -21,21 +23,29 @@ static struct key *keyring[INTEGRITY_KEYRING_MAX]; +#ifdef CONFIG_IMA_TRUSTED_KEYRING +static const char *keyring_name[INTEGRITY_KEYRING_MAX] = { + ".evm", + ".module", + ".ima", +}; +#else static const char *keyring_name[INTEGRITY_KEYRING_MAX] = { "_evm", "_module", "_ima", }; +#endif int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, - const char *digest, int digestlen) + const char *digest, int digestlen) { if (id >= INTEGRITY_KEYRING_MAX) return -EINVAL; if (!keyring[id]) { keyring[id] = - request_key(&key_type_keyring, keyring_name[id], NULL); + request_key(&key_type_keyring, keyring_name[id], NULL); if (IS_ERR(keyring[id])) { int err = PTR_ERR(keyring[id]); pr_err("no %s keyring: %d\n", keyring_name[id], err); @@ -44,9 +54,10 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, } } - switch (sig[0]) { + switch (sig[1]) { case 1: - return digsig_verify(keyring[id], sig, siglen, + /* v1 API expect 
signature without xattr type */ + return digsig_verify(keyring[id], sig + 1, siglen - 1, digest, digestlen); case 2: return asymmetric_verify(keyring[id], sig, siglen, @@ -55,3 +66,21 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, return -EOPNOTSUPP; } + +int integrity_init_keyring(const unsigned int id) +{ + const struct cred *cred = current_cred(); + const struct user_struct *user = cred->user; + + keyring[id] = keyring_alloc(keyring_name[id], KUIDT_INIT(0), + KGIDT_INIT(0), cred, + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA, user->uid_keyring); + if (!IS_ERR(keyring[id])) + set_bit(KEY_FLAG_TRUSTED_ONLY, &keyring[id]->flags); + else + pr_info("Can't allocate %s keyring (%ld)\n", + keyring_name[id], PTR_ERR(keyring[id])); + return 0; +} diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c index b4754667659..9eae4809006 100644 --- a/security/integrity/digsig_asymmetric.c +++ b/security/integrity/digsig_asymmetric.c @@ -20,17 +20,6 @@ #include "integrity.h" /* - * signature format v2 - for using with asymmetric keys - */ -struct signature_v2_hdr { - uint8_t version; /* signature format version */ - uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */ - uint32_t keyid; /* IMA key identifier - not X509/PGP specific*/ - uint16_t sig_size; /* signature size */ - uint8_t sig[0]; /* signature payload */ -} __packed; - -/* * Request an asymmetric key. */ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid) diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index af9b6852f4e..336b3ddfe63 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -123,7 +123,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, goto out; } - xattr_len = rc - 1; + xattr_len = rc; /* check value type */ switch (xattr_data->type) { @@ -143,7 +143,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, if (rc) break; rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM, - xattr_data->digest, xattr_len, + (const char *)xattr_data, xattr_len, calc.digest, sizeof(calc.digest)); if (!rc) { /* we probably want to replace rsa with hmac here */ diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c index b1753e98bf9..46408b9e62e 100644 --- a/security/integrity/evm/evm_posix_acl.c +++ b/security/integrity/evm/evm_posix_acl.c @@ -11,8 +11,9 @@ #include <linux/module.h> #include <linux/xattr.h> +#include <linux/evm.h> -int posix_xattr_acl(char *xattr) +int posix_xattr_acl(const char *xattr) { int xattr_len = strlen(xattr); diff --git a/security/integrity/iint.c b/security/integrity/iint.c index 74522dbd10a..c49d3f14cbe 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c @@ -70,6 +70,8 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode) static void iint_free(struct integrity_iint_cache *iint) { + kfree(iint->ima_hash); + iint->ima_hash = NULL; iint->version = 0; iint->flags = 0UL; iint->ima_file_status = INTEGRITY_UNKNOWN; diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 39196abaff0..dad8d4ca243 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -9,6 +9,7 @@ config IMA select CRYPTO_HMAC select CRYPTO_MD5 select CRYPTO_SHA1 + select CRYPTO_HASH_INFO select TCG_TPM if HAS_IOMEM && !UML select TCG_TIS if TCG_TPM && X86 select 
TCG_IBMVTPM if TCG_TPM && PPC64 @@ -45,6 +46,69 @@ config IMA_LSM_RULES help Disabling this option will disregard LSM based policy rules. +choice + prompt "Default template" + default IMA_NG_TEMPLATE + depends on IMA + help + Select the default IMA measurement template. + + The original 'ima' measurement list template contains a + hash, defined as 20 bytes, and a null terminated pathname, + limited to 255 characters. The 'ima-ng' measurement list + template permits both larger hash digests and longer + pathnames. + + config IMA_TEMPLATE + bool "ima" + config IMA_NG_TEMPLATE + bool "ima-ng (default)" + config IMA_SIG_TEMPLATE + bool "ima-sig" +endchoice + +config IMA_DEFAULT_TEMPLATE + string + depends on IMA + default "ima" if IMA_TEMPLATE + default "ima-ng" if IMA_NG_TEMPLATE + default "ima-sig" if IMA_SIG_TEMPLATE + +choice + prompt "Default integrity hash algorithm" + default IMA_DEFAULT_HASH_SHA1 + depends on IMA + help + Select the default hash algorithm used for the measurement + list, integrity appraisal and audit log. The compiled default + hash algorithm can be overwritten using the kernel command + line 'ima_hash=' option. + + config IMA_DEFAULT_HASH_SHA1 + bool "SHA1 (default)" + depends on CRYPTO_SHA1 + + config IMA_DEFAULT_HASH_SHA256 + bool "SHA256" + depends on CRYPTO_SHA256 && !IMA_TEMPLATE + + config IMA_DEFAULT_HASH_SHA512 + bool "SHA512" + depends on CRYPTO_SHA512 && !IMA_TEMPLATE + + config IMA_DEFAULT_HASH_WP512 + bool "WP512" + depends on CRYPTO_WP512 && !IMA_TEMPLATE +endchoice + +config IMA_DEFAULT_HASH + string + depends on IMA + default "sha1" if IMA_DEFAULT_HASH_SHA1 + default "sha256" if IMA_DEFAULT_HASH_SHA256 + default "sha512" if IMA_DEFAULT_HASH_SHA512 + default "wp512" if IMA_DEFAULT_HASH_WP512 + config IMA_APPRAISE bool "Appraise integrity measurements" depends on IMA @@ -59,3 +123,11 @@ config IMA_APPRAISE For more information on integrity appraisal refer to: <http://linux-ima.sourceforge.net> If unsure, say N. + +config IMA_TRUSTED_KEYRING + bool "Require all keys on the _ima keyring be signed" + depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING + default y + help + This option requires that all keys added to the _ima + keyring be signed by a key on the system trusted keyring. 
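Note on the IMA Kconfig additions above: the two new choice blocks only select compiled-in defaults (CONFIG_IMA_DEFAULT_TEMPLATE and CONFIG_IMA_DEFAULT_HASH), and the help text notes that the hash default can still be overridden at boot with the 'ima_hash=' command-line option. The sketch below illustrates, under stated assumptions, how such an early-boot override is typically wired up with __setup(); the function and variable names are hypothetical and this is not the code added by the series, only an illustration of the mechanism. hash_algo_name[] and HASH_ALGO__LAST are assumed to come from <crypto/hash_info.h> (CRYPTO_HASH_INFO is selected in the hunk above).

	/*
	 * Illustrative sketch only -- not part of the patch above.  Shows how a
	 * compiled-in default such as CONFIG_IMA_DEFAULT_HASH can be overridden
	 * from the kernel command line via "ima_hash=<algo>".
	 */
	#include <linux/init.h>
	#include <linux/string.h>
	#include <crypto/hash_info.h>

	static int example_hash_algo = HASH_ALGO_SHA1;	/* compiled-in default */

	static int __init example_ima_hash_setup(char *str)
	{
		int i;

		/* map the algorithm name to its enum hash_algo index */
		for (i = 0; i < HASH_ALGO__LAST; i++) {
			if (!strcmp(str, hash_algo_name[i])) {
				example_hash_algo = i;
				break;
			}
		}
		return 1;	/* parameter handled; keep default on unknown names */
	}
	__setup("ima_hash=", example_ima_hash_setup);

Booting with e.g. 'ima_hash=sha256' would then switch the measurement hash without rebuilding the kernel, which is the interaction the IMA_DEFAULT_HASH help text describes.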
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile index 56dfee7cbf6..d79263d2fdb 100644 --- a/security/integrity/ima/Makefile +++ b/security/integrity/ima/Makefile @@ -6,5 +6,5 @@ obj-$(CONFIG_IMA) += ima.o ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \ - ima_policy.o + ima_policy.o ima_template.o ima_template_lib.o ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index b3dd616560f..bf03c6a16cc 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -36,23 +36,48 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; #define IMA_HASH_BITS 9 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) +#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 +#define IMA_TEMPLATE_NUM_FIELDS_MAX 15 + +#define IMA_TEMPLATE_IMA_NAME "ima" +#define IMA_TEMPLATE_IMA_FMT "d|n" + /* set during initialization */ extern int ima_initialized; extern int ima_used_chip; -extern char *ima_hash; +extern int ima_hash_algo; extern int ima_appraise; -/* IMA inode template definition */ -struct ima_template_data { - u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */ - char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */ +/* IMA template field data definition */ +struct ima_field_data { + u8 *data; + u32 len; +}; + +/* IMA template field definition */ +struct ima_template_field { + const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN]; + int (*field_init) (struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_field_data *field_data); + void (*field_show) (struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +}; + +/* IMA template descriptor definition */ +struct ima_template_desc { + char *name; + char *fmt; + int num_fields; + struct ima_template_field **fields; }; struct ima_template_entry { - u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ - const char *template_name; - int template_len; - struct ima_template_data template; + u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ + struct ima_template_desc *template_desc; /* template descriptor */ + u32 template_data_len; + struct ima_field_data template_data[0]; /* template related data */ }; struct ima_queue_entry { @@ -69,13 +94,21 @@ int ima_fs_init(void); void ima_fs_cleanup(void); int ima_inode_alloc(struct inode *inode); int ima_add_template_entry(struct ima_template_entry *entry, int violation, - const char *op, struct inode *inode); -int ima_calc_file_hash(struct file *file, char *digest); -int ima_calc_buffer_hash(const void *data, int len, char *digest); -int ima_calc_boot_aggregate(char *digest); -void ima_add_violation(struct inode *inode, const unsigned char *filename, + const char *op, struct inode *inode, + const unsigned char *filename); +int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash); +int ima_calc_field_array_hash(struct ima_field_data *field_data, int num_fields, + struct ima_digest_data *hash); +int __init ima_calc_boot_aggregate(struct ima_digest_data *hash); +void ima_add_violation(struct file *file, const unsigned char *filename, const char *op, const char *cause); int ima_init_crypto(void); +void ima_putc(struct seq_file *m, void *data, int datalen); +void ima_print_digest(struct seq_file *m, u8 *digest, int size); +struct ima_template_desc *ima_template_desc_current(void); +int ima_init_template(void); + +int 
ima_init_template(void); /* * used to protect h_table and sha_table @@ -98,14 +131,21 @@ static inline unsigned long ima_hash_key(u8 *digest) int ima_get_action(struct inode *inode, int mask, int function); int ima_must_measure(struct inode *inode, int mask, int function); int ima_collect_measurement(struct integrity_iint_cache *iint, - struct file *file); + struct file *file, + struct evm_ima_xattr_data **xattr_value, + int *xattr_len); void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, - const unsigned char *filename); + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len); void ima_audit_measurement(struct integrity_iint_cache *iint, const unsigned char *filename); +int ima_alloc_init_template(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_template_entry **entry); int ima_store_template(struct ima_template_entry *entry, int violation, - struct inode *inode); -void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show); + struct inode *inode, const unsigned char *filename); const char *ima_d_path(struct path *path, char **pathbuf); /* rbtree tree calls to lookup, insert, delete @@ -131,17 +171,25 @@ void ima_delete_rules(void); #ifdef CONFIG_IMA_APPRAISE int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, - struct file *file, const unsigned char *filename); + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len); int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func); void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file); enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, int func); +void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_digest_data *hash); +int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value); #else static inline int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, struct file *file, - const unsigned char *filename) + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) { return INTEGRITY_UNKNOWN; } @@ -162,6 +210,19 @@ static inline enum integrity_status ima_get_cache_status(struct integrity_iint_c { return INTEGRITY_UNKNOWN; } + +static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, + int xattr_len, + struct ima_digest_data *hash) +{ +} + +static inline int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value) +{ + return 0; +} + #endif /* LSM based policy rules require audit */ diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index 1c03e8f1e0e..0e7540863fc 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -18,9 +18,46 @@ #include <linux/fs.h> #include <linux/xattr.h> #include <linux/evm.h> +#include <crypto/hash_info.h> #include "ima.h" -static const char *IMA_TEMPLATE_NAME = "ima"; +/* + * ima_alloc_init_template - create and initialize a new template entry + */ +int ima_alloc_init_template(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_template_entry **entry) +{ + struct ima_template_desc *template_desc = ima_template_desc_current(); + int i, result = 0; + + *entry = 
kzalloc(sizeof(**entry) + template_desc->num_fields * + sizeof(struct ima_field_data), GFP_NOFS); + if (!*entry) + return -ENOMEM; + + for (i = 0; i < template_desc->num_fields; i++) { + struct ima_template_field *field = template_desc->fields[i]; + u32 len; + + result = field->field_init(iint, file, filename, + xattr_value, xattr_len, + &((*entry)->template_data[i])); + if (result != 0) + goto out; + + len = (*entry)->template_data[i].len; + (*entry)->template_data_len += sizeof(len); + (*entry)->template_data_len += len; + } + (*entry)->template_desc = template_desc; + return 0; +out: + kfree(*entry); + *entry = NULL; + return result; +} /* * ima_store_template - store ima template measurements @@ -39,28 +76,34 @@ static const char *IMA_TEMPLATE_NAME = "ima"; * Returns 0 on success, error code otherwise */ int ima_store_template(struct ima_template_entry *entry, - int violation, struct inode *inode) + int violation, struct inode *inode, + const unsigned char *filename) { const char *op = "add_template_measure"; const char *audit_cause = "hashing_error"; + char *template_name = entry->template_desc->name; int result; - - memset(entry->digest, 0, sizeof(entry->digest)); - entry->template_name = IMA_TEMPLATE_NAME; - entry->template_len = sizeof(entry->template); + struct { + struct ima_digest_data hdr; + char digest[TPM_DIGEST_SIZE]; + } hash; if (!violation) { - result = ima_calc_buffer_hash(&entry->template, - entry->template_len, - entry->digest); + int num_fields = entry->template_desc->num_fields; + + /* this function uses default algo */ + hash.hdr.algo = HASH_ALGO_SHA1; + result = ima_calc_field_array_hash(&entry->template_data[0], + num_fields, &hash.hdr); if (result < 0) { integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, - entry->template_name, op, + template_name, op, audit_cause, result, 0); return result; } + memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); } - result = ima_add_template_entry(entry, violation, op, inode); + result = ima_add_template_entry(entry, violation, op, inode, filename); return result; } @@ -71,24 +114,24 @@ int ima_store_template(struct ima_template_entry *entry, * By extending the PCR with 0xFF's instead of with zeroes, the PCR * value is invalidated. 
*/ -void ima_add_violation(struct inode *inode, const unsigned char *filename, +void ima_add_violation(struct file *file, const unsigned char *filename, const char *op, const char *cause) { struct ima_template_entry *entry; + struct inode *inode = file->f_dentry->d_inode; int violation = 1; int result; /* can overflow, only indicator */ atomic_long_inc(&ima_htable.violations); - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { + result = ima_alloc_init_template(NULL, file, filename, + NULL, 0, &entry); + if (result < 0) { result = -ENOMEM; goto err_out; } - memset(&entry->template, 0, sizeof(entry->template)); - strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX); - result = ima_store_template(entry, violation, inode); + result = ima_store_template(entry, violation, inode, filename); if (result < 0) kfree(entry); err_out: @@ -138,20 +181,42 @@ int ima_must_measure(struct inode *inode, int mask, int function) * Return 0 on success, error code otherwise */ int ima_collect_measurement(struct integrity_iint_cache *iint, - struct file *file) + struct file *file, + struct evm_ima_xattr_data **xattr_value, + int *xattr_len) { struct inode *inode = file_inode(file); const char *filename = file->f_dentry->d_name.name; int result = 0; + struct { + struct ima_digest_data hdr; + char digest[IMA_MAX_DIGEST_SIZE]; + } hash; + + if (xattr_value) + *xattr_len = ima_read_xattr(file->f_dentry, xattr_value); if (!(iint->flags & IMA_COLLECTED)) { u64 i_version = file_inode(file)->i_version; - iint->ima_xattr.type = IMA_XATTR_DIGEST; - result = ima_calc_file_hash(file, iint->ima_xattr.digest); + /* use default hash algorithm */ + hash.hdr.algo = ima_hash_algo; + + if (xattr_value) + ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr); + + result = ima_calc_file_hash(file, &hash.hdr); if (!result) { - iint->version = i_version; - iint->flags |= IMA_COLLECTED; + int length = sizeof(hash.hdr) + hash.hdr.length; + void *tmpbuf = krealloc(iint->ima_hash, length, + GFP_NOFS); + if (tmpbuf) { + iint->ima_hash = tmpbuf; + memcpy(iint->ima_hash, &hash, length); + iint->version = i_version; + iint->flags |= IMA_COLLECTED; + } else + result = -ENOMEM; } } if (result) @@ -177,7 +242,9 @@ int ima_collect_measurement(struct integrity_iint_cache *iint, * Must be called with iint->mutex held. */ void ima_store_measurement(struct integrity_iint_cache *iint, - struct file *file, const unsigned char *filename) + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) { const char *op = "add_template_measure"; const char *audit_cause = "ENOMEM"; @@ -189,19 +256,15 @@ void ima_store_measurement(struct integrity_iint_cache *iint, if (iint->flags & IMA_MEASURED) return; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { + result = ima_alloc_init_template(iint, file, filename, + xattr_value, xattr_len, &entry); + if (result < 0) { integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, op, audit_cause, result, 0); return; } - memset(&entry->template, 0, sizeof(entry->template)); - memcpy(entry->template.digest, iint->ima_xattr.digest, IMA_DIGEST_SIZE); - strcpy(entry->template.file_name, - (strlen(filename) > IMA_EVENT_NAME_LEN_MAX) ? 
- file->f_dentry->d_name.name : filename); - result = ima_store_template(entry, violation, inode); + result = ima_store_template(entry, violation, inode, filename); if (!result || result == -EEXIST) iint->flags |= IMA_MEASURED; if (result < 0) @@ -212,14 +275,16 @@ void ima_audit_measurement(struct integrity_iint_cache *iint, const unsigned char *filename) { struct audit_buffer *ab; - char hash[(IMA_DIGEST_SIZE * 2) + 1]; + char hash[(iint->ima_hash->length * 2) + 1]; + const char *algo_name = hash_algo_name[iint->ima_hash->algo]; + char algo_hash[sizeof(hash) + strlen(algo_name) + 2]; int i; if (iint->flags & IMA_AUDITED) return; - for (i = 0; i < IMA_DIGEST_SIZE; i++) - hex_byte_pack(hash + (i * 2), iint->ima_xattr.digest[i]); + for (i = 0; i < iint->ima_hash->length; i++) + hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]); hash[i * 2] = '\0'; ab = audit_log_start(current->audit_context, GFP_KERNEL, @@ -230,7 +295,8 @@ void ima_audit_measurement(struct integrity_iint_cache *iint, audit_log_format(ab, "file="); audit_log_untrustedstring(ab, filename); audit_log_format(ab, " hash="); - audit_log_untrustedstring(ab, hash); + snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash); + audit_log_untrustedstring(ab, algo_hash); audit_log_task_info(ab, current); audit_log_end(ab); diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 2d4becab891..46353ee517f 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -15,6 +15,7 @@ #include <linux/magic.h> #include <linux/ima.h> #include <linux/evm.h> +#include <crypto/hash_info.h> #include "ima.h" @@ -43,19 +44,31 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func) } static int ima_fix_xattr(struct dentry *dentry, - struct integrity_iint_cache *iint) + struct integrity_iint_cache *iint) { - iint->ima_xattr.type = IMA_XATTR_DIGEST; - return __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA, - (u8 *)&iint->ima_xattr, - sizeof(iint->ima_xattr), 0); + int rc, offset; + u8 algo = iint->ima_hash->algo; + + if (algo <= HASH_ALGO_SHA1) { + offset = 1; + iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST; + } else { + offset = 0; + iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG; + iint->ima_hash->xattr.ng.algo = algo; + } + rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA, + &iint->ima_hash->xattr.data[offset], + (sizeof(iint->ima_hash->xattr) - offset) + + iint->ima_hash->length, 0); + return rc; } /* Return specific func appraised cached result */ enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, int func) { - switch(func) { + switch (func) { case MMAP_CHECK: return iint->ima_mmap_status; case BPRM_CHECK: @@ -71,7 +84,7 @@ enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, static void ima_set_cache_status(struct integrity_iint_cache *iint, int func, enum integrity_status status) { - switch(func) { + switch (func) { case MMAP_CHECK: iint->ima_mmap_status = status; break; @@ -90,7 +103,7 @@ static void ima_set_cache_status(struct integrity_iint_cache *iint, static void ima_cache_flags(struct integrity_iint_cache *iint, int func) { - switch(func) { + switch (func) { case MMAP_CHECK: iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED); break; @@ -107,6 +120,50 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func) } } +void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_digest_data *hash) +{ + struct signature_v2_hdr 
*sig; + + if (!xattr_value || xattr_len < 2) + return; + + switch (xattr_value->type) { + case EVM_IMA_XATTR_DIGSIG: + sig = (typeof(sig))xattr_value; + if (sig->version != 2 || xattr_len <= sizeof(*sig)) + return; + hash->algo = sig->hash_algo; + break; + case IMA_XATTR_DIGEST_NG: + hash->algo = xattr_value->digest[0]; + break; + case IMA_XATTR_DIGEST: + /* this is for backward compatibility */ + if (xattr_len == 21) { + unsigned int zero = 0; + if (!memcmp(&xattr_value->digest[16], &zero, 4)) + hash->algo = HASH_ALGO_MD5; + else + hash->algo = HASH_ALGO_SHA1; + } else if (xattr_len == 17) + hash->algo = HASH_ALGO_MD5; + break; + } +} + +int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value) +{ + struct inode *inode = dentry->d_inode; + + if (!inode->i_op->getxattr) + return 0; + + return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value, + 0, GFP_NOFS); +} + /* * ima_appraise_measurement - appraise file measurement * @@ -116,23 +173,22 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func) * Return 0 on success, error code otherwise */ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, - struct file *file, const unsigned char *filename) + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) { struct dentry *dentry = file->f_dentry; struct inode *inode = dentry->d_inode; - struct evm_ima_xattr_data *xattr_value = NULL; enum integrity_status status = INTEGRITY_UNKNOWN; const char *op = "appraise_data"; char *cause = "unknown"; - int rc; + int rc = xattr_len, hash_start = 0; if (!ima_appraise) return 0; if (!inode->i_op->getxattr) return INTEGRITY_UNKNOWN; - rc = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)&xattr_value, - 0, GFP_NOFS); if (rc <= 0) { if (rc && rc != -ENODATA) goto out; @@ -153,14 +209,25 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, goto out; } switch (xattr_value->type) { + case IMA_XATTR_DIGEST_NG: + /* first byte contains algorithm id */ + hash_start = 1; case IMA_XATTR_DIGEST: if (iint->flags & IMA_DIGSIG_REQUIRED) { cause = "IMA signature required"; status = INTEGRITY_FAIL; break; } - rc = memcmp(xattr_value->digest, iint->ima_xattr.digest, - IMA_DIGEST_SIZE); + if (xattr_len - sizeof(xattr_value->type) - hash_start >= + iint->ima_hash->length) + /* xattr length may be longer. 
md5 hash in previous + version occupied 20 bytes in xattr, instead of 16 + */ + rc = memcmp(&xattr_value->digest[hash_start], + iint->ima_hash->digest, + iint->ima_hash->length); + else + rc = -EINVAL; if (rc) { cause = "invalid-hash"; status = INTEGRITY_FAIL; @@ -171,9 +238,9 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, case EVM_IMA_XATTR_DIGSIG: iint->flags |= IMA_DIGSIG; rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, - xattr_value->digest, rc - 1, - iint->ima_xattr.digest, - IMA_DIGEST_SIZE); + (const char *)xattr_value, rc, + iint->ima_hash->digest, + iint->ima_hash->length); if (rc == -EOPNOTSUPP) { status = INTEGRITY_UNKNOWN; } else if (rc) { @@ -203,7 +270,6 @@ out: ima_cache_flags(iint, func); } ima_set_cache_status(iint, func, status); - kfree(xattr_value); return status; } @@ -219,7 +285,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) if (iint->flags & IMA_DIGSIG) return; - rc = ima_collect_measurement(iint, file); + rc = ima_collect_measurement(iint, file, NULL, NULL); if (rc < 0) return; @@ -315,3 +381,14 @@ int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name) } return result; } + +#ifdef CONFIG_IMA_TRUSTED_KEYRING +static int __init init_ima_keyring(void) +{ + int ret; + + ret = integrity_init_keyring(INTEGRITY_KEYRING_IMA); + return 0; +} +late_initcall(init_ima_keyring); +#endif diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index a02e0791cf1..676e0292dfe 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -20,6 +20,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <crypto/hash.h> +#include <crypto/hash_info.h> #include "ima.h" static struct crypto_shash *ima_shash_tfm; @@ -28,31 +29,58 @@ int ima_init_crypto(void) { long rc; - ima_shash_tfm = crypto_alloc_shash(ima_hash, 0, 0); + ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0); if (IS_ERR(ima_shash_tfm)) { rc = PTR_ERR(ima_shash_tfm); - pr_err("Can not allocate %s (reason: %ld)\n", ima_hash, rc); + pr_err("Can not allocate %s (reason: %ld)\n", + hash_algo_name[ima_hash_algo], rc); return rc; } return 0; } +static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo) +{ + struct crypto_shash *tfm = ima_shash_tfm; + int rc; + + if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) { + tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0); + if (IS_ERR(tfm)) { + rc = PTR_ERR(tfm); + pr_err("Can not allocate %s (reason: %d)\n", + hash_algo_name[algo], rc); + } + } + return tfm; +} + +static void ima_free_tfm(struct crypto_shash *tfm) +{ + if (tfm != ima_shash_tfm) + crypto_free_shash(tfm); +} + /* * Calculate the MD5/SHA1 file digest */ -int ima_calc_file_hash(struct file *file, char *digest) +static int ima_calc_file_hash_tfm(struct file *file, + struct ima_digest_data *hash, + struct crypto_shash *tfm) { loff_t i_size, offset = 0; char *rbuf; int rc, read = 0; struct { struct shash_desc shash; - char ctx[crypto_shash_descsize(ima_shash_tfm)]; + char ctx[crypto_shash_descsize(tfm)]; } desc; - desc.shash.tfm = ima_shash_tfm; + desc.shash.tfm = tfm; desc.shash.flags = 0; + hash->length = crypto_shash_digestsize(tfm); + rc = crypto_shash_init(&desc.shash); if (rc != 0) return rc; @@ -85,27 +113,83 @@ int ima_calc_file_hash(struct file *file, char *digest) } kfree(rbuf); if (!rc) - rc = crypto_shash_final(&desc.shash, digest); + rc = crypto_shash_final(&desc.shash, hash->digest); if (read) file->f_mode &= ~FMODE_READ; out: 
return rc; } +int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = ima_calc_file_hash_tfm(file, hash, tfm); + + ima_free_tfm(tfm); + + return rc; +} + /* - * Calculate the hash of a given buffer + * Calculate the hash of template data */ -int ima_calc_buffer_hash(const void *data, int len, char *digest) +static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, + int num_fields, + struct ima_digest_data *hash, + struct crypto_shash *tfm) { struct { struct shash_desc shash; - char ctx[crypto_shash_descsize(ima_shash_tfm)]; + char ctx[crypto_shash_descsize(tfm)]; } desc; + int rc, i; - desc.shash.tfm = ima_shash_tfm; + desc.shash.tfm = tfm; desc.shash.flags = 0; - return crypto_shash_digest(&desc.shash, data, len, digest); + hash->length = crypto_shash_digestsize(tfm); + + rc = crypto_shash_init(&desc.shash); + if (rc != 0) + return rc; + + for (i = 0; i < num_fields; i++) { + rc = crypto_shash_update(&desc.shash, + (const u8 *) &field_data[i].len, + sizeof(field_data[i].len)); + rc = crypto_shash_update(&desc.shash, field_data[i].data, + field_data[i].len); + if (rc) + break; + } + + if (!rc) + rc = crypto_shash_final(&desc.shash, hash->digest); + + return rc; +} + +int ima_calc_field_array_hash(struct ima_field_data *field_data, int num_fields, + struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = ima_calc_field_array_hash_tfm(field_data, num_fields, hash, tfm); + + ima_free_tfm(tfm); + + return rc; } static void __init ima_pcrread(int idx, u8 *pcr) @@ -120,16 +204,17 @@ static void __init ima_pcrread(int idx, u8 *pcr) /* * Calculate the boot aggregate hash */ -int __init ima_calc_boot_aggregate(char *digest) +static int __init ima_calc_boot_aggregate_tfm(char *digest, + struct crypto_shash *tfm) { - u8 pcr_i[IMA_DIGEST_SIZE]; + u8 pcr_i[TPM_DIGEST_SIZE]; int rc, i; struct { struct shash_desc shash; - char ctx[crypto_shash_descsize(ima_shash_tfm)]; + char ctx[crypto_shash_descsize(tfm)]; } desc; - desc.shash.tfm = ima_shash_tfm; + desc.shash.tfm = tfm; desc.shash.flags = 0; rc = crypto_shash_init(&desc.shash); @@ -140,9 +225,26 @@ int __init ima_calc_boot_aggregate(char *digest) for (i = TPM_PCR0; i < TPM_PCR8; i++) { ima_pcrread(i, pcr_i); /* now accumulate with current aggregate */ - rc = crypto_shash_update(&desc.shash, pcr_i, IMA_DIGEST_SIZE); + rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE); } if (!rc) crypto_shash_final(&desc.shash, digest); return rc; } + +int __init ima_calc_boot_aggregate(struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + hash->length = crypto_shash_digestsize(tfm); + rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm); + + ima_free_tfm(tfm); + + return rc; +} diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index 38477c9c341..d47a7c86a21 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -88,8 +88,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos) * against concurrent list-extension */ rcu_read_lock(); - qe = list_entry_rcu(qe->later.next, - struct ima_queue_entry, later); + qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later); rcu_read_unlock(); (*pos)++; @@ -100,7 +99,7 
@@ static void ima_measurements_stop(struct seq_file *m, void *v) { } -static void ima_putc(struct seq_file *m, void *data, int datalen) +void ima_putc(struct seq_file *m, void *data, int datalen) { while (datalen--) seq_putc(m, *(char *)data++); @@ -111,6 +110,7 @@ static void ima_putc(struct seq_file *m, void *data, int datalen) * char[20]=template digest * 32bit-le=template name size * char[n]=template name + * [eventdata length] * eventdata[n]=template specific data */ static int ima_measurements_show(struct seq_file *m, void *v) @@ -120,6 +120,7 @@ static int ima_measurements_show(struct seq_file *m, void *v) struct ima_template_entry *e; int namelen; u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX; + int i; /* get entry */ e = qe->entry; @@ -134,18 +135,25 @@ static int ima_measurements_show(struct seq_file *m, void *v) ima_putc(m, &pcr, sizeof pcr); /* 2nd: template digest */ - ima_putc(m, e->digest, IMA_DIGEST_SIZE); + ima_putc(m, e->digest, TPM_DIGEST_SIZE); /* 3rd: template name size */ - namelen = strlen(e->template_name); + namelen = strlen(e->template_desc->name); ima_putc(m, &namelen, sizeof namelen); /* 4th: template name */ - ima_putc(m, (void *)e->template_name, namelen); + ima_putc(m, e->template_desc->name, namelen); + + /* 5th: template length (except for 'ima' template) */ + if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) + ima_putc(m, &e->template_data_len, + sizeof(e->template_data_len)); - /* 5th: template specific data */ - ima_template_show(m, (struct ima_template_data *)&e->template, - IMA_SHOW_BINARY); + /* 6th: template specific data */ + for (i = 0; i < e->template_desc->num_fields; i++) { + e->template_desc->fields[i]->field_show(m, IMA_SHOW_BINARY, + &e->template_data[i]); + } return 0; } @@ -168,41 +176,21 @@ static const struct file_operations ima_measurements_ops = { .release = seq_release, }; -static void ima_print_digest(struct seq_file *m, u8 *digest) +void ima_print_digest(struct seq_file *m, u8 *digest, int size) { int i; - for (i = 0; i < IMA_DIGEST_SIZE; i++) + for (i = 0; i < size; i++) seq_printf(m, "%02x", *(digest + i)); } -void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show) -{ - struct ima_template_data *entry = e; - int namelen; - - switch (show) { - case IMA_SHOW_ASCII: - ima_print_digest(m, entry->digest); - seq_printf(m, " %s\n", entry->file_name); - break; - case IMA_SHOW_BINARY: - ima_putc(m, entry->digest, IMA_DIGEST_SIZE); - - namelen = strlen(entry->file_name); - ima_putc(m, &namelen, sizeof namelen); - ima_putc(m, entry->file_name, namelen); - default: - break; - } -} - /* print in ascii */ static int ima_ascii_measurements_show(struct seq_file *m, void *v) { /* the list never shrinks, so we don't need a lock here */ struct ima_queue_entry *qe = v; struct ima_template_entry *e; + int i; /* get entry */ e = qe->entry; @@ -213,14 +201,21 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v) seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX); /* 2nd: SHA1 template hash */ - ima_print_digest(m, e->digest); + ima_print_digest(m, e->digest, TPM_DIGEST_SIZE); /* 3th: template name */ - seq_printf(m, " %s ", e->template_name); + seq_printf(m, " %s", e->template_desc->name); /* 4th: template specific data */ - ima_template_show(m, (struct ima_template_data *)&e->template, - IMA_SHOW_ASCII); + for (i = 0; i < e->template_desc->num_fields; i++) { + seq_puts(m, " "); + if (e->template_data[i].len == 0) + continue; + + e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII, + &e->template_data[i]); 
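For reference, a rough userspace reader for the binary record layout emitted by ima_measurements_show() above: PCR, 20-byte template digest, name length, name, then (for non-"ima" templates) the total field-data length followed by <length, data> pairs. It assumes native-endian 32-bit sizes; the securityfs path and the minimal handling of legacy "ima" records are best-effort assumptions, not a complete parser.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int read_exact(FILE *f, void *buf, size_t n)
{
    return fread(buf, 1, n, f) == n ? 0 : -1;
}

int main(int argc, char **argv)
{
    FILE *f = fopen(argc > 1 ? argv[1] :
        "/sys/kernel/security/ima/binary_runtime_measurements", "rb");
    if (!f) { perror("open"); return 1; }

    uint32_t pcr, namelen, datalen, fieldlen;
    unsigned char digest[20], name[256];

    while (read_exact(f, &pcr, 4) == 0) {
        if (read_exact(f, digest, sizeof(digest)) ||    /* template digest */
            read_exact(f, &namelen, 4) ||
            namelen >= sizeof(name) ||
            read_exact(f, name, namelen))
            break;
        name[namelen] = '\0';
        printf("PCR %u template %s", pcr, name);

        if (strcmp((char *)name, "ima") != 0) {
            /* total template data length precedes the fields */
            if (read_exact(f, &datalen, 4))
                break;
            unsigned char *data = malloc(datalen);
            if (!data || read_exact(f, data, datalen)) { free(data); break; }
            /* each field inside is again <u32 len><data> */
            free(data);
            printf(" (%u bytes of field data)", datalen);
        } else {
            /* legacy 'ima' template: 20-byte file digest + <u32 len><name> */
            unsigned char d[20];
            if (read_exact(f, d, 20) || read_exact(f, &fieldlen, 4))
                break;
            fseek(f, fieldlen, SEEK_CUR);
            printf(" (legacy record)");
        }
        putchar('\n');
    }
    fclose(f);
    return 0;
}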
+ } + seq_puts(m, "\n"); return 0; } diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 162ea723db3..15f34bd40ab 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c @@ -18,6 +18,7 @@ #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/err.h> +#include <crypto/hash_info.h> #include "ima.h" /* name for boot aggregate entry */ @@ -42,28 +43,38 @@ int ima_used_chip; static void __init ima_add_boot_aggregate(void) { struct ima_template_entry *entry; + struct integrity_iint_cache tmp_iint, *iint = &tmp_iint; const char *op = "add_boot_aggregate"; const char *audit_cause = "ENOMEM"; int result = -ENOMEM; - int violation = 1; + int violation = 0; + struct { + struct ima_digest_data hdr; + char digest[TPM_DIGEST_SIZE]; + } hash; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - goto err_out; + memset(iint, 0, sizeof(*iint)); + memset(&hash, 0, sizeof(hash)); + iint->ima_hash = &hash.hdr; + iint->ima_hash->algo = HASH_ALGO_SHA1; + iint->ima_hash->length = SHA1_DIGEST_SIZE; - memset(&entry->template, 0, sizeof(entry->template)); - strncpy(entry->template.file_name, boot_aggregate_name, - IMA_EVENT_NAME_LEN_MAX); if (ima_used_chip) { - violation = 0; - result = ima_calc_boot_aggregate(entry->template.digest); + result = ima_calc_boot_aggregate(&hash.hdr); if (result < 0) { audit_cause = "hashing_error"; kfree(entry); goto err_out; } } - result = ima_store_template(entry, violation, NULL); + + result = ima_alloc_init_template(iint, NULL, boot_aggregate_name, + NULL, 0, &entry); + if (result < 0) + return; + + result = ima_store_template(entry, violation, NULL, + boot_aggregate_name); if (result < 0) kfree(entry); return; @@ -74,7 +85,7 @@ err_out: int __init ima_init(void) { - u8 pcr_i[IMA_DIGEST_SIZE]; + u8 pcr_i[TPM_DIGEST_SIZE]; int rc; ima_used_chip = 0; @@ -88,6 +99,10 @@ int __init ima_init(void) rc = ima_init_crypto(); if (rc) return rc; + rc = ima_init_template(); + if (rc != 0) + return rc; + ima_add_boot_aggregate(); /* boot aggregate must be first entry */ ima_init_policy(); diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index e9508d5bbfc..149ee1119f8 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/xattr.h> #include <linux/ima.h> +#include <crypto/hash_info.h> #include "ima.h" @@ -35,11 +36,33 @@ int ima_appraise = IMA_APPRAISE_ENFORCE; int ima_appraise; #endif -char *ima_hash = "sha1"; +int ima_hash_algo = HASH_ALGO_SHA1; +static int hash_setup_done; + static int __init hash_setup(char *str) { - if (strncmp(str, "md5", 3) == 0) - ima_hash = "md5"; + struct ima_template_desc *template_desc = ima_template_desc_current(); + int i; + + if (hash_setup_done) + return 1; + + if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { + if (strncmp(str, "sha1", 4) == 0) + ima_hash_algo = HASH_ALGO_SHA1; + else if (strncmp(str, "md5", 3) == 0) + ima_hash_algo = HASH_ALGO_MD5; + goto out; + } + + for (i = 0; i < HASH_ALGO__LAST; i++) { + if (strcmp(str, hash_algo_name[i]) == 0) { + ima_hash_algo = i; + break; + } + } +out: + hash_setup_done = 1; return 1; } __setup("ima_hash=", hash_setup); @@ -92,10 +115,9 @@ out: pathname = dentry->d_name.name; if (send_tomtou) - ima_add_violation(inode, pathname, - "invalid_pcr", "ToMToU"); + ima_add_violation(file, pathname, "invalid_pcr", "ToMToU"); if (send_writers) - ima_add_violation(inode, pathname, + 
ima_add_violation(file, pathname, "invalid_pcr", "open_writers"); kfree(pathbuf); } @@ -144,9 +166,12 @@ static int process_measurement(struct file *file, const char *filename, { struct inode *inode = file_inode(file); struct integrity_iint_cache *iint; + struct ima_template_desc *template_desc = ima_template_desc_current(); char *pathbuf = NULL; const char *pathname = NULL; int rc = -ENOMEM, action, must_appraise, _func; + struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL; + int xattr_len = 0; if (!ima_initialized || !S_ISREG(inode->i_mode)) return 0; @@ -185,7 +210,13 @@ static int process_measurement(struct file *file, const char *filename, goto out_digsig; } - rc = ima_collect_measurement(iint, file); + if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { + if (action & IMA_APPRAISE_SUBMASK) + xattr_ptr = &xattr_value; + } else + xattr_ptr = &xattr_value; + + rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len); if (rc != 0) goto out_digsig; @@ -194,9 +225,11 @@ static int process_measurement(struct file *file, const char *filename, pathname = (const char *)file->f_dentry->d_name.name; if (action & IMA_MEASURE) - ima_store_measurement(iint, file, pathname); + ima_store_measurement(iint, file, pathname, + xattr_value, xattr_len); if (action & IMA_APPRAISE_SUBMASK) - rc = ima_appraise_measurement(_func, iint, file, pathname); + rc = ima_appraise_measurement(_func, iint, file, pathname, + xattr_value, xattr_len); if (action & IMA_AUDIT) ima_audit_measurement(iint, pathname); kfree(pathbuf); @@ -205,6 +238,7 @@ out_digsig: rc = -EACCES; out: mutex_unlock(&inode->i_mutex); + kfree(xattr_value); if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE)) return -EACCES; return 0; @@ -244,9 +278,9 @@ int ima_file_mmap(struct file *file, unsigned long prot) int ima_bprm_check(struct linux_binprm *bprm) { return process_measurement(bprm->file, - (strcmp(bprm->filename, bprm->interp) == 0) ? - bprm->filename : bprm->interp, - MAY_EXEC, BPRM_CHECK); + (strcmp(bprm->filename, bprm->interp) == 0) ? 
+ bprm->filename : bprm->interp, + MAY_EXEC, BPRM_CHECK); } /** @@ -263,8 +297,8 @@ int ima_file_check(struct file *file, int mask) { ima_rdwr_violation_check(file); return process_measurement(file, NULL, - mask & (MAY_READ | MAY_WRITE | MAY_EXEC), - FILE_CHECK); + mask & (MAY_READ | MAY_WRITE | MAY_EXEC), + FILE_CHECK); } EXPORT_SYMBOL_GPL(ima_file_check); @@ -294,6 +328,7 @@ static int __init init_ima(void) { int error; + hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); if (!error) ima_initialized = 1; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 399433ad614..a9c3d3cd199 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -73,7 +73,6 @@ static struct ima_rule_entry default_rules[] = { {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC}, {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC}, {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC}, - {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC}, {.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC}, {.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC}, {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC}, diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c index ff63fe00c19..d85e99761f4 100644 --- a/security/integrity/ima/ima_queue.c +++ b/security/integrity/ima/ima_queue.c @@ -50,7 +50,7 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value) key = ima_hash_key(digest_value); rcu_read_lock(); hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { - rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE); + rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE); if (rc == 0) { ret = qe; break; @@ -104,9 +104,10 @@ static int ima_pcr_extend(const u8 *hash) * and extend the pcr. */ int ima_add_template_entry(struct ima_template_entry *entry, int violation, - const char *op, struct inode *inode) + const char *op, struct inode *inode, + const unsigned char *filename) { - u8 digest[IMA_DIGEST_SIZE]; + u8 digest[TPM_DIGEST_SIZE]; const char *audit_cause = "hash_added"; char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX]; int audit_info = 1; @@ -141,8 +142,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, } out: mutex_unlock(&ima_extend_list_mutex); - integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, - entry->template.file_name, + integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, op, audit_cause, result, audit_info); return result; } diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c new file mode 100644 index 00000000000..4e5da990630 --- /dev/null +++ b/security/integrity/ima/ima_template.c @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template.c + * Helpers to manage template descriptors. 
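A small sketch of the ima_hash= selection logic from hash_setup() above: the legacy "ima" template only knows 20-byte digests, so it stays pinned to SHA1/MD5, while any other template may pick from the algorithm name table. The enum values, the abbreviated name table, and the SHA1 fallback below are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <string.h>

enum hash_algo { ALG_MD5, ALG_SHA1, ALG_SHA256, ALG_SHA512, ALG__LAST };
static const char *algo_name[ALG__LAST] = { "md5", "sha1", "sha256", "sha512" };

/* Mirror of hash_setup(): restrict the legacy template, otherwise look the
 * requested name up in the table and keep the default on no match. */
static int pick_hash(const char *str, const char *template_name)
{
    if (strcmp(template_name, "ima") == 0)
        return strcmp(str, "md5") == 0 ? ALG_MD5 : ALG_SHA1;

    for (int i = 0; i < ALG__LAST; i++)
        if (strcmp(str, algo_name[i]) == 0)
            return i;
    return ALG_SHA1;                    /* fall back to the default */
}

int main(void)
{
    printf("ima_hash=sha256, ima-ng -> %s\n",
           algo_name[pick_hash("sha256", "ima-ng")]);
    printf("ima_hash=sha256, ima    -> %s\n",
           algo_name[pick_hash("sha256", "ima")]);
    return 0;
}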
+ */ +#include <crypto/hash_info.h> + +#include "ima.h" +#include "ima_template_lib.h" + +static struct ima_template_desc defined_templates[] = { + {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT}, + {.name = "ima-ng",.fmt = "d-ng|n-ng"}, + {.name = "ima-sig",.fmt = "d-ng|n-ng|sig"}, +}; + +static struct ima_template_field supported_fields[] = { + {.field_id = "d",.field_init = ima_eventdigest_init, + .field_show = ima_show_template_digest}, + {.field_id = "n",.field_init = ima_eventname_init, + .field_show = ima_show_template_string}, + {.field_id = "d-ng",.field_init = ima_eventdigest_ng_init, + .field_show = ima_show_template_digest_ng}, + {.field_id = "n-ng",.field_init = ima_eventname_ng_init, + .field_show = ima_show_template_string}, + {.field_id = "sig",.field_init = ima_eventsig_init, + .field_show = ima_show_template_sig}, +}; + +static struct ima_template_desc *ima_template; +static struct ima_template_desc *lookup_template_desc(const char *name); + +static int __init ima_template_setup(char *str) +{ + struct ima_template_desc *template_desc; + int template_len = strlen(str); + + /* + * Verify that a template with the supplied name exists. + * If not, use CONFIG_IMA_DEFAULT_TEMPLATE. + */ + template_desc = lookup_template_desc(str); + if (!template_desc) + return 1; + + /* + * Verify whether the current hash algorithm is supported + * by the 'ima' template. + */ + if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 && + ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) { + pr_err("IMA: template does not support hash alg\n"); + return 1; + } + + ima_template = template_desc; + return 1; +} +__setup("ima_template=", ima_template_setup); + +static struct ima_template_desc *lookup_template_desc(const char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(defined_templates); i++) { + if (strcmp(defined_templates[i].name, name) == 0) + return defined_templates + i; + } + + return NULL; +} + +static struct ima_template_field *lookup_template_field(const char *field_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(supported_fields); i++) + if (strncmp(supported_fields[i].field_id, field_id, + IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0) + return &supported_fields[i]; + return NULL; +} + +static int template_fmt_size(char *template_fmt) +{ + char c; + int template_fmt_len = strlen(template_fmt); + int i = 0, j = 0; + + while (i < template_fmt_len) { + c = template_fmt[i]; + if (c == '|') + j++; + i++; + } + + return j + 1; +} + +static int template_desc_init_fields(char *template_fmt, + struct ima_template_field ***fields, + int *num_fields) +{ + char *c, *template_fmt_ptr = template_fmt; + int template_num_fields = template_fmt_size(template_fmt); + int i, result = 0; + + if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) + return -EINVAL; + + *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL); + if (*fields == NULL) { + result = -ENOMEM; + goto out; + } + for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL && + i < template_num_fields; i++) { + struct ima_template_field *f = lookup_template_field(c); + + if (!f) { + result = -ENOENT; + goto out; + } + (*fields)[i] = f; + } + *num_fields = i; + return 0; +out: + kfree(*fields); + *fields = NULL; + return result; +} + +static int init_defined_templates(void) +{ + int i = 0; + int result = 0; + + /* Init defined templates. 
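The template descriptor code above turns a format string such as "d-ng|n-ng|sig" into an array of field handlers. A hypothetical userspace equivalent of template_fmt_size() plus the strsep() split done in template_desc_init_fields(); the field ids are simply printed here instead of being resolved against supported_fields[].

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
    char fmt[] = "d-ng|n-ng|sig";       /* the "ima-sig" descriptor above */
    char *p = fmt, *id;
    int nfields = 1;

    for (const char *c = fmt; *c; c++)  /* template_fmt_size() equivalent */
        if (*c == '|')
            nfields++;
    printf("%d fields:", nfields);

    while ((id = strsep(&p, "|")) != NULL)
        printf(" [%s]", id);            /* each id would be looked up in
                                         * the supported fields table */
    putchar('\n');
    return 0;
}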
*/ + for (i = 0; i < ARRAY_SIZE(defined_templates); i++) { + struct ima_template_desc *template = &defined_templates[i]; + + result = template_desc_init_fields(template->fmt, + &(template->fields), + &(template->num_fields)); + if (result < 0) + return result; + } + return result; +} + +struct ima_template_desc *ima_template_desc_current(void) +{ + if (!ima_template) + ima_template = + lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE); + return ima_template; +} + +int ima_init_template(void) +{ + int result; + + result = init_defined_templates(); + if (result < 0) + return result; + + return 0; +} diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c new file mode 100644 index 00000000000..6d66ad6ed26 --- /dev/null +++ b/security/integrity/ima/ima_template_lib.c @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template_lib.c + * Library of supported template fields. + */ +#include <crypto/hash_info.h> + +#include "ima_template_lib.h" + +static bool ima_template_hash_algo_allowed(u8 algo) +{ + if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5) + return true; + + return false; +} + +enum data_formats { + DATA_FMT_DIGEST = 0, + DATA_FMT_DIGEST_WITH_ALGO, + DATA_FMT_EVENT_NAME, + DATA_FMT_STRING, + DATA_FMT_HEX +}; + +static int ima_write_template_field_data(const void *data, const u32 datalen, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u8 *buf, *buf_ptr; + u32 buflen; + + switch (datafmt) { + case DATA_FMT_EVENT_NAME: + buflen = IMA_EVENT_NAME_LEN_MAX + 1; + break; + case DATA_FMT_STRING: + buflen = datalen + 1; + break; + default: + buflen = datalen; + } + + buf = kzalloc(buflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, data, datalen); + + /* + * Replace all space characters with underscore for event names and + * strings. This avoid that, during the parsing of a measurements list, + * filenames with spaces or that end with the suffix ' (deleted)' are + * split into multiple template fields (the space is the delimitator + * character for measurements lists in ASCII format). 
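ima_write_template_field_data() above copies the data into its own buffer and rewrites spaces as underscores so the space-delimited ASCII measurement list stays parseable. A simplified sketch of the string case only (the fixed-size event-name buffer is omitted); store_event_name() is an invented helper name.

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Copy an event name into a template field, NUL-terminating it and
 * replacing spaces with underscores, as in the hunk above. */
static char *store_event_name(const char *name, size_t *out_len)
{
    size_t datalen = strlen(name);
    size_t buflen = datalen + 1;        /* strings keep a trailing NUL */
    char *buf = calloc(1, buflen);

    if (!buf)
        return NULL;
    memcpy(buf, name, datalen);
    for (size_t i = 0; i < datalen; i++)
        if (buf[i] == ' ')
            buf[i] = '_';
    *out_len = buflen;
    return buf;
}

int main(void)
{
    size_t len;
    char *field = store_event_name("/tmp/a file (deleted)", &len);

    printf("%zu bytes: %s\n", len, field);  /* /tmp/a_file_(deleted) */
    free(field);
    return 0;
}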
+ */ + if (datafmt == DATA_FMT_EVENT_NAME || datafmt == DATA_FMT_STRING) { + for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++) + if (*buf_ptr == ' ') + *buf_ptr = '_'; + } + + field_data->data = buf; + field_data->len = buflen; + return 0; +} + +static void ima_show_template_data_ascii(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u8 *buf_ptr = field_data->data, buflen = field_data->len; + + switch (datafmt) { + case DATA_FMT_DIGEST_WITH_ALGO: + buf_ptr = strnchr(field_data->data, buflen, ':'); + if (buf_ptr != field_data->data) + seq_printf(m, "%s", field_data->data); + + /* skip ':' and '\0' */ + buf_ptr += 2; + buflen -= buf_ptr - field_data->data; + case DATA_FMT_DIGEST: + case DATA_FMT_HEX: + if (!buflen) + break; + ima_print_digest(m, buf_ptr, buflen); + break; + case DATA_FMT_STRING: + seq_printf(m, "%s", buf_ptr); + break; + default: + break; + } +} + +static void ima_show_template_data_binary(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + ima_putc(m, &field_data->len, sizeof(u32)); + if (!field_data->len) + return; + ima_putc(m, field_data->data, field_data->len); +} + +static void ima_show_template_field_data(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + switch (show) { + case IMA_SHOW_ASCII: + ima_show_template_data_ascii(m, show, datafmt, field_data); + break; + case IMA_SHOW_BINARY: + ima_show_template_data_binary(m, show, datafmt, field_data); + break; + default: + break; + } +} + +void ima_show_template_digest(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data); +} + +void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO, + field_data); +} + +void ima_show_template_string(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data); +} + +void ima_show_template_sig(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data); +} + +static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo, + struct ima_field_data *field_data, + bool size_limit) +{ + /* + * digest formats: + * - DATA_FMT_DIGEST: digest + * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest, + * where <hash algo> is provided if the hash algoritm is not + * SHA1 or MD5 + */ + u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 }; + enum data_formats fmt = DATA_FMT_DIGEST; + u32 offset = 0; + + if (!size_limit) { + fmt = DATA_FMT_DIGEST_WITH_ALGO; + if (hash_algo < HASH_ALGO__LAST) + offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, + "%s", hash_algo_name[hash_algo]); + buffer[offset] = ':'; + offset += 2; + } + + if (digest) + memcpy(buffer + offset, digest, digestsize); + else + /* + * If digest is NULL, the event being recorded is a violation. + * Make room for the digest by increasing the offset of + * IMA_DIGEST_SIZE. + */ + offset += IMA_DIGEST_SIZE; + + return ima_write_template_field_data(buffer, offset + digestsize, + fmt, field_data); +} + +/* + * This function writes the digest of an event (with size limit). 
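ima_eventdigest_init_common() above lays the "d-ng" field out as "<algo>" ':' '\0' <raw digest>, and the ASCII show routine prints the prefix up to the NUL and hex-dumps the rest. A toy illustration of that layout with a made-up, truncated digest; buffer sizes are arbitrary.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    const char *algo = "sha256";
    const uint8_t digest[4] = { 0xde, 0xad, 0xbe, 0xef };  /* demo digest */
    uint8_t field[64] = { 0 };
    uint32_t off = 0;

    off += snprintf((char *)field, sizeof(field), "%s", algo);
    field[off] = ':';
    off += 2;                               /* skip ':' and the '\0' */
    memcpy(field + off, digest, sizeof(digest));
    uint32_t field_len = off + sizeof(digest);

    /* display: print the prefix up to the NUL, then hex-dump the rest */
    printf("%s", (char *)field);
    for (uint32_t i = off; i < field_len; i++)
        printf("%02x", field[i]);
    putchar('\n');                          /* sha256:deadbeef */
    return 0;
}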
+ */ +int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + struct { + struct ima_digest_data hdr; + char digest[IMA_MAX_DIGEST_SIZE]; + } hash; + u8 *cur_digest = NULL; + u32 cur_digestsize = 0; + struct inode *inode; + int result; + + memset(&hash, 0, sizeof(hash)); + + if (!iint) /* recording a violation. */ + goto out; + + if (ima_template_hash_algo_allowed(iint->ima_hash->algo)) { + cur_digest = iint->ima_hash->digest; + cur_digestsize = iint->ima_hash->length; + goto out; + } + + if (!file) /* missing info to re-calculate the digest */ + return -EINVAL; + + inode = file_inode(file); + hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ? + ima_hash_algo : HASH_ALGO_SHA1; + result = ima_calc_file_hash(file, &hash.hdr); + if (result) { + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, + filename, "collect_data", + "failed", result, 0); + return result; + } + cur_digest = hash.hdr.digest; + cur_digestsize = hash.hdr.length; +out: + return ima_eventdigest_init_common(cur_digest, cur_digestsize, -1, + field_data, true); +} + +/* + * This function writes the digest of an event (without size limit). + */ +int ima_eventdigest_ng_init(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_field_data *field_data) +{ + u8 *cur_digest = NULL, hash_algo = HASH_ALGO__LAST; + u32 cur_digestsize = 0; + + /* If iint is NULL, we are recording a violation. */ + if (!iint) + goto out; + + cur_digest = iint->ima_hash->digest; + cur_digestsize = iint->ima_hash->length; + + hash_algo = iint->ima_hash->algo; +out: + return ima_eventdigest_init_common(cur_digest, cur_digestsize, + hash_algo, field_data, false); +} + +static int ima_eventname_init_common(struct integrity_iint_cache *iint, + struct file *file, + const unsigned char *filename, + struct ima_field_data *field_data, + bool size_limit) +{ + const char *cur_filename = NULL; + u32 cur_filename_len = 0; + enum data_formats fmt = size_limit ? + DATA_FMT_EVENT_NAME : DATA_FMT_STRING; + + BUG_ON(filename == NULL && file == NULL); + + if (filename) { + cur_filename = filename; + cur_filename_len = strlen(filename); + + if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX) + goto out; + } + + if (file) { + cur_filename = file->f_dentry->d_name.name; + cur_filename_len = strlen(cur_filename); + } else + /* + * Truncate filename if the latter is too long and + * the file descriptor is not available. + */ + cur_filename_len = IMA_EVENT_NAME_LEN_MAX; +out: + return ima_write_template_field_data(cur_filename, cur_filename_len, + fmt, field_data); +} + +/* + * This function writes the name of an event (with size limit). + */ +int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + return ima_eventname_init_common(iint, file, filename, + field_data, true); +} + +/* + * This function writes the name of an event (without size limit). 
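ima_eventname_init_common() above chooses what goes into the name field: the full pathname when it fits (or when the field has no size limit), otherwise the dentry name, truncating only as a last resort. A hedged userspace restatement of that choice; EVENT_NAME_MAX and pick_event_name() are stand-ins for the kernel names.

#include <stdio.h>
#include <string.h>

#define EVENT_NAME_MAX 255   /* stand-in for IMA_EVENT_NAME_LEN_MAX */

static const char *pick_event_name(const char *pathname,
                                   const char *dentry_name,
                                   int size_limit, size_t *len)
{
    if (pathname) {
        *len = strlen(pathname);
        if (!size_limit || *len <= EVENT_NAME_MAX)
            return pathname;            /* full pathname fits */
    }
    if (dentry_name) {
        *len = strlen(dentry_name);
        return dentry_name;             /* fall back to the last component */
    }
    *len = EVENT_NAME_MAX;              /* truncate the long pathname */
    return pathname;
}

int main(void)
{
    size_t len;
    const char *n = pick_event_name("/usr/bin/true", "true", 1, &len);

    printf("recorded: %.*s\n", (int)len, n);
    return 0;
}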
+ */ +int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + return ima_eventname_init_common(iint, file, filename, + field_data, false); +} + +/* + * ima_eventsig_init - include the file signature as part of the template data + */ +int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + enum data_formats fmt = DATA_FMT_HEX; + int rc = 0; + + if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG)) + goto out; + + rc = ima_write_template_field_data(xattr_value, xattr_len, fmt, + field_data); +out: + return rc; +} diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h new file mode 100644 index 00000000000..63f6b52cb1c --- /dev/null +++ b/security/integrity/ima/ima_template_lib.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template_lib.h + * Header for the library of supported template fields. + */ +#ifndef __LINUX_IMA_TEMPLATE_LIB_H +#define __LINUX_IMA_TEMPLATE_LIB_H + +#include <linux/seq_file.h> +#include "ima.h" + +void ima_show_template_digest(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_string(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_sig(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +int ima_eventdigest_ng_init(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_field_data *field_data); +int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +#endif /* __LINUX_IMA_TEMPLATE_LIB_H */ diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h index c42fb7a70de..b9e7c133734 100644 --- a/security/integrity/integrity.h +++ b/security/integrity/integrity.h @@ -54,25 +54,57 @@ enum evm_ima_xattr_type { IMA_XATTR_DIGEST = 0x01, EVM_XATTR_HMAC, EVM_IMA_XATTR_DIGSIG, + IMA_XATTR_DIGEST_NG, }; struct evm_ima_xattr_data { u8 type; 
u8 digest[SHA1_DIGEST_SIZE]; -} __attribute__((packed)); +} __packed; + +#define IMA_MAX_DIGEST_SIZE 64 + +struct ima_digest_data { + u8 algo; + u8 length; + union { + struct { + u8 unused; + u8 type; + } sha1; + struct { + u8 type; + u8 algo; + } ng; + u8 data[2]; + } xattr; + u8 digest[0]; +} __packed; + +/* + * signature format v2 - for using with asymmetric keys + */ +struct signature_v2_hdr { + uint8_t type; /* xattr type */ + uint8_t version; /* signature format version */ + uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */ + uint32_t keyid; /* IMA key identifier - not X509/PGP specific */ + uint16_t sig_size; /* signature size */ + uint8_t sig[0]; /* signature payload */ +} __packed; /* integrity data associated with an inode */ struct integrity_iint_cache { - struct rb_node rb_node; /* rooted in integrity_iint_tree */ + struct rb_node rb_node; /* rooted in integrity_iint_tree */ struct inode *inode; /* back pointer to inode in question */ u64 version; /* track inode changes */ unsigned long flags; - struct evm_ima_xattr_data ima_xattr; enum integrity_status ima_file_status:4; enum integrity_status ima_mmap_status:4; enum integrity_status ima_bprm_status:4; enum integrity_status ima_module_status:4; enum integrity_status evm_status:4; + struct ima_digest_data *ima_hash; }; /* rbtree tree calls to lookup, insert, delete @@ -89,7 +121,7 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode); #ifdef CONFIG_INTEGRITY_SIGNATURE int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, - const char *digest, int digestlen); + const char *digest, int digestlen); #else @@ -105,12 +137,19 @@ static inline int integrity_digsig_verify(const unsigned int id, #ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS int asymmetric_verify(struct key *keyring, const char *sig, int siglen, const char *data, int datalen); + +int integrity_init_keyring(const unsigned int id); #else static inline int asymmetric_verify(struct key *keyring, const char *sig, int siglen, const char *data, int datalen) { return -EOPNOTSUPP; } + +static int integrity_init_keyring(const unsigned int id) +{ + return 0; +} #endif #ifdef CONFIG_INTEGRITY_AUDIT diff --git a/security/keys/Kconfig b/security/keys/Kconfig index a90d6d300db..a4f3f8c48d6 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig @@ -4,6 +4,7 @@ config KEYS bool "Enable access key retention support" + select ASSOCIATIVE_ARRAY help This option provides support for retaining authentication tokens and access keys in the kernel. @@ -19,6 +20,34 @@ config KEYS If you are unsure as to whether this is required, answer N. +config PERSISTENT_KEYRINGS + bool "Enable register of persistent per-UID keyrings" + depends on KEYS + help + This option provides a register of persistent per-UID keyrings, + primarily aimed at Kerberos key storage. The keyrings are persistent + in the sense that they stay around after all processes of that UID + have exited, not that they survive the machine being rebooted. + + A particular keyring may be accessed by either the user whose keyring + it is or by a process with administrative privileges. The active + LSMs gets to rule on which admin-level processes get to access the + cache. + + Keyrings are created and added into the register upon demand and get + removed if they expire (a default timeout is set upon creation). 
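The reworked integrity.h above implies two on-disk layouts for the security.ima xattr: the legacy <type><sha1/md5 digest> value and the new "ng" value <type><algo id><digest>. A sketch that just builds both byte layouts; the numeric type and algorithm ids follow the enum ordering shown above but should be treated as illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define IMA_XATTR_DIGEST     0x01
#define IMA_XATTR_DIGEST_NG  0x04
#define HASH_ALGO_SHA256     4          /* illustrative id */

static size_t build_legacy(uint8_t *out, const uint8_t *sha1)
{
    out[0] = IMA_XATTR_DIGEST;          /* digest starts at offset 1 */
    memcpy(out + 1, sha1, 20);
    return 1 + 20;
}

static size_t build_ng(uint8_t *out, uint8_t algo,
                       const uint8_t *digest, size_t dlen)
{
    out[0] = IMA_XATTR_DIGEST_NG;
    out[1] = algo;                      /* digest starts at offset 2 */
    memcpy(out + 2, digest, dlen);
    return 2 + dlen;
}

int main(void)
{
    uint8_t sha1[20] = { 0 }, sha256[32] = { 0 }, buf[64];
    size_t n;

    n = build_legacy(buf, sha1);
    printf("legacy xattr: %zu bytes, type 0x%02x\n", n, buf[0]);
    n = build_ng(buf, HASH_ALGO_SHA256, sha256, sizeof(sha256));
    printf("ng xattr:     %zu bytes, type 0x%02x algo %u\n", n, buf[0], buf[1]);
    return 0;
}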
+ +config BIG_KEYS + bool "Large payload keys" + depends on KEYS + depends on TMPFS + help + This option provides support for holding large keys within the kernel + (for example Kerberos ticket caches). The data may be stored out to + swapspace by tmpfs. + + If you are unsure as to whether this is required, answer N. + config TRUSTED_KEYS tristate "TRUSTED KEYS" depends on KEYS && TCG_TPM diff --git a/security/keys/Makefile b/security/keys/Makefile index 504aaa00838..dfb3a7beded 100644 --- a/security/keys/Makefile +++ b/security/keys/Makefile @@ -18,9 +18,11 @@ obj-y := \ obj-$(CONFIG_KEYS_COMPAT) += compat.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSCTL) += sysctl.o +obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o # # Key types # +obj-$(CONFIG_BIG_KEYS) += big_key.o obj-$(CONFIG_TRUSTED_KEYS) += trusted.o obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/ diff --git a/security/keys/big_key.c b/security/keys/big_key.c new file mode 100644 index 00000000000..7f44c3207a9 --- /dev/null +++ b/security/keys/big_key.c @@ -0,0 +1,207 @@ +/* Large capacity key type + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/seq_file.h> +#include <linux/file.h> +#include <linux/shmem_fs.h> +#include <linux/err.h> +#include <keys/user-type.h> +#include <keys/big_key-type.h> + +MODULE_LICENSE("GPL"); + +/* + * If the data is under this limit, there's no point creating a shm file to + * hold it as the permanently resident metadata for the shmem fs will be at + * least as large as the data. + */ +#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry)) + +/* + * big_key defined keys take an arbitrary string as the description and an + * arbitrary blob of data as the payload + */ +struct key_type key_type_big_key = { + .name = "big_key", + .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, + .instantiate = big_key_instantiate, + .match = user_match, + .revoke = big_key_revoke, + .destroy = big_key_destroy, + .describe = big_key_describe, + .read = big_key_read, +}; + +/* + * Instantiate a big key + */ +int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep) +{ + struct path *path = (struct path *)&key->payload.data2; + struct file *file; + ssize_t written; + size_t datalen = prep->datalen; + int ret; + + ret = -EINVAL; + if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) + goto error; + + /* Set an arbitrary quota */ + ret = key_payload_reserve(key, 16); + if (ret < 0) + goto error; + + key->type_data.x[1] = datalen; + + if (datalen > BIG_KEY_FILE_THRESHOLD) { + /* Create a shmem file to store the data in. This will permit the data + * to be swapped out if needed. + * + * TODO: Encrypt the stored data with a temporary key. 
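A userspace analogue of the big_key storage decision above: payloads at or below the threshold stay in an ordinary buffer, larger ones are spilled to backing storage that can be paged out (shmem in the kernel, tmpfile() in this sketch). The threshold constant and struct names are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FILE_THRESHOLD 1024     /* stand-in for sizeof(inode)+sizeof(dentry) */

struct blob { size_t len; void *buf; FILE *file; };

static int blob_store(struct blob *b, const void *data, size_t len)
{
    b->len = len;
    if (len > FILE_THRESHOLD) {
        b->file = tmpfile();            /* swappable backing store */
        if (!b->file || fwrite(data, 1, len, b->file) != len)
            return -1;
        b->buf = NULL;
    } else {
        b->buf = malloc(len);           /* small: keep it resident */
        if (!b->buf)
            return -1;
        memcpy(b->buf, data, len);
        b->file = NULL;
    }
    return 0;
}

int main(void)
{
    struct blob small, big;
    char payload[4096];

    memset(payload, 'k', sizeof(payload));
    blob_store(&small, payload, 16);
    blob_store(&big, payload, sizeof(payload));
    printf("16 bytes -> %s, 4096 bytes -> %s\n",
           small.buf ? "buffer" : "file", big.buf ? "buffer" : "file");
    return 0;
}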
+ */ + file = shmem_file_setup("", datalen, 0); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto err_quota; + } + + written = kernel_write(file, prep->data, prep->datalen, 0); + if (written != datalen) { + ret = written; + if (written >= 0) + ret = -ENOMEM; + goto err_fput; + } + + /* Pin the mount and dentry to the key so that we can open it again + * later + */ + *path = file->f_path; + path_get(path); + fput(file); + } else { + /* Just store the data in a buffer */ + void *data = kmalloc(datalen, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err_quota; + } + + key->payload.data = memcpy(data, prep->data, prep->datalen); + } + return 0; + +err_fput: + fput(file); +err_quota: + key_payload_reserve(key, 0); +error: + return ret; +} + +/* + * dispose of the links from a revoked keyring + * - called with the key sem write-locked + */ +void big_key_revoke(struct key *key) +{ + struct path *path = (struct path *)&key->payload.data2; + + /* clear the quota */ + key_payload_reserve(key, 0); + if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) + vfs_truncate(path, 0); +} + +/* + * dispose of the data dangling from the corpse of a big_key key + */ +void big_key_destroy(struct key *key) +{ + if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) { + struct path *path = (struct path *)&key->payload.data2; + path_put(path); + path->mnt = NULL; + path->dentry = NULL; + } else { + kfree(key->payload.data); + key->payload.data = NULL; + } +} + +/* + * describe the big_key key + */ +void big_key_describe(const struct key *key, struct seq_file *m) +{ + unsigned long datalen = key->type_data.x[1]; + + seq_puts(m, key->description); + + if (key_is_instantiated(key)) + seq_printf(m, ": %lu [%s]", + datalen, + datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff"); +} + +/* + * read the key data + * - the key's semaphore is read-locked + */ +long big_key_read(const struct key *key, char __user *buffer, size_t buflen) +{ + unsigned long datalen = key->type_data.x[1]; + long ret; + + if (!buffer || buflen < datalen) + return datalen; + + if (datalen > BIG_KEY_FILE_THRESHOLD) { + struct path *path = (struct path *)&key->payload.data2; + struct file *file; + loff_t pos; + + file = dentry_open(path, O_RDONLY, current_cred()); + if (IS_ERR(file)) + return PTR_ERR(file); + + pos = 0; + ret = vfs_read(file, buffer, datalen, &pos); + fput(file); + if (ret >= 0 && ret != datalen) + ret = -EIO; + } else { + ret = datalen; + if (copy_to_user(buffer, key->payload.data, datalen) != 0) + ret = -EFAULT; + } + + return ret; +} + +/* + * Module stuff + */ +static int __init big_key_init(void) +{ + return register_key_type(&key_type_big_key); +} + +static void __exit big_key_cleanup(void) +{ + unregister_key_type(&key_type_big_key); +} + +module_init(big_key_init); +module_exit(big_key_cleanup); diff --git a/security/keys/compat.c b/security/keys/compat.c index d65fa7fa29b..bbd32c729db 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c @@ -138,6 +138,9 @@ asmlinkage long compat_sys_keyctl(u32 option, case KEYCTL_INVALIDATE: return keyctl_invalidate_key(arg2); + case KEYCTL_GET_PERSISTENT: + return keyctl_get_persistent(arg2, arg3); + default: return -EOPNOTSUPP; } diff --git a/security/keys/gc.c b/security/keys/gc.c index d67c97bb102..d3222b6d7d5 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -131,50 +131,6 @@ void key_gc_keytype(struct key_type *ktype) } /* - * Garbage collect pointers from a keyring. - * - * Not called with any locks held. 
The keyring's key struct will not be - * deallocated under us as only our caller may deallocate it. - */ -static void key_gc_keyring(struct key *keyring, time_t limit) -{ - struct keyring_list *klist; - int loop; - - kenter("%x", key_serial(keyring)); - - if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | - (1 << KEY_FLAG_REVOKED))) - goto dont_gc; - - /* scan the keyring looking for dead keys */ - rcu_read_lock(); - klist = rcu_dereference(keyring->payload.subscriptions); - if (!klist) - goto unlock_dont_gc; - - loop = klist->nkeys; - smp_rmb(); - for (loop--; loop >= 0; loop--) { - struct key *key = rcu_dereference(klist->keys[loop]); - if (key_is_dead(key, limit)) - goto do_gc; - } - -unlock_dont_gc: - rcu_read_unlock(); -dont_gc: - kleave(" [no gc]"); - return; - -do_gc: - rcu_read_unlock(); - - keyring_gc(keyring, limit); - kleave(" [gc]"); -} - -/* * Garbage collect a list of unreferenced, detached keys */ static noinline void key_gc_unused_keys(struct list_head *keys) @@ -392,8 +348,7 @@ found_unreferenced_key: */ found_keyring: spin_unlock(&key_serial_lock); - kdebug("scan keyring %d", key->serial); - key_gc_keyring(key, limit); + keyring_gc(key, limit); goto maybe_resched; /* We found a dead key that is still referenced. Reset its type and diff --git a/security/keys/internal.h b/security/keys/internal.h index d4f1468b9b5..80b2aac4f50 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -89,42 +89,53 @@ extern struct key_type *key_type_lookup(const char *type); extern void key_type_put(struct key_type *ktype); extern int __key_link_begin(struct key *keyring, - const struct key_type *type, - const char *description, - unsigned long *_prealloc); + const struct keyring_index_key *index_key, + struct assoc_array_edit **_edit); extern int __key_link_check_live_key(struct key *keyring, struct key *key); -extern void __key_link(struct key *keyring, struct key *key, - unsigned long *_prealloc); +extern void __key_link(struct key *key, struct assoc_array_edit **_edit); extern void __key_link_end(struct key *keyring, - struct key_type *type, - unsigned long prealloc); + const struct keyring_index_key *index_key, + struct assoc_array_edit *edit); -extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, - const struct key_type *type, - const char *description, - key_perm_t perm); +extern key_ref_t find_key_to_update(key_ref_t keyring_ref, + const struct keyring_index_key *index_key); extern struct key *keyring_search_instkey(struct key *keyring, key_serial_t target_id); +extern int iterate_over_keyring(const struct key *keyring, + int (*func)(const struct key *key, void *data), + void *data); + typedef int (*key_match_func_t)(const struct key *, const void *); +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + key_match_func_t match; + const void *match_data; + unsigned flags; +#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */ +#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */ +#define KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */ +#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */ +#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */ +#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */ + + int (*iterator)(const void *object, void *iterator_data); + + /* Internal stuff */ + int skipped_ret; + bool possessed; + key_ref_t result; + struct timespec now; +}; + extern key_ref_t 
keyring_search_aux(key_ref_t keyring_ref, - const struct cred *cred, - struct key_type *type, - const void *description, - key_match_func_t match, - bool no_state_check); - -extern key_ref_t search_my_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - bool no_state_check, - const struct cred *cred); -extern key_ref_t search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - const struct cred *cred); + struct keyring_search_context *ctx); + +extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx); +extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx); extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); @@ -202,7 +213,7 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id); /* * Determine whether a key is dead. */ -static inline bool key_is_dead(struct key *key, time_t limit) +static inline bool key_is_dead(const struct key *key, time_t limit) { return key->flags & ((1 << KEY_FLAG_DEAD) | @@ -244,6 +255,15 @@ extern long keyctl_invalidate_key(key_serial_t); extern long keyctl_instantiate_key_common(key_serial_t, const struct iovec *, unsigned, size_t, key_serial_t); +#ifdef CONFIG_PERSISTENT_KEYRINGS +extern long keyctl_get_persistent(uid_t, key_serial_t); +extern unsigned persistent_keyring_expiry; +#else +static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring) +{ + return -EOPNOTSUPP; +} +#endif /* * Debugging key validation diff --git a/security/keys/key.c b/security/keys/key.c index 8fb7c7bd465..55d110f0ace 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -242,8 +242,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, } } - desclen = strlen(desc) + 1; - quotalen = desclen + type->def_datalen; + desclen = strlen(desc); + quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid); @@ -277,7 +277,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, goto no_memory_2; if (desc) { - key->description = kmemdup(desc, desclen, GFP_KERNEL); + key->index_key.desc_len = desclen; + key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->description) goto no_memory_3; } @@ -285,7 +286,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, atomic_set(&key->usage, 1); init_rwsem(&key->sem); lockdep_set_class(&key->sem, &type->lock_class); - key->type = type; + key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; @@ -299,6 +300,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; + if (flags & KEY_ALLOC_TRUSTED) + key->flags |= 1 << KEY_FLAG_TRUSTED; memset(&key->type_data, 0, sizeof(key->type_data)); @@ -408,7 +411,7 @@ static int __key_instantiate_and_link(struct key *key, struct key_preparsed_payload *prep, struct key *keyring, struct key *authkey, - unsigned long *_prealloc) + struct assoc_array_edit **_edit) { int ret, awaken; @@ -435,7 +438,7 @@ static int __key_instantiate_and_link(struct key *key, /* and link it into the destination keyring */ if (keyring) - __key_link(keyring, key, _prealloc); + __key_link(key, _edit); /* disable the authorisation key */ if (authkey) @@ -475,7 +478,7 @@ int key_instantiate_and_link(struct key *key, struct key *authkey) { struct key_preparsed_payload prep; - unsigned long 
prealloc; + struct assoc_array_edit *edit; int ret; memset(&prep, 0, sizeof(prep)); @@ -489,17 +492,15 @@ int key_instantiate_and_link(struct key *key, } if (keyring) { - ret = __key_link_begin(keyring, key->type, key->description, - &prealloc); + ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error_free_preparse; } - ret = __key_instantiate_and_link(key, &prep, keyring, authkey, - &prealloc); + ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); if (keyring) - __key_link_end(keyring, key->type, prealloc); + __key_link_end(keyring, &key->index_key, edit); error_free_preparse: if (key->type->preparse) @@ -537,7 +538,7 @@ int key_reject_and_link(struct key *key, struct key *keyring, struct key *authkey) { - unsigned long prealloc; + struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; @@ -548,8 +549,7 @@ int key_reject_and_link(struct key *key, ret = -EBUSY; if (keyring) - link_ret = __key_link_begin(keyring, key->type, - key->description, &prealloc); + link_ret = __key_link_begin(keyring, &key->index_key, &edit); mutex_lock(&key_construction_mutex); @@ -557,9 +557,10 @@ int key_reject_and_link(struct key *key, if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); + key->type_data.reject_error = -error; + smp_wmb(); set_bit(KEY_FLAG_NEGATIVE, &key->flags); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); - key->type_data.reject_error = -error; now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); @@ -571,7 +572,7 @@ int key_reject_and_link(struct key *key, /* and link it into the destination keyring */ if (keyring && link_ret == 0) - __key_link(keyring, key, &prealloc); + __key_link(key, &edit); /* disable the authorisation key */ if (authkey) @@ -581,7 +582,7 @@ int key_reject_and_link(struct key *key, mutex_unlock(&key_construction_mutex); if (keyring) - __key_link_end(keyring, key->type, prealloc); + __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) @@ -645,7 +646,7 @@ found: /* this races with key_put(), but that doesn't matter since key_put() * doesn't actually change the key */ - atomic_inc(&key->usage); + __key_get(key); error: spin_unlock(&key_serial_lock); @@ -780,25 +781,27 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, key_perm_t perm, unsigned long flags) { - unsigned long prealloc; + struct keyring_index_key index_key = { + .description = description, + }; struct key_preparsed_payload prep; + struct assoc_array_edit *edit; const struct cred *cred = current_cred(); - struct key_type *ktype; struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ - ktype = key_type_lookup(type); - if (IS_ERR(ktype)) { + index_key.type = key_type_lookup(type); + if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); - if (!ktype->match || !ktype->instantiate || - (!description && !ktype->preparse)) + if (!index_key.type->match || !index_key.type->instantiate || + (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); @@ -812,21 +815,28 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; - prep.quotalen = ktype->def_datalen; - if (ktype->preparse) { - 
ret = ktype->preparse(&prep); + prep.quotalen = index_key.type->def_datalen; + prep.trusted = flags & KEY_ALLOC_TRUSTED; + if (index_key.type->preparse) { + ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_put_type; } - if (!description) - description = prep.description; + if (!index_key.description) + index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); - if (!description) + if (!index_key.description) goto error_free_prep; } + index_key.desc_len = strlen(index_key.description); + + key_ref = ERR_PTR(-EPERM); + if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags)) + goto error_free_prep; + flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0; - ret = __key_link_begin(keyring, ktype, description, &prealloc); + ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; @@ -844,10 +854,9 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, * key of the same type and description in the destination keyring and * update that instead if possible */ - if (ktype->update) { - key_ref = __keyring_search_one(keyring_ref, ktype, description, - 0); - if (!IS_ERR(key_ref)) + if (index_key.type->update) { + key_ref = find_key_to_update(keyring_ref, &index_key); + if (key_ref) goto found_matching_key; } @@ -856,23 +865,24 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; - if (ktype->read) + if (index_key.type->read) perm |= KEY_POS_READ; - if (ktype == &key_type_keyring || ktype->update) + if (index_key.type == &key_type_keyring || + index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ - key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred, - perm, flags); + key = key_alloc(index_key.type, index_key.description, + cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ - ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &prealloc); + ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); @@ -882,12 +892,12 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: - __key_link_end(keyring, ktype, prealloc); + __key_link_end(keyring, &index_key, edit); error_free_prep: - if (ktype->preparse) - ktype->free_preparse(&prep); + if (index_key.type->preparse) + index_key.type->free_preparse(&prep); error_put_type: - key_type_put(ktype); + key_type_put(index_key.type); error: return key_ref; @@ -895,7 +905,7 @@ error: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ - __key_link_end(keyring, ktype, prealloc); + __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 33cfd27b4de..cee72ce6422 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1667,6 +1667,9 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, case KEYCTL_INVALIDATE: return keyctl_invalidate_key((key_serial_t) arg2); + case KEYCTL_GET_PERSISTENT: + return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3); + default: return -EOPNOTSUPP; } diff --git a/security/keys/keyring.c 
b/security/keys/keyring.c index 6ece7f2e570..69f0cb7bab7 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -1,6 +1,6 @@ /* Keyring handling * - * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -17,25 +17,11 @@ #include <linux/seq_file.h> #include <linux/err.h> #include <keys/keyring-type.h> +#include <keys/user-type.h> +#include <linux/assoc_array_priv.h> #include <linux/uaccess.h> #include "internal.h" -#define rcu_dereference_locked_keyring(keyring) \ - (rcu_dereference_protected( \ - (keyring)->payload.subscriptions, \ - rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem))) - -#define rcu_deref_link_locked(klist, index, keyring) \ - (rcu_dereference_protected( \ - (klist)->keys[index], \ - rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem))) - -#define MAX_KEYRING_LINKS \ - min_t(size_t, USHRT_MAX - 1, \ - ((PAGE_SIZE - sizeof(struct keyring_list)) / sizeof(struct key *))) - -#define KEY_LINK_FIXQUOTA 1UL - /* * When plumbing the depths of the key tree, this sets a hard limit * set on how deep we're willing to go. @@ -47,6 +33,28 @@ */ #define KEYRING_NAME_HASH_SIZE (1 << 5) +/* + * We mark pointers we pass to the associative array with bit 1 set if + * they're keyrings and clear otherwise. + */ +#define KEYRING_PTR_SUBTYPE 0x2UL + +static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & KEYRING_PTR_SUBTYPE; +} +static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) +{ + void *object = assoc_array_ptr_to_leaf(x); + return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); +} +static inline void *keyring_key_to_ptr(struct key *key) +{ + if (key->type == &key_type_keyring) + return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); + return key; +} + static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; static DEFINE_RWLOCK(keyring_name_lock); @@ -67,7 +75,6 @@ static inline unsigned keyring_hash(const char *desc) */ static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep); -static int keyring_match(const struct key *keyring, const void *criterion); static void keyring_revoke(struct key *keyring); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); @@ -76,9 +83,9 @@ static long keyring_read(const struct key *keyring, struct key_type key_type_keyring = { .name = "keyring", - .def_datalen = sizeof(struct keyring_list), + .def_datalen = 0, .instantiate = keyring_instantiate, - .match = keyring_match, + .match = user_match, .revoke = keyring_revoke, .destroy = keyring_destroy, .describe = keyring_describe, @@ -127,6 +134,7 @@ static int keyring_instantiate(struct key *keyring, ret = -EINVAL; if (prep->datalen == 0) { + assoc_array_init(&keyring->keys); /* make the keyring available by name if it has one */ keyring_publish_name(keyring); ret = 0; @@ -136,15 +144,226 @@ static int keyring_instantiate(struct key *keyring, } /* - * Match keyrings on their name + * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd + * fold the carry back too, but that requires inline asm. 
+ */ +static u64 mult_64x32_and_fold(u64 x, u32 y) +{ + u64 hi = (u64)(u32)(x >> 32) * y; + u64 lo = (u64)(u32)(x) * y; + return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); +} + +/* + * Hash a key type and description. + */ +static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) +{ + const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; + const unsigned long level_mask = ASSOC_ARRAY_LEVEL_STEP_MASK; + const char *description = index_key->description; + unsigned long hash, type; + u32 piece; + u64 acc; + int n, desc_len = index_key->desc_len; + + type = (unsigned long)index_key->type; + + acc = mult_64x32_and_fold(type, desc_len + 13); + acc = mult_64x32_and_fold(acc, 9207); + for (;;) { + n = desc_len; + if (n <= 0) + break; + if (n > 4) + n = 4; + piece = 0; + memcpy(&piece, description, n); + description += n; + desc_len -= n; + acc = mult_64x32_and_fold(acc, piece); + acc = mult_64x32_and_fold(acc, 9207); + } + + /* Fold the hash down to 32 bits if need be. */ + hash = acc; + if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) + hash ^= acc >> 32; + + /* Squidge all the keyrings into a separate part of the tree to + * ordinary keys by making sure the lowest level segment in the hash is + * zero for keyrings and non-zero otherwise. + */ + if (index_key->type != &key_type_keyring && (hash & level_mask) == 0) + return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; + if (index_key->type == &key_type_keyring && (hash & level_mask) != 0) + return (hash + (hash << level_shift)) & ~level_mask; + return hash; +} + +/* + * Build the next index key chunk. + * + * On 32-bit systems the index key is laid out as: + * + * 0 4 5 9... + * hash desclen typeptr desc[] + * + * On 64-bit systems: + * + * 0 8 9 17... + * hash desclen typeptr desc[] + * + * We return it one word-sized chunk at a time. 
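The multiply-and-fold step above is small enough to lift out and play with on its own. The sketch below is a userspace toy, not kernel code: it copies mult_64x32_and_fold() from the hunk and drives it the way hash_key_type_and_desc() does, with a made-up value standing in for the type pointer and the four-byte description "user" as illustrative input.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	typedef uint64_t u64;
	typedef uint32_t u32;

	/* Multiply 64 bits by 32 bits to 96 bits and fold the top back in
	 * (copied from the hunk; the carry of the addition is not folded). */
	static u64 mult_64x32_and_fold(u64 x, u32 y)
	{
		u64 hi = (u64)(u32)(x >> 32) * y;
		u64 lo = (u64)(u32)(x) * y;
		return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
	}

	int main(void)
	{
		u64 fake_type_ptr = 0xffffffffc0102030ULL;	/* illustrative only */
		const char desc[] = "user";
		int desc_len = sizeof(desc) - 1;
		u32 piece = 0;
		u64 acc;

		/* Seed with the type pointer and the description length... */
		acc = mult_64x32_and_fold(fake_type_ptr, desc_len + 13);
		acc = mult_64x32_and_fold(acc, 9207);

		/* ...then mix in the description four bytes at a time. */
		memcpy(&piece, desc, 4);
		acc = mult_64x32_and_fold(acc, piece);
		acc = mult_64x32_and_fold(acc, 9207);

		printf("hash = %#llx\n", (unsigned long long)acc);
		return 0;
	}

On 32-bit key chunks the kernel additionally XORs the top 32 bits of the accumulator down into the bottom half, and in all cases it then nudges the lowest nibble so that keyrings and ordinary keys land in disjoint root slots.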
*/ -static int keyring_match(const struct key *keyring, const void *description) +static unsigned long keyring_get_key_chunk(const void *data, int level) +{ + const struct keyring_index_key *index_key = data; + unsigned long chunk = 0; + long offset = 0; + int desc_len = index_key->desc_len, n = sizeof(chunk); + + level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; + switch (level) { + case 0: + return hash_key_type_and_desc(index_key); + case 1: + return ((unsigned long)index_key->type << 8) | desc_len; + case 2: + if (desc_len == 0) + return (u8)((unsigned long)index_key->type >> + (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); + n--; + offset = 1; + default: + offset += sizeof(chunk) - 1; + offset += (level - 3) * sizeof(chunk); + if (offset >= desc_len) + return 0; + desc_len -= offset; + if (desc_len > n) + desc_len = n; + offset += desc_len; + do { + chunk <<= 8; + chunk |= ((u8*)index_key->description)[--offset]; + } while (--desc_len > 0); + + if (level == 2) { + chunk <<= 8; + chunk |= (u8)((unsigned long)index_key->type >> + (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); + } + return chunk; + } +} + +static unsigned long keyring_get_object_key_chunk(const void *object, int level) +{ + const struct key *key = keyring_ptr_to_key(object); + return keyring_get_key_chunk(&key->index_key, level); +} + +static bool keyring_compare_object(const void *object, const void *data) { - return keyring->description && - strcmp(keyring->description, description) == 0; + const struct keyring_index_key *index_key = data; + const struct key *key = keyring_ptr_to_key(object); + + return key->index_key.type == index_key->type && + key->index_key.desc_len == index_key->desc_len && + memcmp(key->index_key.description, index_key->description, + index_key->desc_len) == 0; } /* + * Compare the index keys of a pair of objects and determine the bit position + * at which they differ - if they differ. + */ +static int keyring_diff_objects(const void *_a, const void *_b) +{ + const struct key *key_a = keyring_ptr_to_key(_a); + const struct key *key_b = keyring_ptr_to_key(_b); + const struct keyring_index_key *a = &key_a->index_key; + const struct keyring_index_key *b = &key_b->index_key; + unsigned long seg_a, seg_b; + int level, i; + + level = 0; + seg_a = hash_key_type_and_desc(a); + seg_b = hash_key_type_and_desc(b); + if ((seg_a ^ seg_b) != 0) + goto differ; + + /* The number of bits contributed by the hash is controlled by a + * constant in the assoc_array headers. Everything else thereafter we + * can deal with as being machine word-size dependent. 
+ */ + level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; + seg_a = a->desc_len; + seg_b = b->desc_len; + if ((seg_a ^ seg_b) != 0) + goto differ; + + /* The next bit may not work on big endian */ + level++; + seg_a = (unsigned long)a->type; + seg_b = (unsigned long)b->type; + if ((seg_a ^ seg_b) != 0) + goto differ; + + level += sizeof(unsigned long); + if (a->desc_len == 0) + goto same; + + i = 0; + if (((unsigned long)a->description | (unsigned long)b->description) & + (sizeof(unsigned long) - 1)) { + do { + seg_a = *(unsigned long *)(a->description + i); + seg_b = *(unsigned long *)(b->description + i); + if ((seg_a ^ seg_b) != 0) + goto differ_plus_i; + i += sizeof(unsigned long); + } while (i < (a->desc_len & (sizeof(unsigned long) - 1))); + } + + for (; i < a->desc_len; i++) { + seg_a = *(unsigned char *)(a->description + i); + seg_b = *(unsigned char *)(b->description + i); + if ((seg_a ^ seg_b) != 0) + goto differ_plus_i; + } + +same: + return -1; + +differ_plus_i: + level += i; +differ: + i = level * 8 + __ffs(seg_a ^ seg_b); + return i; +} + +/* + * Free an object after stripping the keyring flag off of the pointer. + */ +static void keyring_free_object(void *object) +{ + key_put(keyring_ptr_to_key(object)); +} + +/* + * Operations for keyring management by the index-tree routines. + */ +static const struct assoc_array_ops keyring_assoc_array_ops = { + .get_key_chunk = keyring_get_key_chunk, + .get_object_key_chunk = keyring_get_object_key_chunk, + .compare_object = keyring_compare_object, + .diff_objects = keyring_diff_objects, + .free_object = keyring_free_object, +}; + +/* * Clean up a keyring when it is destroyed. Unpublish its name if it had one * and dispose of its data. * @@ -155,9 +374,6 @@ static int keyring_match(const struct key *keyring, const void *description) */ static void keyring_destroy(struct key *keyring) { - struct keyring_list *klist; - int loop; - if (keyring->description) { write_lock(&keyring_name_lock); @@ -168,12 +384,7 @@ static void keyring_destroy(struct key *keyring) write_unlock(&keyring_name_lock); } - klist = rcu_access_pointer(keyring->payload.subscriptions); - if (klist) { - for (loop = klist->nkeys - 1; loop >= 0; loop--) - key_put(rcu_access_pointer(klist->keys[loop])); - kfree(klist); - } + assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); } /* @@ -181,76 +392,88 @@ static void keyring_destroy(struct key *keyring) */ static void keyring_describe(const struct key *keyring, struct seq_file *m) { - struct keyring_list *klist; - if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_instantiated(keyring)) { - rcu_read_lock(); - klist = rcu_dereference(keyring->payload.subscriptions); - if (klist) - seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys); + if (keyring->keys.nr_leaves_on_tree != 0) + seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); - rcu_read_unlock(); } } +struct keyring_read_iterator_context { + size_t qty; + size_t count; + key_serial_t __user *buffer; +}; + +static int keyring_read_iterator(const void *object, void *data) +{ + struct keyring_read_iterator_context *ctx = data; + const struct key *key = keyring_ptr_to_key(object); + int ret; + + kenter("{%s,%d},,{%zu/%zu}", + key->type->name, key->serial, ctx->count, ctx->qty); + + if (ctx->count >= ctx->qty) + return 1; + + ret = put_user(key->serial, ctx->buffer); + if (ret < 0) + return ret; + ctx->buffer++; + ctx->count += sizeof(key->serial); + return 0; +} + /* * Read a list of key IDs 
from the keyring's contents in binary form * - * The keyring's semaphore is read-locked by the caller. + * The keyring's semaphore is read-locked by the caller. This prevents someone + * from modifying it under us - which could cause us to read key IDs multiple + * times. */ static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen) { - struct keyring_list *klist; - struct key *key; - size_t qty, tmp; - int loop, ret; + struct keyring_read_iterator_context ctx; + unsigned long nr_keys; + int ret; - ret = 0; - klist = rcu_dereference_locked_keyring(keyring); - if (klist) { - /* calculate how much data we could return */ - qty = klist->nkeys * sizeof(key_serial_t); - - if (buffer && buflen > 0) { - if (buflen > qty) - buflen = qty; - - /* copy the IDs of the subscribed keys into the - * buffer */ - ret = -EFAULT; - - for (loop = 0; loop < klist->nkeys; loop++) { - key = rcu_deref_link_locked(klist, loop, - keyring); - - tmp = sizeof(key_serial_t); - if (tmp > buflen) - tmp = buflen; - - if (copy_to_user(buffer, - &key->serial, - tmp) != 0) - goto error; - - buflen -= tmp; - if (buflen == 0) - break; - buffer += tmp; - } - } + kenter("{%d},,%zu", key_serial(keyring), buflen); + + if (buflen & (sizeof(key_serial_t) - 1)) + return -EINVAL; + + nr_keys = keyring->keys.nr_leaves_on_tree; + if (nr_keys == 0) + return 0; - ret = qty; + /* Calculate how much data we could return */ + ctx.qty = nr_keys * sizeof(key_serial_t); + + if (!buffer || !buflen) + return ctx.qty; + + if (buflen > ctx.qty) + ctx.qty = buflen; + + /* Copy the IDs of the subscribed keys into the buffer */ + ctx.buffer = (key_serial_t __user *)buffer; + ctx.count = 0; + ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); + if (ret < 0) { + kleave(" = %d [iterate]", ret); + return ret; } -error: - return ret; + kleave(" = %zu [ok]", ctx.count); + return ctx.count; } /* @@ -277,227 +500,361 @@ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, } EXPORT_SYMBOL(keyring_alloc); -/** - * keyring_search_aux - Search a keyring tree for a key matching some criteria - * @keyring_ref: A pointer to the keyring with possession indicator. - * @cred: The credentials to use for permissions checks. - * @type: The type of key to search for. - * @description: Parameter for @match. - * @match: Function to rule on whether or not a key is the one required. - * @no_state_check: Don't check if a matching key is bad - * - * Search the supplied keyring tree for a key that matches the criteria given. - * The root keyring and any linked keyrings must grant Search permission to the - * caller to be searchable and keys can only be found if they too grant Search - * to the caller. The possession flag on the root keyring pointer controls use - * of the possessor bits in permissions checking of the entire tree. In - * addition, the LSM gets to forbid keyring searches and key matches. - * - * The search is performed as a breadth-then-depth search up to the prescribed - * limit (KEYRING_SEARCH_MAX_DEPTH). - * - * Keys are matched to the type provided and are then filtered by the match - * function, which is given the description to use in any way it sees fit. The - * match function may use any attributes of a key that it wishes to to - * determine the match. Normally the match function from the key type would be - * used. - * - * RCU is used to prevent the keyring key lists from disappearing without the - * need to take lots of locks. 
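The reworked keyring_read() above hands the subscribed keys back as a flat array of key_serial_t, which is exactly what userspace sees when it reads a keyring. A minimal sketch of that view, assuming libkeyutils is available (link with -lkeyutils); the choice of the session keyring is arbitrary:

	#include <keyutils.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		key_serial_t *ids;
		void *buf;
		long n, i;

		/* Reading a keyring yields an array of key serial numbers. */
		n = keyctl_read_alloc(KEY_SPEC_SESSION_KEYRING, &buf);
		if (n < 0) {
			perror("keyctl_read_alloc");
			return 1;
		}

		ids = buf;
		for (i = 0; i < n / (long)sizeof(key_serial_t); i++)
			printf("key %d\n", ids[i]);

		free(buf);
		return 0;
	}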
- * - * Returns a pointer to the found key and increments the key usage count if - * successful; -EAGAIN if no matching keys were found, or if expired or revoked - * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the - * specified keyring wasn't a keyring. - * - * In the case of a successful return, the possession attribute from - * @keyring_ref is propagated to the returned key reference. +/* + * Iteration function to consider each key found. */ -key_ref_t keyring_search_aux(key_ref_t keyring_ref, - const struct cred *cred, - struct key_type *type, - const void *description, - key_match_func_t match, - bool no_state_check) +static int keyring_search_iterator(const void *object, void *iterator_data) { - struct { - /* Need a separate keylist pointer for RCU purposes */ - struct key *keyring; - struct keyring_list *keylist; - int kix; - } stack[KEYRING_SEARCH_MAX_DEPTH]; - - struct keyring_list *keylist; - struct timespec now; - unsigned long possessed, kflags; - struct key *keyring, *key; - key_ref_t key_ref; - long err; - int sp, nkeys, kix; + struct keyring_search_context *ctx = iterator_data; + const struct key *key = keyring_ptr_to_key(object); + unsigned long kflags = key->flags; - keyring = key_ref_to_ptr(keyring_ref); - possessed = is_key_possessed(keyring_ref); - key_check(keyring); + kenter("{%d}", key->serial); - /* top keyring must have search permission to begin the search */ - err = key_task_permission(keyring_ref, cred, KEY_SEARCH); - if (err < 0) { - key_ref = ERR_PTR(err); - goto error; + /* ignore keys not of this type */ + if (key->type != ctx->index_key.type) { + kleave(" = 0 [!type]"); + return 0; } - key_ref = ERR_PTR(-ENOTDIR); - if (keyring->type != &key_type_keyring) - goto error; + /* skip invalidated, revoked and expired keys */ + if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { + if (kflags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) { + ctx->result = ERR_PTR(-EKEYREVOKED); + kleave(" = %d [invrev]", ctx->skipped_ret); + goto skipped; + } - rcu_read_lock(); + if (key->expiry && ctx->now.tv_sec >= key->expiry) { + ctx->result = ERR_PTR(-EKEYEXPIRED); + kleave(" = %d [expire]", ctx->skipped_ret); + goto skipped; + } + } - now = current_kernel_time(); - err = -EAGAIN; - sp = 0; - - /* firstly we should check to see if this top-level keyring is what we - * are looking for */ - key_ref = ERR_PTR(-EAGAIN); - kflags = keyring->flags; - if (keyring->type == type && match(keyring, description)) { - key = keyring; - if (no_state_check) - goto found; + /* keys that don't match */ + if (!ctx->match(key, ctx->match_data)) { + kleave(" = 0 [!match]"); + return 0; + } - /* check it isn't negative and hasn't expired or been - * revoked */ - if (kflags & (1 << KEY_FLAG_REVOKED)) - goto error_2; - if (key->expiry && now.tv_sec >= key->expiry) - goto error_2; - key_ref = ERR_PTR(key->type_data.reject_error); - if (kflags & (1 << KEY_FLAG_NEGATIVE)) - goto error_2; - goto found; + /* key must have search permissions */ + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && + key_task_permission(make_key_ref(key, ctx->possessed), + ctx->cred, KEY_SEARCH) < 0) { + ctx->result = ERR_PTR(-EACCES); + kleave(" = %d [!perm]", ctx->skipped_ret); + goto skipped; } - /* otherwise, the top keyring must not be revoked, expired, or - * negatively instantiated if we are to search it */ - key_ref = ERR_PTR(-EAGAIN); - if (kflags & ((1 << KEY_FLAG_INVALIDATED) | - (1 << KEY_FLAG_REVOKED) | - (1 << KEY_FLAG_NEGATIVE)) || - (keyring->expiry && now.tv_sec >= 
keyring->expiry)) - goto error_2; - - /* start processing a new keyring */ -descend: - kflags = keyring->flags; - if (kflags & ((1 << KEY_FLAG_INVALIDATED) | - (1 << KEY_FLAG_REVOKED))) - goto not_this_keyring; + if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { + /* we set a different error code if we pass a negative key */ + if (kflags & (1 << KEY_FLAG_NEGATIVE)) { + smp_rmb(); + ctx->result = ERR_PTR(key->type_data.reject_error); + kleave(" = %d [neg]", ctx->skipped_ret); + goto skipped; + } + } - keylist = rcu_dereference(keyring->payload.subscriptions); - if (!keylist) - goto not_this_keyring; + /* Found */ + ctx->result = make_key_ref(key, ctx->possessed); + kleave(" = 1 [found]"); + return 1; - /* iterate through the keys in this keyring first */ - nkeys = keylist->nkeys; - smp_rmb(); - for (kix = 0; kix < nkeys; kix++) { - key = rcu_dereference(keylist->keys[kix]); - kflags = key->flags; +skipped: + return ctx->skipped_ret; +} - /* ignore keys not of this type */ - if (key->type != type) - continue; +/* + * Search inside a keyring for a key. We can search by walking to it + * directly based on its index-key or we can iterate over the entire + * tree looking for it, based on the match function. + */ +static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) +{ + if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) == + KEYRING_SEARCH_LOOKUP_DIRECT) { + const void *object; + + object = assoc_array_find(&keyring->keys, + &keyring_assoc_array_ops, + &ctx->index_key); + return object ? ctx->iterator(object, ctx) : 0; + } + return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); +} - /* skip invalidated, revoked and expired keys */ - if (!no_state_check) { - if (kflags & ((1 << KEY_FLAG_INVALIDATED) | - (1 << KEY_FLAG_REVOKED))) - continue; +/* + * Search a tree of keyrings that point to other keyrings up to the maximum + * depth. + */ +static bool search_nested_keyrings(struct key *keyring, + struct keyring_search_context *ctx) +{ + struct { + struct key *keyring; + struct assoc_array_node *node; + int slot; + } stack[KEYRING_SEARCH_MAX_DEPTH]; - if (key->expiry && now.tv_sec >= key->expiry) - continue; - } + struct assoc_array_shortcut *shortcut; + struct assoc_array_node *node; + struct assoc_array_ptr *ptr; + struct key *key; + int sp = 0, slot; - /* keys that don't match */ - if (!match(key, description)) - continue; + kenter("{%d},{%s,%s}", + keyring->serial, + ctx->index_key.type->name, + ctx->index_key.description); - /* key must have search permissions */ - if (key_task_permission(make_key_ref(key, possessed), - cred, KEY_SEARCH) < 0) - continue; + if (ctx->index_key.description) + ctx->index_key.desc_len = strlen(ctx->index_key.description); - if (no_state_check) + /* Check to see if this top-level keyring is what we are looking for + * and whether it is valid or not. 
+ */ + if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE || + keyring_compare_object(keyring, &ctx->index_key)) { + ctx->skipped_ret = 2; + ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK; + switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { + case 1: goto found; - - /* we set a different error code if we pass a negative key */ - if (kflags & (1 << KEY_FLAG_NEGATIVE)) { - err = key->type_data.reject_error; - continue; + case 2: + return false; + default: + break; } + } + + ctx->skipped_ret = 0; + if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK) + ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK; + /* Start processing a new keyring */ +descend_to_keyring: + kdebug("descend to %d", keyring->serial); + if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) + goto not_this_keyring; + + /* Search through the keys in this keyring before its searching its + * subtrees. + */ + if (search_keyring(keyring, ctx)) goto found; - } - /* search through the keyrings nested in this one */ - kix = 0; -ascend: - nkeys = keylist->nkeys; - smp_rmb(); - for (; kix < nkeys; kix++) { - key = rcu_dereference(keylist->keys[kix]); - if (key->type != &key_type_keyring) - continue; + /* Then manually iterate through the keyrings nested in this one. + * + * Start from the root node of the index tree. Because of the way the + * hash function has been set up, keyrings cluster on the leftmost + * branch of the root node (root slot 0) or in the root node itself. + * Non-keyrings avoid the leftmost branch of the root entirely (root + * slots 1-15). + */ + ptr = ACCESS_ONCE(keyring->keys.root); + if (!ptr) + goto not_this_keyring; - /* recursively search nested keyrings - * - only search keyrings for which we have search permission + if (assoc_array_ptr_is_shortcut(ptr)) { + /* If the root is a shortcut, either the keyring only contains + * keyring pointers (everything clusters behind root slot 0) or + * doesn't contain any keyring pointers. */ - if (sp >= KEYRING_SEARCH_MAX_DEPTH) + shortcut = assoc_array_ptr_to_shortcut(ptr); + smp_read_barrier_depends(); + if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) + goto not_this_keyring; + + ptr = ACCESS_ONCE(shortcut->next_node); + node = assoc_array_ptr_to_node(ptr); + goto begin_node; + } + + node = assoc_array_ptr_to_node(ptr); + smp_read_barrier_depends(); + + ptr = node->slots[0]; + if (!assoc_array_ptr_is_meta(ptr)) + goto begin_node; + +descend_to_node: + /* Descend to a more distal node in this keyring's content tree and go + * through that. 
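The clustering relied on here falls straight out of the fix-up at the end of hash_key_type_and_desc(): a keyring's hash gets its lowest nibble forced to zero (root slot 0), and anything else gets it forced non-zero. A throwaway userspace illustration of both branches, assuming the usual 4-bit level step and 64-bit key chunks; the raw hash value is arbitrary:

	#include <stdio.h>

	int main(void)
	{
		const unsigned level_shift = 4;		/* ASSOC_ARRAY_LEVEL_STEP (assumed) */
		const unsigned long level_mask = 0xf;	/* ..._LEVEL_STEP_MASK (assumed) */
		const unsigned chunk_size = 64;		/* ASSOC_ARRAY_KEY_CHUNK_SIZE (assumed) */
		unsigned long hash = 0x9e3779b97f4a7c15UL;	/* some raw hash */

		/* A keyring whose hash ends in a non-zero nibble: clear it. */
		unsigned long keyring_hash =
			(hash + (hash << level_shift)) & ~level_mask;

		/* An ordinary key whose hash happens to end in zero: force it
		 * non-zero instead. */
		unsigned long zero_ended = hash & ~level_mask;
		unsigned long key_hash = zero_ended |
			(zero_ended >> (chunk_size - level_shift)) | 1;

		printf("keyring -> root slot %lu, ordinary key -> root slot %lu\n",
		       keyring_hash & level_mask, key_hash & level_mask);
		return 0;
	}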
+ */ + kdebug("descend"); + if (assoc_array_ptr_is_shortcut(ptr)) { + shortcut = assoc_array_ptr_to_shortcut(ptr); + smp_read_barrier_depends(); + ptr = ACCESS_ONCE(shortcut->next_node); + BUG_ON(!assoc_array_ptr_is_node(ptr)); + node = assoc_array_ptr_to_node(ptr); + } + +begin_node: + kdebug("begin_node"); + smp_read_barrier_depends(); + slot = 0; +ascend_to_node: + /* Go through the slots in a node */ + for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = ACCESS_ONCE(node->slots[slot]); + + if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) + goto descend_to_node; + + if (!keyring_ptr_is_keyring(ptr)) continue; - if (key_task_permission(make_key_ref(key, possessed), - cred, KEY_SEARCH) < 0) + key = keyring_ptr_to_key(ptr); + + if (sp >= KEYRING_SEARCH_MAX_DEPTH) { + if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { + ctx->result = ERR_PTR(-ELOOP); + return false; + } + goto not_this_keyring; + } + + /* Search a nested keyring */ + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && + key_task_permission(make_key_ref(key, ctx->possessed), + ctx->cred, KEY_SEARCH) < 0) continue; /* stack the current position */ stack[sp].keyring = keyring; - stack[sp].keylist = keylist; - stack[sp].kix = kix; + stack[sp].node = node; + stack[sp].slot = slot; sp++; /* begin again with the new keyring */ keyring = key; - goto descend; + goto descend_to_keyring; } - /* the keyring we're looking at was disqualified or didn't contain a - * matching key */ + /* We've dealt with all the slots in the current node, so now we need + * to ascend to the parent and continue processing there. + */ + ptr = ACCESS_ONCE(node->back_pointer); + slot = node->parent_slot; + + if (ptr && assoc_array_ptr_is_shortcut(ptr)) { + shortcut = assoc_array_ptr_to_shortcut(ptr); + smp_read_barrier_depends(); + ptr = ACCESS_ONCE(shortcut->back_pointer); + slot = shortcut->parent_slot; + } + if (!ptr) + goto not_this_keyring; + node = assoc_array_ptr_to_node(ptr); + smp_read_barrier_depends(); + slot++; + + /* If we've ascended to the root (zero backpointer), we must have just + * finished processing the leftmost branch rather than the root slots - + * so there can't be any more keyrings for us to find. + */ + if (node->back_pointer) { + kdebug("ascend %d", slot); + goto ascend_to_node; + } + + /* The keyring we're looking at was disqualified or didn't contain a + * matching key. 
+ */ not_this_keyring: - if (sp > 0) { - /* resume the processing of a keyring higher up in the tree */ - sp--; - keyring = stack[sp].keyring; - keylist = stack[sp].keylist; - kix = stack[sp].kix + 1; - goto ascend; + kdebug("not_this_keyring %d", sp); + if (sp <= 0) { + kleave(" = false"); + return false; } - key_ref = ERR_PTR(err); - goto error_2; + /* Resume the processing of a keyring higher up in the tree */ + sp--; + keyring = stack[sp].keyring; + node = stack[sp].node; + slot = stack[sp].slot + 1; + kdebug("ascend to %d [%d]", keyring->serial, slot); + goto ascend_to_node; - /* we found a viable match */ + /* We found a viable match */ found: - atomic_inc(&key->usage); - key->last_used_at = now.tv_sec; - keyring->last_used_at = now.tv_sec; - while (sp > 0) - stack[--sp].keyring->last_used_at = now.tv_sec; + key = key_ref_to_ptr(ctx->result); key_check(key); - key_ref = make_key_ref(key, possessed); -error_2: + if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { + key->last_used_at = ctx->now.tv_sec; + keyring->last_used_at = ctx->now.tv_sec; + while (sp > 0) + stack[--sp].keyring->last_used_at = ctx->now.tv_sec; + } + kleave(" = true"); + return true; +} + +/** + * keyring_search_aux - Search a keyring tree for a key matching some criteria + * @keyring_ref: A pointer to the keyring with possession indicator. + * @ctx: The keyring search context. + * + * Search the supplied keyring tree for a key that matches the criteria given. + * The root keyring and any linked keyrings must grant Search permission to the + * caller to be searchable and keys can only be found if they too grant Search + * to the caller. The possession flag on the root keyring pointer controls use + * of the possessor bits in permissions checking of the entire tree. In + * addition, the LSM gets to forbid keyring searches and key matches. + * + * The search is performed as a breadth-then-depth search up to the prescribed + * limit (KEYRING_SEARCH_MAX_DEPTH). + * + * Keys are matched to the type provided and are then filtered by the match + * function, which is given the description to use in any way it sees fit. The + * match function may use any attributes of a key that it wishes to to + * determine the match. Normally the match function from the key type would be + * used. + * + * RCU can be used to prevent the keyring key lists from disappearing without + * the need to take lots of locks. + * + * Returns a pointer to the found key and increments the key usage count if + * successful; -EAGAIN if no matching keys were found, or if expired or revoked + * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the + * specified keyring wasn't a keyring. + * + * In the case of a successful return, the possession attribute from + * @keyring_ref is propagated to the returned key reference. 
+ */ +key_ref_t keyring_search_aux(key_ref_t keyring_ref, + struct keyring_search_context *ctx) +{ + struct key *keyring; + long err; + + ctx->iterator = keyring_search_iterator; + ctx->possessed = is_key_possessed(keyring_ref); + ctx->result = ERR_PTR(-EAGAIN); + + keyring = key_ref_to_ptr(keyring_ref); + key_check(keyring); + + if (keyring->type != &key_type_keyring) + return ERR_PTR(-ENOTDIR); + + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { + err = key_task_permission(keyring_ref, ctx->cred, KEY_SEARCH); + if (err < 0) + return ERR_PTR(err); + } + + rcu_read_lock(); + ctx->now = current_kernel_time(); + if (search_nested_keyrings(keyring, ctx)) + __key_get(key_ref_to_ptr(ctx->result)); rcu_read_unlock(); -error: - return key_ref; + return ctx->result; } /** @@ -507,77 +864,73 @@ error: * @description: The name of the keyring we want to find. * * As keyring_search_aux() above, but using the current task's credentials and - * type's default matching function. + * type's default matching function and preferred search method. */ key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, const char *description) { - if (!type->match) + struct keyring_search_context ctx = { + .index_key.type = type, + .index_key.description = description, + .cred = current_cred(), + .match = type->match, + .match_data = description, + .flags = (type->def_lookup_type | + KEYRING_SEARCH_DO_STATE_CHECK), + }; + + if (!ctx.match) return ERR_PTR(-ENOKEY); - return keyring_search_aux(keyring, current->cred, - type, description, type->match, false); + return keyring_search_aux(keyring, &ctx); } EXPORT_SYMBOL(keyring_search); /* - * Search the given keyring only (no recursion). + * Search the given keyring for a key that might be updated. * * The caller must guarantee that the keyring is a keyring and that the - * permission is granted to search the keyring as no check is made here. - * - * RCU is used to make it unnecessary to lock the keyring key list here. + * permission is granted to modify the keyring as no check is made here. The + * caller must also hold a lock on the keyring semaphore. * * Returns a pointer to the found key with usage count incremented if - * successful and returns -ENOKEY if not found. Revoked keys and keys not - * providing the requested permission are skipped over. + * successful and returns NULL if not found. Revoked and invalidated keys are + * skipped over. * * If successful, the possession indicator is propagated from the keyring ref * to the returned key reference. 
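keyring_search() above is the in-kernel face of what userspace reaches with KEYCTL_SEARCH: the key type's default matcher plus a state-checked, depth-limited walk of the keyring tree. A small sketch of the userspace side, assuming libkeyutils; the type, description and payload are arbitrary:

	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		key_serial_t key;
		long found;

		key = add_key("user", "demo:search", "payload", 7,
			      KEY_SPEC_SESSION_KEYRING);
		if (key < 0) {
			perror("add_key");
			return 1;
		}

		/* Final argument 0: don't link the result anywhere. */
		found = keyctl_search(KEY_SPEC_SESSION_KEYRING, "user",
				      "demo:search", 0);
		if (found < 0)
			perror("keyctl_search");
		else
			printf("found key %ld\n", found);
		return 0;
	}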
*/ -key_ref_t __keyring_search_one(key_ref_t keyring_ref, - const struct key_type *ktype, - const char *description, - key_perm_t perm) +key_ref_t find_key_to_update(key_ref_t keyring_ref, + const struct keyring_index_key *index_key) { - struct keyring_list *klist; - unsigned long possessed; struct key *keyring, *key; - int nkeys, loop; + const void *object; keyring = key_ref_to_ptr(keyring_ref); - possessed = is_key_possessed(keyring_ref); - rcu_read_lock(); + kenter("{%d},{%s,%s}", + keyring->serial, index_key->type->name, index_key->description); - klist = rcu_dereference(keyring->payload.subscriptions); - if (klist) { - nkeys = klist->nkeys; - smp_rmb(); - for (loop = 0; loop < nkeys ; loop++) { - key = rcu_dereference(klist->keys[loop]); - if (key->type == ktype && - (!key->type->match || - key->type->match(key, description)) && - key_permission(make_key_ref(key, possessed), - perm) == 0 && - !(key->flags & ((1 << KEY_FLAG_INVALIDATED) | - (1 << KEY_FLAG_REVOKED))) - ) - goto found; - } - } + object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, + index_key); - rcu_read_unlock(); - return ERR_PTR(-ENOKEY); + if (object) + goto found; + + kleave(" = NULL"); + return NULL; found: - atomic_inc(&key->usage); - keyring->last_used_at = key->last_used_at = - current_kernel_time().tv_sec; - rcu_read_unlock(); - return make_key_ref(key, possessed); + key = keyring_ptr_to_key(object); + if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) { + kleave(" = NULL [x]"); + return NULL; + } + __key_get(key); + kleave(" = {%d}", key->serial); + return make_key_ref(key, is_key_possessed(keyring_ref)); } /* @@ -640,6 +993,19 @@ out: return keyring; } +static int keyring_detect_cycle_iterator(const void *object, + void *iterator_data) +{ + struct keyring_search_context *ctx = iterator_data; + const struct key *key = keyring_ptr_to_key(object); + + kenter("{%d}", key->serial); + + BUG_ON(key != ctx->match_data); + ctx->result = ERR_PTR(-EDEADLK); + return 1; +} + /* * See if a cycle will will be created by inserting acyclic tree B in acyclic * tree A at the topmost level (ie: as a direct child of A). 
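The cycle check is now just a constrained search: keyring_detect_cycle_iterator() above flags any appearance of key A, while the rewritten keyring_detect_cycle() that follows walks tree B with search_nested_keyrings(), mapping a "nothing found" result to success and passing -EDEADLK or -ELOOP back out. The effect is easy to see from userspace; a sketch assuming libkeyutils, with arbitrary keyring descriptions:

	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		key_serial_t a, b;

		a = add_key("keyring", "demo-a", NULL, 0, KEY_SPEC_SESSION_KEYRING);
		b = add_key("keyring", "demo-b", NULL, 0, KEY_SPEC_SESSION_KEYRING);
		if (a < 0 || b < 0) {
			perror("add_key");
			return 1;
		}

		if (keyctl_link(b, a) < 0)		/* put B inside A: fine */
			perror("link B into A");

		if (keyctl_link(a, b) < 0)		/* put A inside B: a cycle */
			perror("link A into B");
		return 0;
	}

The second keyctl_link() should fail with EDEADLK ("Resource deadlock avoided"), and sufficiently deep chains of nested keyrings fail with ELOOP instead.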
@@ -649,116 +1015,39 @@ out: */ static int keyring_detect_cycle(struct key *A, struct key *B) { - struct { - struct keyring_list *keylist; - int kix; - } stack[KEYRING_SEARCH_MAX_DEPTH]; - - struct keyring_list *keylist; - struct key *subtree, *key; - int sp, nkeys, kix, ret; + struct keyring_search_context ctx = { + .index_key = A->index_key, + .match_data = A, + .iterator = keyring_detect_cycle_iterator, + .flags = (KEYRING_SEARCH_LOOKUP_DIRECT | + KEYRING_SEARCH_NO_STATE_CHECK | + KEYRING_SEARCH_NO_UPDATE_TIME | + KEYRING_SEARCH_NO_CHECK_PERM | + KEYRING_SEARCH_DETECT_TOO_DEEP), + }; rcu_read_lock(); - - ret = -EDEADLK; - if (A == B) - goto cycle_detected; - - subtree = B; - sp = 0; - - /* start processing a new keyring */ -descend: - if (test_bit(KEY_FLAG_REVOKED, &subtree->flags)) - goto not_this_keyring; - - keylist = rcu_dereference(subtree->payload.subscriptions); - if (!keylist) - goto not_this_keyring; - kix = 0; - -ascend: - /* iterate through the remaining keys in this keyring */ - nkeys = keylist->nkeys; - smp_rmb(); - for (; kix < nkeys; kix++) { - key = rcu_dereference(keylist->keys[kix]); - - if (key == A) - goto cycle_detected; - - /* recursively check nested keyrings */ - if (key->type == &key_type_keyring) { - if (sp >= KEYRING_SEARCH_MAX_DEPTH) - goto too_deep; - - /* stack the current position */ - stack[sp].keylist = keylist; - stack[sp].kix = kix; - sp++; - - /* begin again with the new keyring */ - subtree = key; - goto descend; - } - } - - /* the keyring we're looking at was disqualified or didn't contain a - * matching key */ -not_this_keyring: - if (sp > 0) { - /* resume the checking of a keyring higher up in the tree */ - sp--; - keylist = stack[sp].keylist; - kix = stack[sp].kix + 1; - goto ascend; - } - - ret = 0; /* no cycles detected */ - -error: + search_nested_keyrings(B, &ctx); rcu_read_unlock(); - return ret; - -too_deep: - ret = -ELOOP; - goto error; - -cycle_detected: - ret = -EDEADLK; - goto error; -} - -/* - * Dispose of a keyring list after the RCU grace period, freeing the unlinked - * key - */ -static void keyring_unlink_rcu_disposal(struct rcu_head *rcu) -{ - struct keyring_list *klist = - container_of(rcu, struct keyring_list, rcu); - - if (klist->delkey != USHRT_MAX) - key_put(rcu_access_pointer(klist->keys[klist->delkey])); - kfree(klist); + return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); } /* * Preallocate memory so that a key can be linked into to a keyring. 
*/ -int __key_link_begin(struct key *keyring, const struct key_type *type, - const char *description, unsigned long *_prealloc) +int __key_link_begin(struct key *keyring, + const struct keyring_index_key *index_key, + struct assoc_array_edit **_edit) __acquires(&keyring->sem) __acquires(&keyring_serialise_link_sem) { - struct keyring_list *klist, *nklist; - unsigned long prealloc; - unsigned max; - time_t lowest_lru; - size_t size; - int loop, lru, ret; + struct assoc_array_edit *edit; + int ret; + + kenter("%d,%s,%s,", + keyring->serial, index_key->type->name, index_key->description); - kenter("%d,%s,%s,", key_serial(keyring), type->name, description); + BUG_ON(index_key->desc_len == 0); if (keyring->type != &key_type_keyring) return -ENOTDIR; @@ -771,100 +1060,39 @@ int __key_link_begin(struct key *keyring, const struct key_type *type, /* serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders */ - if (type == &key_type_keyring) + if (index_key->type == &key_type_keyring) down_write(&keyring_serialise_link_sem); - klist = rcu_dereference_locked_keyring(keyring); - - /* see if there's a matching key we can displace */ - lru = -1; - if (klist && klist->nkeys > 0) { - lowest_lru = TIME_T_MAX; - for (loop = klist->nkeys - 1; loop >= 0; loop--) { - struct key *key = rcu_deref_link_locked(klist, loop, - keyring); - if (key->type == type && - strcmp(key->description, description) == 0) { - /* Found a match - we'll replace the link with - * one to the new key. We record the slot - * position. - */ - klist->delkey = loop; - prealloc = 0; - goto done; - } - if (key->last_used_at < lowest_lru) { - lowest_lru = key->last_used_at; - lru = loop; - } - } - } - - /* If the keyring is full then do an LRU discard */ - if (klist && - klist->nkeys == klist->maxkeys && - klist->maxkeys >= MAX_KEYRING_LINKS) { - kdebug("LRU discard %d\n", lru); - klist->delkey = lru; - prealloc = 0; - goto done; - } - - /* check that we aren't going to overrun the user's quota */ - ret = key_payload_reserve(keyring, - keyring->datalen + KEYQUOTA_LINK_BYTES); - if (ret < 0) + /* Create an edit script that will insert/replace the key in the + * keyring tree. + */ + edit = assoc_array_insert(&keyring->keys, + &keyring_assoc_array_ops, + index_key, + NULL); + if (IS_ERR(edit)) { + ret = PTR_ERR(edit); goto error_sem; + } - if (klist && klist->nkeys < klist->maxkeys) { - /* there's sufficient slack space to append directly */ - klist->delkey = klist->nkeys; - prealloc = KEY_LINK_FIXQUOTA; - } else { - /* grow the key list */ - max = 4; - if (klist) { - max += klist->maxkeys; - if (max > MAX_KEYRING_LINKS) - max = MAX_KEYRING_LINKS; - BUG_ON(max <= klist->maxkeys); - } - - size = sizeof(*klist) + sizeof(struct key *) * max; - - ret = -ENOMEM; - nklist = kmalloc(size, GFP_KERNEL); - if (!nklist) - goto error_quota; - - nklist->maxkeys = max; - if (klist) { - memcpy(nklist->keys, klist->keys, - sizeof(struct key *) * klist->nkeys); - nklist->delkey = klist->nkeys; - nklist->nkeys = klist->nkeys + 1; - klist->delkey = USHRT_MAX; - } else { - nklist->nkeys = 1; - nklist->delkey = 0; - } - - /* add the key into the new space */ - RCU_INIT_POINTER(nklist->keys[nklist->delkey], NULL); - prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA; + /* If we're not replacing a link in-place then we're going to need some + * extra quota. 
+ */ + if (!edit->dead_leaf) { + ret = key_payload_reserve(keyring, + keyring->datalen + KEYQUOTA_LINK_BYTES); + if (ret < 0) + goto error_cancel; } -done: - *_prealloc = prealloc; + *_edit = edit; kleave(" = 0"); return 0; -error_quota: - /* undo the quota changes */ - key_payload_reserve(keyring, - keyring->datalen - KEYQUOTA_LINK_BYTES); +error_cancel: + assoc_array_cancel_edit(edit); error_sem: - if (type == &key_type_keyring) + if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); error_krsem: up_write(&keyring->sem); @@ -895,60 +1123,12 @@ int __key_link_check_live_key(struct key *keyring, struct key *key) * holds at most one link to any given key of a particular type+description * combination. */ -void __key_link(struct key *keyring, struct key *key, - unsigned long *_prealloc) +void __key_link(struct key *key, struct assoc_array_edit **_edit) { - struct keyring_list *klist, *nklist; - struct key *discard; - - nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA); - *_prealloc = 0; - - kenter("%d,%d,%p", keyring->serial, key->serial, nklist); - - klist = rcu_dereference_locked_keyring(keyring); - - atomic_inc(&key->usage); - keyring->last_used_at = key->last_used_at = - current_kernel_time().tv_sec; - - /* there's a matching key we can displace or an empty slot in a newly - * allocated list we can fill */ - if (nklist) { - kdebug("reissue %hu/%hu/%hu", - nklist->delkey, nklist->nkeys, nklist->maxkeys); - - RCU_INIT_POINTER(nklist->keys[nklist->delkey], key); - - rcu_assign_pointer(keyring->payload.subscriptions, nklist); - - /* dispose of the old keyring list and, if there was one, the - * displaced key */ - if (klist) { - kdebug("dispose %hu/%hu/%hu", - klist->delkey, klist->nkeys, klist->maxkeys); - call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); - } - } else if (klist->delkey < klist->nkeys) { - kdebug("replace %hu/%hu/%hu", - klist->delkey, klist->nkeys, klist->maxkeys); - - discard = rcu_dereference_protected( - klist->keys[klist->delkey], - rwsem_is_locked(&keyring->sem)); - rcu_assign_pointer(klist->keys[klist->delkey], key); - /* The garbage collector will take care of RCU - * synchronisation */ - key_put(discard); - } else { - /* there's sufficient slack space to append directly */ - kdebug("append %hu/%hu/%hu", - klist->delkey, klist->nkeys, klist->maxkeys); - - RCU_INIT_POINTER(klist->keys[klist->delkey], key); - smp_wmb(); - klist->nkeys++; - } + __key_get(key); + assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); + assoc_array_apply_edit(*_edit); + *_edit = NULL; } /* @@ -956,24 +1136,22 @@ void __key_link(struct key *keyring, struct key *key, * * Must be called with __key_link_begin() having being called. 
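Taken together, the link path now follows the assoc_array edit-script pattern: build the edit with the keyring write-locked, then either cancel it or set the object and apply it in one step. The fragment below is not a compilable unit; it only strings together the calls that are split across __key_link_begin() and __key_link() above and __key_link_end() just below, with the quota and liveness checks elided:

	struct assoc_array_edit *edit;

	/* Begin: build an edit script that will insert or replace the link. */
	edit = assoc_array_insert(&keyring->keys, &keyring_assoc_array_ops,
				  &key->index_key, NULL);
	if (IS_ERR(edit))
		return PTR_ERR(edit);

	/* ...quota reservation and liveness checks go here; on failure the
	 * unapplied script is thrown away with assoc_array_cancel_edit(). */

	/* Commit: pin the key and apply the edit; concurrent RCU readers see
	 * either the old tree or the new one, never a half-built state. */
	__key_get(key);
	assoc_array_insert_set_object(edit, keyring_key_to_ptr(key));
	assoc_array_apply_edit(edit);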
*/ -void __key_link_end(struct key *keyring, struct key_type *type, - unsigned long prealloc) +void __key_link_end(struct key *keyring, + const struct keyring_index_key *index_key, + struct assoc_array_edit *edit) __releases(&keyring->sem) __releases(&keyring_serialise_link_sem) { - BUG_ON(type == NULL); - BUG_ON(type->name == NULL); - kenter("%d,%s,%lx", keyring->serial, type->name, prealloc); + BUG_ON(index_key->type == NULL); + kenter("%d,%s,", keyring->serial, index_key->type->name); - if (type == &key_type_keyring) + if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); - if (prealloc) { - if (prealloc & KEY_LINK_FIXQUOTA) - key_payload_reserve(keyring, - keyring->datalen - - KEYQUOTA_LINK_BYTES); - kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA)); + if (edit && !edit->dead_leaf) { + key_payload_reserve(keyring, + keyring->datalen - KEYQUOTA_LINK_BYTES); + assoc_array_cancel_edit(edit); } up_write(&keyring->sem); } @@ -1000,20 +1178,28 @@ void __key_link_end(struct key *keyring, struct key_type *type, */ int key_link(struct key *keyring, struct key *key) { - unsigned long prealloc; + struct assoc_array_edit *edit; int ret; + kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage)); + key_check(keyring); key_check(key); - ret = __key_link_begin(keyring, key->type, key->description, &prealloc); + if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) && + !test_bit(KEY_FLAG_TRUSTED, &key->flags)) + return -EPERM; + + ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret == 0) { + kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage)); ret = __key_link_check_live_key(keyring, key); if (ret == 0) - __key_link(keyring, key, &prealloc); - __key_link_end(keyring, key->type, prealloc); + __key_link(key, &edit); + __key_link_end(keyring, &key->index_key, edit); } + kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage)); return ret; } EXPORT_SYMBOL(key_link); @@ -1037,90 +1223,37 @@ EXPORT_SYMBOL(key_link); */ int key_unlink(struct key *keyring, struct key *key) { - struct keyring_list *klist, *nklist; - int loop, ret; + struct assoc_array_edit *edit; + int ret; key_check(keyring); key_check(key); - ret = -ENOTDIR; if (keyring->type != &key_type_keyring) - goto error; + return -ENOTDIR; down_write(&keyring->sem); - klist = rcu_dereference_locked_keyring(keyring); - if (klist) { - /* search the keyring for the key */ - for (loop = 0; loop < klist->nkeys; loop++) - if (rcu_access_pointer(klist->keys[loop]) == key) - goto key_is_present; + edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, + &key->index_key); + if (IS_ERR(edit)) { + ret = PTR_ERR(edit); + goto error; } - - up_write(&keyring->sem); ret = -ENOENT; - goto error; - -key_is_present: - /* we need to copy the key list for RCU purposes */ - nklist = kmalloc(sizeof(*klist) + - sizeof(struct key *) * klist->maxkeys, - GFP_KERNEL); - if (!nklist) - goto nomem; - nklist->maxkeys = klist->maxkeys; - nklist->nkeys = klist->nkeys - 1; - - if (loop > 0) - memcpy(&nklist->keys[0], - &klist->keys[0], - loop * sizeof(struct key *)); - - if (loop < nklist->nkeys) - memcpy(&nklist->keys[loop], - &klist->keys[loop + 1], - (nklist->nkeys - loop) * sizeof(struct key *)); - - /* adjust the user's quota */ - key_payload_reserve(keyring, - keyring->datalen - KEYQUOTA_LINK_BYTES); - - rcu_assign_pointer(keyring->payload.subscriptions, nklist); - - up_write(&keyring->sem); - - /* schedule for later cleanup */ - klist->delkey = loop; - 
call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); + if (edit == NULL) + goto error; + assoc_array_apply_edit(edit); + key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); ret = 0; error: - return ret; -nomem: - ret = -ENOMEM; up_write(&keyring->sem); - goto error; + return ret; } EXPORT_SYMBOL(key_unlink); -/* - * Dispose of a keyring list after the RCU grace period, releasing the keys it - * links to. - */ -static void keyring_clear_rcu_disposal(struct rcu_head *rcu) -{ - struct keyring_list *klist; - int loop; - - klist = container_of(rcu, struct keyring_list, rcu); - - for (loop = klist->nkeys - 1; loop >= 0; loop--) - key_put(rcu_access_pointer(klist->keys[loop])); - - kfree(klist); -} - /** * keyring_clear - Clear a keyring * @keyring: The keyring to clear. @@ -1131,33 +1264,25 @@ static void keyring_clear_rcu_disposal(struct rcu_head *rcu) */ int keyring_clear(struct key *keyring) { - struct keyring_list *klist; + struct assoc_array_edit *edit; int ret; - ret = -ENOTDIR; - if (keyring->type == &key_type_keyring) { - /* detach the pointer block with the locks held */ - down_write(&keyring->sem); - - klist = rcu_dereference_locked_keyring(keyring); - if (klist) { - /* adjust the quota */ - key_payload_reserve(keyring, - sizeof(struct keyring_list)); - - rcu_assign_pointer(keyring->payload.subscriptions, - NULL); - } - - up_write(&keyring->sem); + if (keyring->type != &key_type_keyring) + return -ENOTDIR; - /* free the keys after the locks have been dropped */ - if (klist) - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); + down_write(&keyring->sem); + edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); + if (IS_ERR(edit)) { + ret = PTR_ERR(edit); + } else { + if (edit) + assoc_array_apply_edit(edit); + key_payload_reserve(keyring, 0); ret = 0; } + up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(keyring_clear); @@ -1169,111 +1294,68 @@ EXPORT_SYMBOL(keyring_clear); */ static void keyring_revoke(struct key *keyring) { - struct keyring_list *klist; + struct assoc_array_edit *edit; + + edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); + if (!IS_ERR(edit)) { + if (edit) + assoc_array_apply_edit(edit); + key_payload_reserve(keyring, 0); + } +} + +static bool keyring_gc_select_iterator(void *object, void *iterator_data) +{ + struct key *key = keyring_ptr_to_key(object); + time_t *limit = iterator_data; - klist = rcu_dereference_locked_keyring(keyring); + if (key_is_dead(key, *limit)) + return false; + key_get(key); + return true; +} - /* adjust the quota */ - key_payload_reserve(keyring, 0); +static int keyring_gc_check_iterator(const void *object, void *iterator_data) +{ + const struct key *key = keyring_ptr_to_key(object); + time_t *limit = iterator_data; - if (klist) { - rcu_assign_pointer(keyring->payload.subscriptions, NULL); - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); - } + key_check(key); + return key_is_dead(key, *limit); } /* - * Collect garbage from the contents of a keyring, replacing the old list with - * a new one with the pointers all shuffled down. + * Garbage collect pointers from a keyring. * - * Dead keys are classed as oned that are flagged as being dead or are revoked, - * expired or negative keys that were revoked or expired before the specified - * limit. + * Not called with any locks held. The keyring's key struct will not be + * deallocated under us as only our caller may deallocate it. 
*/ void keyring_gc(struct key *keyring, time_t limit) { - struct keyring_list *klist, *new; - struct key *key; - int loop, keep, max; - - kenter("{%x,%s}", key_serial(keyring), keyring->description); - - down_write(&keyring->sem); - - klist = rcu_dereference_locked_keyring(keyring); - if (!klist) - goto no_klist; - - /* work out how many subscriptions we're keeping */ - keep = 0; - for (loop = klist->nkeys - 1; loop >= 0; loop--) - if (!key_is_dead(rcu_deref_link_locked(klist, loop, keyring), - limit)) - keep++; - - if (keep == klist->nkeys) - goto just_return; - - /* allocate a new keyring payload */ - max = roundup(keep, 4); - new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *), - GFP_KERNEL); - if (!new) - goto nomem; - new->maxkeys = max; - new->nkeys = 0; - new->delkey = 0; - - /* install the live keys - * - must take care as expired keys may be updated back to life - */ - keep = 0; - for (loop = klist->nkeys - 1; loop >= 0; loop--) { - key = rcu_deref_link_locked(klist, loop, keyring); - if (!key_is_dead(key, limit)) { - if (keep >= max) - goto discard_new; - RCU_INIT_POINTER(new->keys[keep++], key_get(key)); - } - } - new->nkeys = keep; - - /* adjust the quota */ - key_payload_reserve(keyring, - sizeof(struct keyring_list) + - KEYQUOTA_LINK_BYTES * keep); + int result; - if (keep == 0) { - rcu_assign_pointer(keyring->payload.subscriptions, NULL); - kfree(new); - } else { - rcu_assign_pointer(keyring->payload.subscriptions, new); - } + kenter("%x{%s}", keyring->serial, keyring->description ?: ""); - up_write(&keyring->sem); + if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) + goto dont_gc; - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); - kleave(" [yes]"); - return; - -discard_new: - new->nkeys = keep; - keyring_clear_rcu_disposal(&new->rcu); - up_write(&keyring->sem); - kleave(" [discard]"); - return; - -just_return: - up_write(&keyring->sem); - kleave(" [no dead]"); - return; + /* scan the keyring looking for dead keys */ + rcu_read_lock(); + result = assoc_array_iterate(&keyring->keys, + keyring_gc_check_iterator, &limit); + rcu_read_unlock(); + if (result == true) + goto do_gc; -no_klist: - up_write(&keyring->sem); - kleave(" [no_klist]"); +dont_gc: + kleave(" [no gc]"); return; -nomem: +do_gc: + down_write(&keyring->sem); + assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, + keyring_gc_select_iterator, &limit); up_write(&keyring->sem); - kleave(" [oom]"); + kleave(" [gc]"); } diff --git a/security/keys/persistent.c b/security/keys/persistent.c new file mode 100644 index 00000000000..0ad3ee28378 --- /dev/null +++ b/security/keys/persistent.c @@ -0,0 +1,167 @@ +/* General persistent per-UID keyrings register + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/user_namespace.h> +#include "internal.h" + +unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */ + +/* + * Create the persistent keyring register for the current user namespace. + * + * Called with the namespace's sem locked for writing. 
+ */ +static int key_create_persistent_register(struct user_namespace *ns) +{ + struct key *reg = keyring_alloc(".persistent_register", + KUIDT_INIT(0), KGIDT_INIT(0), + current_cred(), + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA, NULL); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + ns->persistent_keyring_register = reg; + return 0; +} + +/* + * Create the persistent keyring for the specified user. + * + * Called with the namespace's sem locked for writing. + */ +static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid, + struct keyring_index_key *index_key) +{ + struct key *persistent; + key_ref_t reg_ref, persistent_ref; + + if (!ns->persistent_keyring_register) { + long err = key_create_persistent_register(ns); + if (err < 0) + return ERR_PTR(err); + } else { + reg_ref = make_key_ref(ns->persistent_keyring_register, true); + persistent_ref = find_key_to_update(reg_ref, index_key); + if (persistent_ref) + return persistent_ref; + } + + persistent = keyring_alloc(index_key->description, + uid, INVALID_GID, current_cred(), + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA, + ns->persistent_keyring_register); + if (IS_ERR(persistent)) + return ERR_CAST(persistent); + + return make_key_ref(persistent, true); +} + +/* + * Get the persistent keyring for a specific UID and link it to the nominated + * keyring. + */ +static long key_get_persistent(struct user_namespace *ns, kuid_t uid, + key_ref_t dest_ref) +{ + struct keyring_index_key index_key; + struct key *persistent; + key_ref_t reg_ref, persistent_ref; + char buf[32]; + long ret; + + /* Look in the register if it exists */ + index_key.type = &key_type_keyring; + index_key.description = buf; + index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid)); + + if (ns->persistent_keyring_register) { + reg_ref = make_key_ref(ns->persistent_keyring_register, true); + down_read(&ns->persistent_keyring_register_sem); + persistent_ref = find_key_to_update(reg_ref, &index_key); + up_read(&ns->persistent_keyring_register_sem); + + if (persistent_ref) + goto found; + } + + /* It wasn't in the register, so we'll need to create it. We might + * also need to create the register. + */ + down_write(&ns->persistent_keyring_register_sem); + persistent_ref = key_create_persistent(ns, uid, &index_key); + up_write(&ns->persistent_keyring_register_sem); + if (!IS_ERR(persistent_ref)) + goto found; + + return PTR_ERR(persistent_ref); + +found: + ret = key_task_permission(persistent_ref, current_cred(), KEY_LINK); + if (ret == 0) { + persistent = key_ref_to_ptr(persistent_ref); + ret = key_link(key_ref_to_ptr(dest_ref), persistent); + if (ret == 0) { + key_set_timeout(persistent, persistent_keyring_expiry); + ret = persistent->serial; + } + } + + key_ref_put(persistent_ref); + return ret; +} + +/* + * Get the persistent keyring for a specific UID and link it to the nominated + * keyring. + */ +long keyctl_get_persistent(uid_t _uid, key_serial_t destid) +{ + struct user_namespace *ns = current_user_ns(); + key_ref_t dest_ref; + kuid_t uid; + long ret; + + /* -1 indicates the current user */ + if (_uid == (uid_t)-1) { + uid = current_uid(); + } else { + uid = make_kuid(ns, _uid); + if (!uid_valid(uid)) + return -EINVAL; + + /* You can only see your own persistent cache if you're not + * sufficiently privileged. 
+ */ + if (!uid_eq(uid, current_uid()) && + !uid_eq(uid, current_euid()) && + !ns_capable(ns, CAP_SETUID)) + return -EPERM; + } + + /* There must be a destination keyring */ + dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_WRITE); + if (IS_ERR(dest_ref)) + return PTR_ERR(dest_ref); + if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) { + ret = -ENOTDIR; + goto out_put_dest; + } + + ret = key_get_persistent(ns, uid, dest_ref); + +out_put_dest: + key_ref_put(dest_ref); + return ret; +} diff --git a/security/keys/proc.c b/security/keys/proc.c index 217b6855e81..88e9a466940 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -182,7 +182,6 @@ static void proc_keys_stop(struct seq_file *p, void *v) static int proc_keys_show(struct seq_file *m, void *v) { - const struct cred *cred = current_cred(); struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); struct timespec now; @@ -191,15 +190,23 @@ static int proc_keys_show(struct seq_file *m, void *v) char xbuf[12]; int rc; + struct keyring_search_context ctx = { + .index_key.type = key->type, + .index_key.description = key->description, + .cred = current_cred(), + .match = lookup_user_key_possessed, + .match_data = key, + .flags = (KEYRING_SEARCH_NO_STATE_CHECK | + KEYRING_SEARCH_LOOKUP_DIRECT), + }; + key_ref = make_key_ref(key, 0); /* determine if the key is possessed by this process (a test we can * skip if the key does not indicate the possessor can view it */ if (key->perm & KEY_POS_VIEW) { - skey_ref = search_my_process_keyrings(key->type, key, - lookup_user_key_possessed, - true, cred); + skey_ref = search_my_process_keyrings(&ctx); if (!IS_ERR(skey_ref)) { key_ref_put(skey_ref); key_ref = make_key_ref(key, 1); @@ -211,7 +218,7 @@ static int proc_keys_show(struct seq_file *m, void *v) * - the caller holds a spinlock, and thus the RCU read lock, making our * access to __current_cred() safe */ - rc = key_task_permission(key_ref, cred, KEY_VIEW); + rc = key_task_permission(key_ref, ctx.cred, KEY_VIEW); if (rc < 0) return 0; diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 42defae1e16..0cf8a130a26 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -235,7 +235,7 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) if (IS_ERR(keyring)) return PTR_ERR(keyring); } else { - atomic_inc(&keyring->usage); + __key_get(keyring); } /* install the keyring */ @@ -319,11 +319,7 @@ void key_fsgid_changed(struct task_struct *tsk) * In the case of a successful return, the possession attribute is set on the * returned key reference. 
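
keyctl_get_persistent() above is the service routine behind the new KEYCTL_GET_PERSISTENT operation: look the "_persistent.<uid>" keyring up in the per-namespace register, create it (and the register) on demand, link it into the caller's destination keyring and refresh its timeout. A userspace caller would reach it roughly as sketched below. This is illustrative only: it assumes libkeyutils' generic keyctl() wrapper is available, and the fallback definition of KEYCTL_GET_PERSISTENT is only needed where the installed headers predate this series.

/* cc get_persistent.c -o get_persistent -lkeyutils */
#include <sys/types.h>
#include <keyutils.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

#ifndef KEYCTL_GET_PERSISTENT
#define KEYCTL_GET_PERSISTENT 22	/* assumed value if the uapi header is older */
#endif

int main(void)
{
	/* uid -1 means "the calling user"; link the persistent keyring into
	 * the session keyring so it stays reachable for this login. */
	long serial = keyctl(KEYCTL_GET_PERSISTENT, (uid_t)-1,
			     KEY_SPEC_SESSION_KEYRING);

	if (serial < 0) {
		fprintf(stderr, "KEYCTL_GET_PERSISTENT: %s\n", strerror(errno));
		return 1;
	}
	printf("persistent keyring serial: %ld\n", serial);
	return 0;
}
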
*/ -key_ref_t search_my_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - bool no_state_check, - const struct cred *cred) +key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx) { key_ref_t key_ref, ret, err; @@ -339,10 +335,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type, err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ - if (cred->thread_keyring) { + if (ctx->cred->thread_keyring) { key_ref = keyring_search_aux( - make_key_ref(cred->thread_keyring, 1), - cred, type, description, match, no_state_check); + make_key_ref(ctx->cred->thread_keyring, 1), ctx); if (!IS_ERR(key_ref)) goto found; @@ -358,10 +353,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type, } /* search the process keyring second */ - if (cred->process_keyring) { + if (ctx->cred->process_keyring) { key_ref = keyring_search_aux( - make_key_ref(cred->process_keyring, 1), - cred, type, description, match, no_state_check); + make_key_ref(ctx->cred->process_keyring, 1), ctx); if (!IS_ERR(key_ref)) goto found; @@ -379,11 +373,11 @@ key_ref_t search_my_process_keyrings(struct key_type *type, } /* search the session keyring */ - if (cred->session_keyring) { + if (ctx->cred->session_keyring) { rcu_read_lock(); key_ref = keyring_search_aux( - make_key_ref(rcu_dereference(cred->session_keyring), 1), - cred, type, description, match, no_state_check); + make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1), + ctx); rcu_read_unlock(); if (!IS_ERR(key_ref)) @@ -402,10 +396,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type, } } /* or search the user-session keyring */ - else if (cred->user->session_keyring) { + else if (ctx->cred->user->session_keyring) { key_ref = keyring_search_aux( - make_key_ref(cred->user->session_keyring, 1), - cred, type, description, match, no_state_check); + make_key_ref(ctx->cred->user->session_keyring, 1), + ctx); if (!IS_ERR(key_ref)) goto found; @@ -437,18 +431,14 @@ found: * * Return same as search_my_process_keyrings(). 
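
The refactor above folds what used to be five separate arguments (type, description, match function, state-check flag, creds) into a single keyring_search_context, so the thread, process and session keyrings are each tried with exactly the same bundle. A compressed userspace illustration of that "one context, several scopes in priority order" shape, with invented types standing in for the keyring structures, is:

#include <stdio.h>
#include <string.h>

/* Invented stand-ins: a "keyring" is a NULL-terminated array of descriptions. */
struct search_ctx {
	const char *description;			    /* what to find   */
	int (*match)(const char *entry, const char *desc);  /* how to compare */
};

static int exact_match(const char *entry, const char *desc)
{
	return strcmp(entry, desc) == 0;
}

static const char *search_one(const char **keyring, const struct search_ctx *ctx)
{
	for (; *keyring; keyring++)
		if (ctx->match(*keyring, ctx->description))
			return *keyring;
	return NULL;
}

int main(void)
{
	const char *thread_kr[]  = { NULL };
	const char *process_kr[] = { "user;foo", NULL };
	const char *session_kr[] = { "user;foo", "user;bar", NULL };
	const char **scopes[]    = { thread_kr, process_kr, session_kr };
	struct search_ctx ctx    = { .description = "user;bar",
				     .match = exact_match };
	const char *hit = NULL;
	unsigned int i;

	/* Same context handed to each scope in turn, first hit wins. */
	for (i = 0; i < sizeof(scopes) / sizeof(scopes[0]) && !hit; i++)
		hit = search_one(scopes[i], &ctx);

	printf("found: %s\n", hit ? hit : "(nothing, -EAGAIN)");
	return 0;
}
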
*/ -key_ref_t search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - const struct cred *cred) +key_ref_t search_process_keyrings(struct keyring_search_context *ctx) { struct request_key_auth *rka; key_ref_t key_ref, ret = ERR_PTR(-EACCES), err; might_sleep(); - key_ref = search_my_process_keyrings(type, description, match, - false, cred); + key_ref = search_my_process_keyrings(ctx); if (!IS_ERR(key_ref)) goto found; err = key_ref; @@ -457,18 +447,21 @@ key_ref_t search_process_keyrings(struct key_type *type, * search the keyrings of the process mentioned there * - we don't permit access to request_key auth keys via this method */ - if (cred->request_key_auth && - cred == current_cred() && - type != &key_type_request_key_auth + if (ctx->cred->request_key_auth && + ctx->cred == current_cred() && + ctx->index_key.type != &key_type_request_key_auth ) { + const struct cred *cred = ctx->cred; + /* defend against the auth key being revoked */ down_read(&cred->request_key_auth->sem); - if (key_validate(cred->request_key_auth) == 0) { - rka = cred->request_key_auth->payload.data; + if (key_validate(ctx->cred->request_key_auth) == 0) { + rka = ctx->cred->request_key_auth->payload.data; - key_ref = search_process_keyrings(type, description, - match, rka->cred); + ctx->cred = rka->cred; + key_ref = search_process_keyrings(ctx); + ctx->cred = cred; up_read(&cred->request_key_auth->sem); @@ -522,19 +515,23 @@ int lookup_user_key_possessed(const struct key *key, const void *target) key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, key_perm_t perm) { + struct keyring_search_context ctx = { + .match = lookup_user_key_possessed, + .flags = (KEYRING_SEARCH_NO_STATE_CHECK | + KEYRING_SEARCH_LOOKUP_DIRECT), + }; struct request_key_auth *rka; - const struct cred *cred; struct key *key; key_ref_t key_ref, skey_ref; int ret; try_again: - cred = get_current_cred(); + ctx.cred = get_current_cred(); key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: - if (!cred->thread_keyring) { + if (!ctx.cred->thread_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; @@ -546,13 +543,13 @@ try_again: goto reget_creds; } - key = cred->thread_keyring; - atomic_inc(&key->usage); + key = ctx.cred->thread_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: - if (!cred->process_keyring) { + if (!ctx.cred->process_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; @@ -564,13 +561,13 @@ try_again: goto reget_creds; } - key = cred->process_keyring; - atomic_inc(&key->usage); + key = ctx.cred->process_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: - if (!cred->session_keyring) { + if (!ctx.cred->session_keyring) { /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_user_keyrings(); @@ -580,13 +577,13 @@ try_again: ret = join_session_keyring(NULL); else ret = install_session_keyring( - cred->user->session_keyring); + ctx.cred->user->session_keyring); if (ret < 0) goto error; goto reget_creds; - } else if (cred->session_keyring == - cred->user->session_keyring && + } else if (ctx.cred->session_keyring == + ctx.cred->user->session_keyring && lflags & KEY_LOOKUP_CREATE) { ret = join_session_keyring(NULL); if (ret < 0) @@ -595,33 +592,33 @@ try_again: } rcu_read_lock(); - key = rcu_dereference(cred->session_keyring); - atomic_inc(&key->usage); + key = rcu_dereference(ctx.cred->session_keyring); + __key_get(key); 
rcu_read_unlock(); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: - if (!cred->user->uid_keyring) { + if (!ctx.cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } - key = cred->user->uid_keyring; - atomic_inc(&key->usage); + key = ctx.cred->user->uid_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: - if (!cred->user->session_keyring) { + if (!ctx.cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } - key = cred->user->session_keyring; - atomic_inc(&key->usage); + key = ctx.cred->user->session_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; @@ -631,29 +628,29 @@ try_again: goto error; case KEY_SPEC_REQKEY_AUTH_KEY: - key = cred->request_key_auth; + key = ctx.cred->request_key_auth; if (!key) goto error; - atomic_inc(&key->usage); + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_REQUESTOR_KEYRING: - if (!cred->request_key_auth) + if (!ctx.cred->request_key_auth) goto error; - down_read(&cred->request_key_auth->sem); + down_read(&ctx.cred->request_key_auth->sem); if (test_bit(KEY_FLAG_REVOKED, - &cred->request_key_auth->flags)) { + &ctx.cred->request_key_auth->flags)) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { - rka = cred->request_key_auth->payload.data; + rka = ctx.cred->request_key_auth->payload.data; key = rka->dest_keyring; - atomic_inc(&key->usage); + __key_get(key); } - up_read(&cred->request_key_auth->sem); + up_read(&ctx.cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); @@ -673,9 +670,13 @@ try_again: key_ref = make_key_ref(key, 0); /* check to see if we possess the key */ - skey_ref = search_process_keyrings(key->type, key, - lookup_user_key_possessed, - cred); + ctx.index_key.type = key->type; + ctx.index_key.description = key->description; + ctx.index_key.desc_len = strlen(key->description); + ctx.match_data = key; + kdebug("check possessed"); + skey_ref = search_process_keyrings(&ctx); + kdebug("possessed=%p", skey_ref); if (!IS_ERR(skey_ref)) { key_put(key); @@ -715,14 +716,14 @@ try_again: goto invalid_key; /* check the permissions */ - ret = key_task_permission(key_ref, cred, perm); + ret = key_task_permission(key_ref, ctx.cred, perm); if (ret < 0) goto invalid_key; key->last_used_at = current_kernel_time().tv_sec; error: - put_cred(cred); + put_cred(ctx.cred); return key_ref; invalid_key: @@ -733,7 +734,7 @@ invalid_key: /* if we attempted to install a keyring, then it may have caused new * creds to be installed */ reget_creds: - put_cred(cred); + put_cred(ctx.cred); goto try_again; } @@ -856,3 +857,13 @@ void key_change_session_keyring(struct callback_head *twork) commit_creds(new); } + +/* + * Make sure that root's user and user-session keyrings exist. + */ +static int __init init_root_keyring(void) +{ + return install_user_keyrings(); +} + +late_initcall(init_root_keyring); diff --git a/security/keys/request_key.c b/security/keys/request_key.c index c411f9bb156..381411941cc 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -345,33 +345,34 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) * May return a key that's already under construction instead if there was a * race between two thread calling request_key(). 
*/ -static int construct_alloc_key(struct key_type *type, - const char *description, +static int construct_alloc_key(struct keyring_search_context *ctx, struct key *dest_keyring, unsigned long flags, struct key_user *user, struct key **_key) { - const struct cred *cred = current_cred(); - unsigned long prealloc; + struct assoc_array_edit *edit; struct key *key; key_perm_t perm; key_ref_t key_ref; int ret; - kenter("%s,%s,,,", type->name, description); + kenter("%s,%s,,,", + ctx->index_key.type->name, ctx->index_key.description); *_key = NULL; mutex_lock(&user->cons_lock); perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; - if (type->read) + if (ctx->index_key.type->read) perm |= KEY_POS_READ; - if (type == &key_type_keyring || type->update) + if (ctx->index_key.type == &key_type_keyring || + ctx->index_key.type->update) perm |= KEY_POS_WRITE; - key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred, + key = key_alloc(ctx->index_key.type, ctx->index_key.description, + ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred, perm, flags); if (IS_ERR(key)) goto alloc_failed; @@ -379,8 +380,7 @@ static int construct_alloc_key(struct key_type *type, set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); if (dest_keyring) { - ret = __key_link_begin(dest_keyring, type, description, - &prealloc); + ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit); if (ret < 0) goto link_prealloc_failed; } @@ -390,16 +390,16 @@ static int construct_alloc_key(struct key_type *type, * waited for locks */ mutex_lock(&key_construction_mutex); - key_ref = search_process_keyrings(type, description, type->match, cred); + key_ref = search_process_keyrings(ctx); if (!IS_ERR(key_ref)) goto key_already_present; if (dest_keyring) - __key_link(dest_keyring, key, &prealloc); + __key_link(key, &edit); mutex_unlock(&key_construction_mutex); if (dest_keyring) - __key_link_end(dest_keyring, type, prealloc); + __key_link_end(dest_keyring, &ctx->index_key, edit); mutex_unlock(&user->cons_lock); *_key = key; kleave(" = 0 [%d]", key_serial(key)); @@ -414,8 +414,8 @@ key_already_present: if (dest_keyring) { ret = __key_link_check_live_key(dest_keyring, key); if (ret == 0) - __key_link(dest_keyring, key, &prealloc); - __key_link_end(dest_keyring, type, prealloc); + __key_link(key, &edit); + __key_link_end(dest_keyring, &ctx->index_key, edit); if (ret < 0) goto link_check_failed; } @@ -444,8 +444,7 @@ alloc_failed: /* * Commence key construction. 
*/ -static struct key *construct_key_and_link(struct key_type *type, - const char *description, +static struct key *construct_key_and_link(struct keyring_search_context *ctx, const char *callout_info, size_t callout_len, void *aux, @@ -464,8 +463,7 @@ static struct key *construct_key_and_link(struct key_type *type, construct_get_dest_keyring(&dest_keyring); - ret = construct_alloc_key(type, description, dest_keyring, flags, user, - &key); + ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key); key_user_put(user); if (ret == 0) { @@ -529,17 +527,24 @@ struct key *request_key_and_link(struct key_type *type, struct key *dest_keyring, unsigned long flags) { - const struct cred *cred = current_cred(); + struct keyring_search_context ctx = { + .index_key.type = type, + .index_key.description = description, + .cred = current_cred(), + .match = type->match, + .match_data = description, + .flags = KEYRING_SEARCH_LOOKUP_DIRECT, + }; struct key *key; key_ref_t key_ref; int ret; kenter("%s,%s,%p,%zu,%p,%p,%lx", - type->name, description, callout_info, callout_len, aux, - dest_keyring, flags); + ctx.index_key.type->name, ctx.index_key.description, + callout_info, callout_len, aux, dest_keyring, flags); /* search all the process keyrings for a key */ - key_ref = search_process_keyrings(type, description, type->match, cred); + key_ref = search_process_keyrings(&ctx); if (!IS_ERR(key_ref)) { key = key_ref_to_ptr(key_ref); @@ -562,9 +567,8 @@ struct key *request_key_and_link(struct key_type *type, if (!callout_info) goto error; - key = construct_key_and_link(type, description, callout_info, - callout_len, aux, dest_keyring, - flags); + key = construct_key_and_link(&ctx, callout_info, callout_len, + aux, dest_keyring, flags); } error: @@ -592,8 +596,10 @@ int wait_for_key_construction(struct key *key, bool intr) intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret < 0) return ret; - if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { + smp_rmb(); return key->type_data.reject_error; + } return key_validate(key); } EXPORT_SYMBOL(wait_for_key_construction); diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 85730d5a5a5..7495a93b4b9 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <asm/uaccess.h> #include "internal.h" +#include <keys/user-type.h> static int request_key_auth_instantiate(struct key *, struct key_preparsed_payload *); @@ -222,32 +223,26 @@ error_alloc: } /* - * See if an authorisation key is associated with a particular key. - */ -static int key_get_instantiation_authkey_match(const struct key *key, - const void *_id) -{ - struct request_key_auth *rka = key->payload.data; - key_serial_t id = (key_serial_t)(unsigned long) _id; - - return rka->target_key->serial == id; -} - -/* * Search the current process's keyrings for the authorisation key for * instantiation of a key. 
*/ struct key *key_get_instantiation_authkey(key_serial_t target_id) { - const struct cred *cred = current_cred(); + char description[16]; + struct keyring_search_context ctx = { + .index_key.type = &key_type_request_key_auth, + .index_key.description = description, + .cred = current_cred(), + .match = user_match, + .match_data = description, + .flags = KEYRING_SEARCH_LOOKUP_DIRECT, + }; struct key *authkey; key_ref_t authkey_ref; - authkey_ref = search_process_keyrings( - &key_type_request_key_auth, - (void *) (unsigned long) target_id, - key_get_instantiation_authkey_match, - cred); + sprintf(description, "%x", target_id); + + authkey_ref = search_process_keyrings(&ctx); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c index ee32d181764..8c0af08760c 100644 --- a/security/keys/sysctl.c +++ b/security/keys/sysctl.c @@ -61,5 +61,16 @@ ctl_table key_sysctls[] = { .extra1 = (void *) &zero, .extra2 = (void *) &max, }, +#ifdef CONFIG_PERSISTENT_KEYRINGS + { + .procname = "persistent_keyring_expiry", + .data = &persistent_keyring_expiry, + .maxlen = sizeof(unsigned), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = (void *) &zero, + .extra2 = (void *) &max, + }, +#endif { } }; diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 55dc8893918..faa2caeb593 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -25,14 +25,15 @@ static int logon_vet_description(const char *desc); * arbitrary blob of data as the payload */ struct key_type key_type_user = { - .name = "user", - .instantiate = user_instantiate, - .update = user_update, - .match = user_match, - .revoke = user_revoke, - .destroy = user_destroy, - .describe = user_describe, - .read = user_read, + .name = "user", + .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, + .instantiate = user_instantiate, + .update = user_update, + .match = user_match, + .revoke = user_revoke, + .destroy = user_destroy, + .describe = user_describe, + .read = user_read, }; EXPORT_SYMBOL_GPL(key_type_user); @@ -45,6 +46,7 @@ EXPORT_SYMBOL_GPL(key_type_user); */ struct key_type key_type_logon = { .name = "logon", + .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .instantiate = user_instantiate, .update = user_update, .match = user_match, diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 234bc2ab450..9a62045e628 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -397,7 +397,8 @@ void common_lsm_audit(struct common_audit_data *a, if (a == NULL) return; /* we use GFP_ATOMIC so we won't sleep */ - ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC); + ab = audit_log_start(current->audit_context, GFP_ATOMIC | __GFP_NOWARN, + AUDIT_AVC); if (ab == NULL) return; diff --git a/security/security.c b/security/security.c index 4dc31f4f270..15b6928592e 100644 --- a/security/security.c +++ b/security/security.c @@ -1340,22 +1340,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) return security_ops->xfrm_policy_delete_security(ctx); } -int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) +int security_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) { - return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0); + return security_ops->xfrm_state_alloc(x, sec_ctx); } EXPORT_SYMBOL(security_xfrm_state_alloc); int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) { - if (!polsec) - 
return 0; - /* - * We want the context to be taken from secid which is usually - * from the sock. - */ - return security_ops->xfrm_state_alloc_security(x, NULL, secid); + return security_ops->xfrm_state_alloc_acquire(x, polsec, secid); } int security_xfrm_state_delete(struct xfrm_state *x) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index c540795fb3f..794c3ca49ea 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -95,7 +95,9 @@ #include "audit.h" #include "avc_ss.h" -#define NUM_SEL_MNT_OPTS 5 +#define SB_TYPE_FMT "%s%s%s" +#define SB_SUBTYPE(sb) (sb->s_subtype && sb->s_subtype[0]) +#define SB_TYPE_ARGS(sb) sb->s_type->name, SB_SUBTYPE(sb) ? "." : "", SB_SUBTYPE(sb) ? sb->s_subtype : "" extern struct security_operations *security_ops; @@ -139,12 +141,28 @@ static struct kmem_cache *sel_inode_cache; * This function checks the SECMARK reference counter to see if any SECMARK * targets are currently configured, if the reference counter is greater than * zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is - * enabled, false (0) if SECMARK is disabled. + * enabled, false (0) if SECMARK is disabled. If the always_check_network + * policy capability is enabled, SECMARK is always considered enabled. * */ static int selinux_secmark_enabled(void) { - return (atomic_read(&selinux_secmark_refcount) > 0); + return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount)); +} + +/** + * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled + * + * Description: + * This function checks if NetLabel or labeled IPSEC is enabled. Returns true + * (1) if any are enabled or false (0) if neither are enabled. If the + * always_check_network policy capability is enabled, peer labeling + * is always considered enabled. + * + */ +static int selinux_peerlbl_enabled(void) +{ + return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled()); } /* @@ -309,8 +327,11 @@ enum { Opt_defcontext = 3, Opt_rootcontext = 4, Opt_labelsupport = 5, + Opt_nextmntopt = 6, }; +#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1) + static const match_table_t tokens = { {Opt_context, CONTEXT_STR "%s"}, {Opt_fscontext, FSCONTEXT_STR "%s"}, @@ -355,6 +376,29 @@ static int may_context_mount_inode_relabel(u32 sid, return rc; } +static int selinux_is_sblabel_mnt(struct super_block *sb) +{ + struct superblock_security_struct *sbsec = sb->s_security; + + if (sbsec->behavior == SECURITY_FS_USE_XATTR || + sbsec->behavior == SECURITY_FS_USE_TRANS || + sbsec->behavior == SECURITY_FS_USE_TASK) + return 1; + + /* Special handling for sysfs. Is genfs but also has setxattr handler*/ + if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0) + return 1; + + /* + * Special handling for rootfs. Is genfs but supports + * setting SELinux context on in-core inodes. + */ + if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0) + return 1; + + return 0; +} + static int sb_finish_set_opts(struct super_block *sb) { struct superblock_security_struct *sbsec = sb->s_security; @@ -369,8 +413,8 @@ static int sb_finish_set_opts(struct super_block *sb) the first boot of the SELinux kernel before we have assigned xattr values to the filesystem. 
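
The SB_TYPE_FMT/SB_TYPE_ARGS pair introduced above lets every superblock message print "type.subtype" when a subtype is present (e.g. fuse.glusterfs) and just the base type otherwise, without sprinkling conditionals through the printk callers. A small userspace mock of the expansion, where struct mock_sb and the device names are invented for the demo and only the macro bodies mirror the hunk:

#include <stdio.h>

struct mock_sb {
	const char *type_name;	/* stands in for sb->s_type->name */
	const char *s_subtype;	/* NULL or "" when there is no subtype */
};

#define SB_TYPE_FMT	"%s%s%s"
#define SB_SUBTYPE(sb)	((sb)->s_subtype && (sb)->s_subtype[0])
#define SB_TYPE_ARGS(sb) (sb)->type_name, \
			 SB_SUBTYPE(sb) ? "." : "", \
			 SB_SUBTYPE(sb) ? (sb)->s_subtype : ""

int main(void)
{
	struct mock_sb plain = { "ext4", NULL };
	struct mock_sb fuse  = { "fuse", "glusterfs" };

	printf("(dev sda1, type " SB_TYPE_FMT ")\n", SB_TYPE_ARGS(&plain));
	/* -> (dev sda1, type ext4) */
	printf("(dev loop0, type " SB_TYPE_FMT ")\n", SB_TYPE_ARGS(&fuse));
	/* -> (dev loop0, type fuse.glusterfs) */
	return 0;
}
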
*/ if (!root_inode->i_op->getxattr) { - printk(KERN_WARNING "SELinux: (dev %s, type %s) has no " - "xattr support\n", sb->s_id, sb->s_type->name); + printk(KERN_WARNING "SELinux: (dev %s, type "SB_TYPE_FMT") has no " + "xattr support\n", sb->s_id, SB_TYPE_ARGS(sb)); rc = -EOPNOTSUPP; goto out; } @@ -378,35 +422,27 @@ static int sb_finish_set_opts(struct super_block *sb) if (rc < 0 && rc != -ENODATA) { if (rc == -EOPNOTSUPP) printk(KERN_WARNING "SELinux: (dev %s, type " - "%s) has no security xattr handler\n", - sb->s_id, sb->s_type->name); + SB_TYPE_FMT") has no security xattr handler\n", + sb->s_id, SB_TYPE_ARGS(sb)); else printk(KERN_WARNING "SELinux: (dev %s, type " - "%s) getxattr errno %d\n", sb->s_id, - sb->s_type->name, -rc); + SB_TYPE_FMT") getxattr errno %d\n", sb->s_id, + SB_TYPE_ARGS(sb), -rc); goto out; } } - sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP); - if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) - printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n", - sb->s_id, sb->s_type->name); + printk(KERN_ERR "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), unknown behavior\n", + sb->s_id, SB_TYPE_ARGS(sb)); else - printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n", - sb->s_id, sb->s_type->name, + printk(KERN_DEBUG "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), %s\n", + sb->s_id, SB_TYPE_ARGS(sb), labeling_behaviors[sbsec->behavior-1]); - if (sbsec->behavior == SECURITY_FS_USE_GENFS || - sbsec->behavior == SECURITY_FS_USE_MNTPOINT || - sbsec->behavior == SECURITY_FS_USE_NONE || - sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) - sbsec->flags &= ~SE_SBLABELSUPP; - - /* Special handling for sysfs. Is genfs but also has setxattr handler*/ - if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0) - sbsec->flags |= SE_SBLABELSUPP; + sbsec->flags |= SE_SBINITIALIZED; + if (selinux_is_sblabel_mnt(sb)) + sbsec->flags |= SBLABEL_MNT; /* Initialize the root inode. 
*/ rc = inode_doinit_with_dentry(root_inode, root); @@ -460,15 +496,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb, if (!ss_initialized) return -EINVAL; + /* make sure we always check enough bits to cover the mask */ + BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS)); + tmp = sbsec->flags & SE_MNTMASK; /* count the number of mount options for this sb */ - for (i = 0; i < 8; i++) { + for (i = 0; i < NUM_SEL_MNT_OPTS; i++) { if (tmp & 0x01) opts->num_mnt_opts++; tmp >>= 1; } /* Check if the Label support flag is set */ - if (sbsec->flags & SE_SBLABELSUPP) + if (sbsec->flags & SBLABEL_MNT) opts->num_mnt_opts++; opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC); @@ -515,9 +554,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb, opts->mnt_opts[i] = context; opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT; } - if (sbsec->flags & SE_SBLABELSUPP) { + if (sbsec->flags & SBLABEL_MNT) { opts->mnt_opts[i] = NULL; - opts->mnt_opts_flags[i++] = SE_SBLABELSUPP; + opts->mnt_opts_flags[i++] = SBLABEL_MNT; } BUG_ON(i != opts->num_mnt_opts); @@ -561,7 +600,6 @@ static int selinux_set_mnt_opts(struct super_block *sb, const struct cred *cred = current_cred(); int rc = 0, i; struct superblock_security_struct *sbsec = sb->s_security; - const char *name = sb->s_type->name; struct inode *inode = sbsec->sb->s_root->d_inode; struct inode_security_struct *root_isec = inode->i_security; u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; @@ -614,14 +652,14 @@ static int selinux_set_mnt_opts(struct super_block *sb, for (i = 0; i < num_opts; i++) { u32 sid; - if (flags[i] == SE_SBLABELSUPP) + if (flags[i] == SBLABEL_MNT) continue; rc = security_context_to_sid(mount_options[i], strlen(mount_options[i]), &sid); if (rc) { printk(KERN_WARNING "SELinux: security_context_to_sid" - "(%s) failed for (dev %s, type %s) errno=%d\n", - mount_options[i], sb->s_id, name, rc); + "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", + mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); goto out; } switch (flags[i]) { @@ -685,9 +723,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, * Determine the labeling behavior to use for this * filesystem type. */ - rc = security_fs_use((sbsec->flags & SE_SBPROC) ? - "proc" : sb->s_type->name, - &sbsec->behavior, &sbsec->sid); + rc = security_fs_use(sb); if (rc) { printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n", @@ -770,7 +806,8 @@ out: out_double_mount: rc = -EINVAL; printk(KERN_WARNING "SELinux: mount invalid. 
Same superblock, different " - "security settings for (dev %s, type %s)\n", sb->s_id, name); + "security settings for (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, + SB_TYPE_ARGS(sb)); goto out; } @@ -1037,7 +1074,7 @@ static void selinux_write_opts(struct seq_file *m, case DEFCONTEXT_MNT: prefix = DEFCONTEXT_STR; break; - case SE_SBLABELSUPP: + case SBLABEL_MNT: seq_putc(m, ','); seq_puts(m, LABELSUPP_STR); continue; @@ -1649,7 +1686,7 @@ static int may_create(struct inode *dir, if (rc) return rc; - if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { + if (!newsid || !(sbsec->flags & SBLABEL_MNT)) { rc = security_transition_sid(sid, dsec->sid, tclass, &dentry->d_name, &newsid); if (rc) @@ -2437,14 +2474,14 @@ static int selinux_sb_remount(struct super_block *sb, void *data) u32 sid; size_t len; - if (flags[i] == SE_SBLABELSUPP) + if (flags[i] == SBLABEL_MNT) continue; len = strlen(mount_options[i]); rc = security_context_to_sid(mount_options[i], len, &sid); if (rc) { printk(KERN_WARNING "SELinux: security_context_to_sid" - "(%s) failed for (dev %s, type %s) errno=%d\n", - mount_options[i], sb->s_id, sb->s_type->name, rc); + "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n", + mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc); goto out_free_opts; } rc = -EINVAL; @@ -2482,8 +2519,8 @@ out_free_secdata: return rc; out_bad_option: printk(KERN_WARNING "SELinux: unable to change security options " - "during remount (dev %s, type=%s)\n", sb->s_id, - sb->s_type->name); + "during remount (dev %s, type "SB_TYPE_FMT")\n", sb->s_id, + SB_TYPE_ARGS(sb)); goto out_free_opts; } @@ -2606,7 +2643,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, if ((sbsec->flags & SE_SBINITIALIZED) && (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) newsid = sbsec->mntpoint_sid; - else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { + else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) { rc = security_transition_sid(sid, dsec->sid, inode_mode_to_security_class(inode->i_mode), qstr, &newsid); @@ -2628,7 +2665,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, isec->initialized = 1; } - if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP)) + if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; if (name) @@ -2830,7 +2867,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, return selinux_inode_setotherxattr(dentry, name); sbsec = inode->i_sb->s_security; - if (!(sbsec->flags & SE_SBLABELSUPP)) + if (!(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; if (!inode_owner_or_capable(inode)) @@ -3791,8 +3828,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid) u32 nlbl_sid; u32 nlbl_type; - selinux_skb_xfrm_sid(skb, &xfrm_sid); - selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); + err = selinux_skb_xfrm_sid(skb, &xfrm_sid); + if (unlikely(err)) + return -EACCES; + err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); + if (unlikely(err)) + return -EACCES; err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid); if (unlikely(err)) { @@ -4246,7 +4287,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) return selinux_sock_rcv_skb_compat(sk, skb, family); secmark_active = selinux_secmark_enabled(); - peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled(); + peerlbl_active = selinux_peerlbl_enabled(); if (!secmark_active && !peerlbl_active) return 0; @@ -4628,7 +4669,7 @@ static unsigned int 
selinux_ip_forward(struct sk_buff *skb, int ifindex, secmark_active = selinux_secmark_enabled(); netlbl_active = netlbl_enabled(); - peerlbl_active = netlbl_active || selinux_xfrm_enabled(); + peerlbl_active = selinux_peerlbl_enabled(); if (!secmark_active && !peerlbl_active) return NF_ACCEPT; @@ -4780,7 +4821,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, return NF_ACCEPT; #endif secmark_active = selinux_secmark_enabled(); - peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled(); + peerlbl_active = selinux_peerlbl_enabled(); if (!secmark_active && !peerlbl_active) return NF_ACCEPT; @@ -5784,7 +5825,8 @@ static struct security_operations selinux_ops = { .xfrm_policy_clone_security = selinux_xfrm_policy_clone, .xfrm_policy_free_security = selinux_xfrm_policy_free, .xfrm_policy_delete_security = selinux_xfrm_policy_delete, - .xfrm_state_alloc_security = selinux_xfrm_state_alloc, + .xfrm_state_alloc = selinux_xfrm_state_alloc, + .xfrm_state_alloc_acquire = selinux_xfrm_state_alloc_acquire, .xfrm_state_free_security = selinux_xfrm_state_free, .xfrm_state_delete_security = selinux_xfrm_state_delete, .xfrm_policy_lookup = selinux_xfrm_policy_lookup, diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index aa47bcabb5f..b1dfe104945 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -58,8 +58,8 @@ struct superblock_security_struct { u32 sid; /* SID of file system superblock */ u32 def_sid; /* default SID for labeling */ u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */ - unsigned int behavior; /* labeling behavior */ - unsigned char flags; /* which mount options were specified */ + unsigned short behavior; /* labeling behavior */ + unsigned short flags; /* which mount options were specified */ struct mutex lock; struct list_head isec_head; spinlock_t isec_lock; diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 8fd8e18ea34..fe341ae3700 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -45,14 +45,15 @@ /* Mask for just the mount related flags */ #define SE_MNTMASK 0x0f /* Super block security struct flags for mount options */ +/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */ #define CONTEXT_MNT 0x01 #define FSCONTEXT_MNT 0x02 #define ROOTCONTEXT_MNT 0x04 #define DEFCONTEXT_MNT 0x08 +#define SBLABEL_MNT 0x10 /* Non-mount related flags */ -#define SE_SBINITIALIZED 0x10 -#define SE_SBPROC 0x20 -#define SE_SBLABELSUPP 0x40 +#define SE_SBINITIALIZED 0x0100 +#define SE_SBPROC 0x0200 #define CONTEXT_STR "context=" #define FSCONTEXT_STR "fscontext=" @@ -68,12 +69,15 @@ extern int selinux_enabled; enum { POLICYDB_CAPABILITY_NETPEER, POLICYDB_CAPABILITY_OPENPERM, + POLICYDB_CAPABILITY_REDHAT1, + POLICYDB_CAPABILITY_ALWAYSNETWORK, __POLICYDB_CAPABILITY_MAX }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) extern int selinux_policycap_netpeer; extern int selinux_policycap_openperm; +extern int selinux_policycap_alwaysnetwork; /* * type_datum properties @@ -172,8 +176,7 @@ int security_get_allow_unknown(void); #define SECURITY_FS_USE_NATIVE 7 /* use native label support */ #define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */ -int security_fs_use(const char *fstype, unsigned int *behavior, - u32 *sid); +int security_fs_use(struct super_block *sb); int security_genfs_sid(const char *fstype, char *name, u16 sclass, u32 *sid); diff --git 
a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 6713f04e30b..0dec76c64cf 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h @@ -10,29 +10,21 @@ #include <net/flow.h> int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *sec_ctx); + struct xfrm_user_sec_ctx *uctx); int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); int selinux_xfrm_state_alloc(struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx, u32 secid); + struct xfrm_user_sec_ctx *uctx); +int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid); void selinux_xfrm_state_free(struct xfrm_state *x); int selinux_xfrm_state_delete(struct xfrm_state *x); int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, - struct xfrm_policy *xp, const struct flowi *fl); - -/* - * Extract the security blob from the sock (it's actually on the socket) - */ -static inline struct inode_security_struct *get_sock_isec(struct sock *sk) -{ - if (!sk->sk_socket) - return NULL; - - return SOCK_INODE(sk->sk_socket)->i_security; -} + struct xfrm_policy *xp, + const struct flowi *fl); #ifdef CONFIG_SECURITY_NETWORK_XFRM extern atomic_t selinux_xfrm_refcount; @@ -42,10 +34,10 @@ static inline int selinux_xfrm_enabled(void) return (atomic_read(&selinux_xfrm_refcount) > 0); } -int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb, - struct common_audit_data *ad); -int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad, u8 proto); +int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad); +int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad, u8 proto); int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); static inline void selinux_xfrm_notify_policyload(void) @@ -64,19 +56,21 @@ static inline int selinux_xfrm_enabled(void) return 0; } -static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad) +static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad) { return 0; } -static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad, u8 proto) +static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad, + u8 proto) { return 0; } -static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) +static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, + int ckall) { *sid = SECSID_NULL; return 0; @@ -87,10 +81,9 @@ static inline void selinux_xfrm_notify_policyload(void) } #endif -static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid) +static inline int selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid) { - int err = selinux_xfrm_decode_session(skb, sid, 0); - BUG_ON(err); + return selinux_xfrm_decode_session(skb, sid, 0); } #endif /* _SELINUX_XFRM_H_ */ diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c index da4b8b23328..6235d052338 100644 --- a/security/selinux/netlabel.c +++ b/security/selinux/netlabel.c @@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, 
struct sockaddr *addr) sksec->nlbl_state != NLBL_CONNLABELED) return 0; - local_bh_disable(); - bh_lock_sock_nested(sk); + lock_sock(sk); /* connected sockets are allowed to disconnect when the address family * is set to AF_UNSPEC, if that is what is happening we want to reset @@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr) sksec->nlbl_state = NLBL_CONNLABELED; socket_connect_return: - bh_unlock_sock(sk); - local_bh_enable(); + release_sock(sk); return rc; } diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c index c5454c0477c..03a72c32afd 100644 --- a/security/selinux/netnode.c +++ b/security/selinux/netnode.c @@ -166,6 +166,7 @@ static void sel_netnode_insert(struct sel_netnode *node) break; default: BUG(); + return; } /* we need to impose a limit on the growth of the hash table so check @@ -225,6 +226,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid) break; default: BUG(); + ret = -EINVAL; } if (ret != 0) goto out; diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 855e464e92e..332ac8a80cf 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -116,6 +116,8 @@ static struct nlmsg_perm nlmsg_audit_perms[] = { AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE }, { AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ }, { AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT }, + { AUDIT_GET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_READ }, + { AUDIT_SET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE }, }; diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index ff427733c29..5122affe06a 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -44,7 +44,9 @@ /* Policy capability filenames */ static char *policycap_names[] = { "network_peer_controls", - "open_perms" + "open_perms", + "redhat1", + "always_check_network" }; unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index 30f119b1d1e..820313a04d4 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c @@ -213,7 +213,12 @@ netlbl_import_failure: } #endif /* CONFIG_NETLABEL */ -int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) +/* + * Check to see if all the bits set in e2 are also set in e1. Optionally, + * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed + * last_e2bit. 
+ */ +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit) { struct ebitmap_node *n1, *n2; int i; @@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) n1 = e1->node; n2 = e2->node; + while (n1 && n2 && (n1->startbit <= n2->startbit)) { if (n1->startbit < n2->startbit) { n1 = n1->next; continue; } - for (i = 0; i < EBITMAP_UNIT_NUMS; i++) { + for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; ) + i--; /* Skip trailing NULL map entries */ + if (last_e2bit && (i >= 0)) { + u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE + + __fls(n2->maps[i]); + if (lastsetbit > last_e2bit) + return 0; + } + + while (i >= 0) { if ((n1->maps[i] & n2->maps[i]) != n2->maps[i]) return 0; + i--; } n1 = n1->next; diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h index 922f8afa89d..712c8a7b8e8 100644 --- a/security/selinux/ss/ebitmap.h +++ b/security/selinux/ss/ebitmap.h @@ -16,7 +16,13 @@ #include <net/netlabel.h> -#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \ +#ifdef CONFIG_64BIT +#define EBITMAP_NODE_SIZE 64 +#else +#define EBITMAP_NODE_SIZE 32 +#endif + +#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\ / sizeof(unsigned long)) #define EBITMAP_UNIT_SIZE BITS_PER_LONG #define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE) @@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n, int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2); int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src); -int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2); +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit); int ebitmap_get_bit(struct ebitmap *e, unsigned long bit); int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value); void ebitmap_destroy(struct ebitmap *e); diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c index 40de8d3f208..c85bc1ec040 100644 --- a/security/selinux/ss/mls.c +++ b/security/selinux/ss/mls.c @@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context, int mls_level_isvalid(struct policydb *p, struct mls_level *l) { struct level_datum *levdatum; - struct ebitmap_node *node; - int i; if (!l->sens || l->sens > p->p_levels.nprim) return 0; @@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l) if (!levdatum) return 0; - ebitmap_for_each_positive_bit(&l->cat, node, i) { - if (i > p->p_cats.nprim) - return 0; - if (!ebitmap_get_bit(&levdatum->level->cat, i)) { - /* - * Category may not be associated with - * sensitivity. - */ - return 0; - } - } - - return 1; + /* + * Return 1 iff all the bits set in l->cat are also be set in + * levdatum->level->cat and no bit in l->cat is larger than + * p->p_cats.nprim. 
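
ebitmap_contains() above now takes a last_e2bit ceiling, so the reworked mls_level_isvalid() can do the subset check and the "no category above p_cats.nprim" check in a single pass over the category bitmap (and, with EBITMAP_NODE_SIZE raised to 64 bytes, each node on a 64-bit build carries (64 - 8 - 4)/8 = 6 longs, i.e. 384 bits, up from 128). The snippet below is a flat, single-word analogue of the new test that ignores the node/shortcut machinery; the helper name and the plain unsigned long bitmaps are illustrative only, and GCC/Clang's __builtin_clzl stands in for the kernel's __fls().

#include <stdbool.h>
#include <stdio.h>

/*
 * Flat analogue of ebitmap_contains(e1, e2, last_e2bit): every bit set in e2
 * must also be set in e1, and, when last_e2bit is non-zero, the highest bit
 * set in e2 must not exceed last_e2bit.  Bits are numbered from 0.
 */
static bool bitmap_contains(unsigned long e1, unsigned long e2,
			    unsigned int last_e2bit)
{
	if (e2 & ~e1)		/* something in e2 that e1 does not have */
		return false;
	if (last_e2bit && e2) {
		unsigned int highest = (unsigned int)(sizeof(e2) * 8 - 1) -
				       (unsigned int)__builtin_clzl(e2);
		if (highest > last_e2bit)
			return false;
	}
	return true;
}

int main(void)
{
	unsigned long allowed = 0x0f;	/* categories c0..c3 permitted */

	printf("%d\n", bitmap_contains(allowed, 0x05, 3));	/* 1: c0,c2 allowed   */
	printf("%d\n", bitmap_contains(allowed, 0x10, 7));	/* 0: c4 not in e1    */
	printf("%d\n", bitmap_contains(allowed, 0x08, 2));	/* 0: c3 beyond limit */
	return 0;
}
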
+ */ + return ebitmap_contains(&levdatum->level->cat, &l->cat, + p->p_cats.nprim); } int mls_range_isvalid(struct policydb *p, struct mls_range *r) diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h index 03bed52a805..e9364877413 100644 --- a/security/selinux/ss/mls_types.h +++ b/security/selinux/ss/mls_types.h @@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2) static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2) { return ((l1->sens >= l2->sens) && - ebitmap_contains(&l1->cat, &l2->cat)); + ebitmap_contains(&l1->cat, &l2->cat, 0)); } #define mls_level_incomp(l1, l2) \ diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index c8adde3aff8..f6195ebde3c 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -3203,9 +3203,8 @@ static int range_write_helper(void *key, void *data, void *ptr) static int range_write(struct policydb *p, void *fp) { - size_t nel; __le32 buf[1]; - int rc; + int rc, nel; struct policy_data pd; pd.p = p; diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index b4feecc3fe0..ee470a0b5c2 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -72,6 +72,7 @@ int selinux_policycap_netpeer; int selinux_policycap_openperm; +int selinux_policycap_alwaysnetwork; static DEFINE_RWLOCK(policy_rwlock); @@ -1812,6 +1813,8 @@ static void security_load_policycaps(void) POLICYDB_CAPABILITY_NETPEER); selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_OPENPERM); + selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps, + POLICYDB_CAPABILITY_ALWAYSNETWORK); } static int security_preserve_bools(struct policydb *p); @@ -2323,43 +2326,74 @@ out: /** * security_fs_use - Determine how to handle labeling for a filesystem. - * @fstype: filesystem type - * @behavior: labeling behavior - * @sid: SID for filesystem (superblock) + * @sb: superblock in question */ -int security_fs_use( - const char *fstype, - unsigned int *behavior, - u32 *sid) +int security_fs_use(struct super_block *sb) { int rc = 0; struct ocontext *c; + struct superblock_security_struct *sbsec = sb->s_security; + const char *fstype = sb->s_type->name; + const char *subtype = (sb->s_subtype && sb->s_subtype[0]) ? sb->s_subtype : NULL; + struct ocontext *base = NULL; read_lock(&policy_rwlock); - c = policydb.ocontexts[OCON_FSUSE]; - while (c) { - if (strcmp(fstype, c->u.name) == 0) + for (c = policydb.ocontexts[OCON_FSUSE]; c; c = c->next) { + char *sub; + int baselen; + + baselen = strlen(fstype); + + /* if base does not match, this is not the one */ + if (strncmp(fstype, c->u.name, baselen)) + continue; + + /* if there is no subtype, this is the one! */ + if (!subtype) + break; + + /* skip past the base in this entry */ + sub = c->u.name + baselen; + + /* entry is only a base. save it. keep looking for subtype */ + if (sub[0] == '\0') { + base = c; + continue; + } + + /* entry is not followed by a subtype, so it is not a match */ + if (sub[0] != '.') + continue; + + /* whew, we found a subtype of this fstype */ + sub++; /* move past '.' 
*/ + + /* exact match of fstype AND subtype */ + if (!strcmp(subtype, sub)) break; - c = c->next; } + /* in case we had found an fstype match but no subtype match */ + if (!c) + c = base; + if (c) { - *behavior = c->v.behavior; + sbsec->behavior = c->v.behavior; if (!c->sid[0]) { rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); if (rc) goto out; } - *sid = c->sid[0]; + sbsec->sid = c->sid[0]; } else { - rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid); + rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid); if (rc) { - *behavior = SECURITY_FS_USE_NONE; + sbsec->behavior = SECURITY_FS_USE_NONE; rc = 0; } else { - *behavior = SECURITY_FS_USE_GENFS; + sbsec->behavior = SECURITY_FS_USE_GENFS; } } diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index d0308188621..a91d205ec0c 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -56,7 +56,7 @@ atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0); /* - * Returns true if an LSM/SELinux context + * Returns true if the context is an LSM/SELinux context. */ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx) { @@ -66,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx) } /* - * Returns true if the xfrm contains a security blob for SELinux + * Returns true if the xfrm contains a security blob for SELinux. */ static inline int selinux_authorizable_xfrm(struct xfrm_state *x) { @@ -74,48 +74,111 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x) } /* - * LSM hook implementation that authorizes that a flow can use - * a xfrm policy rule. + * Allocates a xfrm_sec_state and populates it using the supplied security + * xfrm_user_sec_ctx context. */ -int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *uctx) { int rc; - u32 sel_sid; + const struct task_security_struct *tsec = current_security(); + struct xfrm_sec_ctx *ctx = NULL; + u32 str_len; - /* Context sid is either set to label or ANY_ASSOC */ - if (ctx) { - if (!selinux_authorizable_ctx(ctx)) - return -EINVAL; - - sel_sid = ctx->ctx_sid; - } else - /* - * All flows should be treated as polmatch'ing an - * otherwise applicable "non-labeled" policy. This - * would prevent inadvertent "leaks". - */ - return 0; + if (ctxp == NULL || uctx == NULL || + uctx->ctx_doi != XFRM_SC_DOI_LSM || + uctx->ctx_alg != XFRM_SC_ALG_SELINUX) + return -EINVAL; - rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION, - ASSOCIATION__POLMATCH, - NULL); + str_len = uctx->ctx_len; + if (str_len >= PAGE_SIZE) + return -ENOMEM; - if (rc == -EACCES) - return -ESRCH; + ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->ctx_doi = XFRM_SC_DOI_LSM; + ctx->ctx_alg = XFRM_SC_ALG_SELINUX; + ctx->ctx_len = str_len; + memcpy(ctx->ctx_str, &uctx[1], str_len); + ctx->ctx_str[str_len] = '\0'; + rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid); + if (rc) + goto err; + + rc = avc_has_perm(tsec->sid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL); + if (rc) + goto err; + + *ctxp = ctx; + atomic_inc(&selinux_xfrm_refcount); + return 0; + +err: + kfree(ctx); return rc; } /* + * Free the xfrm_sec_ctx structure. + */ +static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx) +{ + if (!ctx) + return; + + atomic_dec(&selinux_xfrm_refcount); + kfree(ctx); +} + +/* + * Authorize the deletion of a labeled SA or policy rule. 
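
selinux_xfrm_alloc_user() above absorbs the old uctx branch of selinux_xfrm_sec_ctx_alloc(): validate the DOI/algorithm, bound the string length below PAGE_SIZE, make one allocation of sizeof(*ctx) + len + 1, copy the caller's context string from just past the header (&uctx[1]) and NUL-terminate it before asking the security server for a SID. Below is a userspace sketch of that header-plus-trailing-string copy; struct user_ctx, struct sec_ctx and the label string are invented stand-ins for xfrm_user_sec_ctx, xfrm_sec_ctx and a real context.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_CTX_LEN 4096	/* stands in for the < PAGE_SIZE bound */

struct user_ctx {		/* ~ xfrm_user_sec_ctx: header, string follows */
	uint16_t len;
};

struct sec_ctx {		/* ~ xfrm_sec_ctx: header plus embedded copy */
	uint16_t len;
	char str[];		/* flexible array member */
};

static struct sec_ctx *alloc_from_user(const struct user_ctx *uctx)
{
	struct sec_ctx *ctx;
	size_t len = uctx->len;

	if (len >= MAX_CTX_LEN)
		return NULL;

	/* one allocation covers the header, the string and the trailing NUL */
	ctx = malloc(sizeof(*ctx) + len + 1);
	if (!ctx)
		return NULL;
	ctx->len = (uint16_t)len;
	memcpy(ctx->str, (const char *)(uctx + 1), len);	/* i.e. &uctx[1] */
	ctx->str[len] = '\0';
	return ctx;
}

int main(void)
{
	/* Fake "user" blob: the header immediately followed by the string. */
	const char label[] = "system_u:object_r:ipsec_spd_t:s0";
	struct user_ctx *uctx = malloc(sizeof(*uctx) + sizeof(label) - 1);
	struct sec_ctx *ctx;

	uctx->len = sizeof(label) - 1;
	memcpy(uctx + 1, label, sizeof(label) - 1);

	ctx = alloc_from_user(uctx);
	printf("%s (%u bytes)\n", ctx ? ctx->str : "(failed)",
	       ctx ? (unsigned int)ctx->len : 0u);
	free(ctx);
	free(uctx);
	return 0;
}
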
+ */ +static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx) +{ + const struct task_security_struct *tsec = current_security(); + + if (!ctx) + return 0; + + return avc_has_perm(tsec->sid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, + NULL); +} + +/* + * LSM hook implementation that authorizes that a flow can use a xfrm policy + * rule. + */ +int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +{ + int rc; + + /* All flows should be treated as polmatch'ing an otherwise applicable + * "non-labeled" policy. This would prevent inadvertent "leaks". */ + if (!ctx) + return 0; + + /* Context sid is either set to label or ANY_ASSOC */ + if (!selinux_authorizable_ctx(ctx)) + return -EINVAL; + + rc = avc_has_perm(fl_secid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL); + return (rc == -EACCES ? -ESRCH : rc); +} + +/* * LSM hook implementation that authorizes that a state matches * the given policy, flow combo. */ - -int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - const struct flowi *fl) +int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, + struct xfrm_policy *xp, + const struct flowi *fl) { u32 state_sid; - int rc; if (!xp->security) if (x->security) @@ -138,187 +201,80 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy * if (fl->flowi_secid != state_sid) return 0; - rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION, - ASSOCIATION__SENDTO, - NULL)? 0:1; - - /* - * We don't need a separate SA Vs. policy polmatch check - * since the SA is now of the same label as the flow and - * a flow Vs. policy polmatch check had already happened - * in selinux_xfrm_policy_lookup() above. - */ - - return rc; + /* We don't need a separate SA Vs. policy polmatch check since the SA + * is now of the same label as the flow and a flow Vs. policy polmatch + * check had already happened in selinux_xfrm_policy_lookup() above. */ + return (avc_has_perm(fl->flowi_secid, state_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, + NULL) ? 0 : 1); } /* * LSM hook implementation that checks and/or returns the xfrm sid for the * incoming packet. 
*/ - int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) { + u32 sid_session = SECSID_NULL; struct sec_path *sp; - *sid = SECSID_NULL; - if (skb == NULL) - return 0; + goto out; sp = skb->sp; if (sp) { - int i, sid_set = 0; + int i; - for (i = sp->len-1; i >= 0; i--) { + for (i = sp->len - 1; i >= 0; i--) { struct xfrm_state *x = sp->xvec[i]; if (selinux_authorizable_xfrm(x)) { struct xfrm_sec_ctx *ctx = x->security; - if (!sid_set) { - *sid = ctx->ctx_sid; - sid_set = 1; - + if (sid_session == SECSID_NULL) { + sid_session = ctx->ctx_sid; if (!ckall) - break; - } else if (*sid != ctx->ctx_sid) + goto out; + } else if (sid_session != ctx->ctx_sid) { + *sid = SECSID_NULL; return -EINVAL; + } } } } - return 0; -} - -/* - * Security blob allocation for xfrm_policy and xfrm_state - * CTX does not have a meaningful value on input - */ -static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *uctx, u32 sid) -{ - int rc = 0; - const struct task_security_struct *tsec = current_security(); - struct xfrm_sec_ctx *ctx = NULL; - char *ctx_str = NULL; - u32 str_len; - - BUG_ON(uctx && sid); - - if (!uctx) - goto not_from_user; - - if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX) - return -EINVAL; - - str_len = uctx->ctx_len; - if (str_len >= PAGE_SIZE) - return -ENOMEM; - - *ctxp = ctx = kmalloc(sizeof(*ctx) + - str_len + 1, - GFP_KERNEL); - - if (!ctx) - return -ENOMEM; - - ctx->ctx_doi = uctx->ctx_doi; - ctx->ctx_len = str_len; - ctx->ctx_alg = uctx->ctx_alg; - - memcpy(ctx->ctx_str, - uctx+1, - str_len); - ctx->ctx_str[str_len] = 0; - rc = security_context_to_sid(ctx->ctx_str, - str_len, - &ctx->ctx_sid); - - if (rc) - goto out; - - /* - * Does the subject have permission to set security context? - */ - rc = avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, - ASSOCIATION__SETCONTEXT, NULL); - if (rc) - goto out; - - return rc; - -not_from_user: - rc = security_sid_to_context(sid, &ctx_str, &str_len); - if (rc) - goto out; - - *ctxp = ctx = kmalloc(sizeof(*ctx) + - str_len, - GFP_ATOMIC); - - if (!ctx) { - rc = -ENOMEM; - goto out; - } - - ctx->ctx_doi = XFRM_SC_DOI_LSM; - ctx->ctx_alg = XFRM_SC_ALG_SELINUX; - ctx->ctx_sid = sid; - ctx->ctx_len = str_len; - memcpy(ctx->ctx_str, - ctx_str, - str_len); - - goto out2; - out: - *ctxp = NULL; - kfree(ctx); -out2: - kfree(ctx_str); - return rc; + *sid = sid_session; + return 0; } /* - * LSM hook implementation that allocs and transfers uctx spec to - * xfrm_policy. + * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. */ int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *uctx) { - int err; - - BUG_ON(!uctx); - - err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0); - if (err == 0) - atomic_inc(&selinux_xfrm_refcount); - - return err; + return selinux_xfrm_alloc_user(ctxp, uctx); } - /* - * LSM hook implementation that copies security data structure from old to - * new for policy cloning. + * LSM hook implementation that copies security data structure from old to new + * for policy cloning. 
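
The rewritten selinux_xfrm_decode_session() above walks the packet's security path from the innermost transform outwards, adopts the first labelled SA's SID as the session SID, and, when ckall is set, keeps walking and fails the whole lookup if any other labelled SA disagrees. A toy C walk over an array of SIDs that shows the same agreement rule; SECSID_NULL, the array and the -1 error code are stand-ins for the kernel values:

#include <stdbool.h>
#include <stdio.h>

#define SECSID_NULL 0u		/* stand-in for "no SID / unlabelled" */

/*
 * Set *sid to the first non-null SID in xvec[] (walking from the end, like
 * the innermost transform); with check_all set, also require every other
 * non-null SID to match it, otherwise fail and report SECSID_NULL.
 */
static int decode_session(const unsigned int *xvec, int len, bool check_all,
			  unsigned int *sid)
{
	unsigned int session = SECSID_NULL;
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (xvec[i] == SECSID_NULL)	/* unlabelled entry: skip */
			continue;
		if (session == SECSID_NULL) {
			session = xvec[i];
			if (!check_all)
				break;
		} else if (session != xvec[i]) {
			*sid = SECSID_NULL;
			return -1;		/* mixed labels: reject */
		}
	}
	*sid = session;
	return 0;
}

int main(void)
{
	unsigned int consistent[] = { SECSID_NULL, 42, 42 };
	unsigned int mixed[]      = { SECSID_NULL, 42, 7 };
	unsigned int sid;

	decode_session(consistent, 3, true, &sid);
	printf("consistent: sid=%u\n", sid);		/* sid=42 */
	printf("mixed: rc=%d\n",
	       decode_session(mixed, 3, true, &sid));	/* rc=-1 */
	return 0;
}
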
*/ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp) { struct xfrm_sec_ctx *new_ctx; - if (old_ctx) { - new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, - GFP_ATOMIC); - if (!new_ctx) - return -ENOMEM; + if (!old_ctx) + return 0; + + new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len, + GFP_ATOMIC); + if (!new_ctx) + return -ENOMEM; + atomic_inc(&selinux_xfrm_refcount); + *new_ctxp = new_ctx; - memcpy(new_ctx, old_ctx, sizeof(*new_ctx)); - memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len); - atomic_inc(&selinux_xfrm_refcount); - *new_ctxp = new_ctx; - } return 0; } @@ -327,8 +283,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, */ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { - atomic_dec(&selinux_xfrm_refcount); - kfree(ctx); + selinux_xfrm_free(ctx); } /* @@ -336,31 +291,55 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) */ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { - const struct task_security_struct *tsec = current_security(); - - if (!ctx) - return 0; + return selinux_xfrm_delete(ctx); +} - return avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, - NULL); +/* + * LSM hook implementation that allocates a xfrm_sec_state, populates it using + * the supplied security context, and assigns it to the xfrm_state. + */ +int selinux_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *uctx) +{ + return selinux_xfrm_alloc_user(&x->security, uctx); } /* - * LSM hook implementation that allocs and transfers sec_ctx spec to - * xfrm_state. + * LSM hook implementation that allocates a xfrm_sec_state and populates based + * on a secid. */ -int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx, - u32 secid) +int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid) { - int err; + int rc; + struct xfrm_sec_ctx *ctx; + char *ctx_str = NULL; + int str_len; + + if (!polsec) + return 0; - BUG_ON(!x); + if (secid == 0) + return -EINVAL; - err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid); - if (err == 0) - atomic_inc(&selinux_xfrm_refcount); - return err; + rc = security_sid_to_context(secid, &ctx_str, &str_len); + if (rc) + return rc; + + ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC); + if (!ctx) + return -ENOMEM; + + ctx->ctx_doi = XFRM_SC_DOI_LSM; + ctx->ctx_alg = XFRM_SC_ALG_SELINUX; + ctx->ctx_sid = secid; + ctx->ctx_len = str_len; + memcpy(ctx->ctx_str, ctx_str, str_len); + kfree(ctx_str); + + x->security = ctx; + atomic_inc(&selinux_xfrm_refcount); + return 0; } /* @@ -368,24 +347,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct */ void selinux_xfrm_state_free(struct xfrm_state *x) { - atomic_dec(&selinux_xfrm_refcount); - kfree(x->security); + selinux_xfrm_free(x->security); } - /* - * LSM hook implementation that authorizes deletion of labeled SAs. - */ +/* + * LSM hook implementation that authorizes deletion of labeled SAs. 
+ */ int selinux_xfrm_state_delete(struct xfrm_state *x) { - const struct task_security_struct *tsec = current_security(); - struct xfrm_sec_ctx *ctx = x->security; - - if (!ctx) - return 0; - - return avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, - NULL); + return selinux_xfrm_delete(x->security); } /* @@ -395,14 +365,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x) * we need to check for unlabelled access since this may not have * gone thru the IPSec process. */ -int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad) +int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad) { - int i, rc = 0; - struct sec_path *sp; - u32 sel_sid = SECINITSID_UNLABELED; - - sp = skb->sp; + int i; + struct sec_path *sp = skb->sp; + u32 peer_sid = SECINITSID_UNLABELED; if (sp) { for (i = 0; i < sp->len; i++) { @@ -410,23 +378,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, if (x && selinux_authorizable_xfrm(x)) { struct xfrm_sec_ctx *ctx = x->security; - sel_sid = ctx->ctx_sid; + peer_sid = ctx->ctx_sid; break; } } } - /* - * This check even when there's no association involved is - * intended, according to Trent Jaeger, to make sure a - * process can't engage in non-ipsec communication unless - * explicitly allowed by policy. - */ - - rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION, - ASSOCIATION__RECVFROM, ad); - - return rc; + /* This check even when there's no association involved is intended, + * according to Trent Jaeger, to make sure a process can't engage in + * non-IPsec communication unless explicitly allowed by policy. */ + return avc_has_perm(sk_sid, peer_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad); } /* @@ -436,49 +398,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, * If we do have a authorizable security association, then it has already been * checked in the selinux_xfrm_state_pol_flow_match hook above. */ -int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad, u8 proto) +int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad, u8 proto) { struct dst_entry *dst; - int rc = 0; - - dst = skb_dst(skb); - - if (dst) { - struct dst_entry *dst_test; - - for (dst_test = dst; dst_test != NULL; - dst_test = dst_test->child) { - struct xfrm_state *x = dst_test->xfrm; - - if (x && selinux_authorizable_xfrm(x)) - goto out; - } - } switch (proto) { case IPPROTO_AH: case IPPROTO_ESP: case IPPROTO_COMP: - /* - * We should have already seen this packet once before - * it underwent xfrm(s). No need to subject it to the - * unlabeled check. - */ - goto out; + /* We should have already seen this packet once before it + * underwent xfrm(s). No need to subject it to the unlabeled + * check. */ + return 0; default: break; } - /* - * This check even when there's no association involved is - * intended, according to Trent Jaeger, to make sure a - * process can't engage in non-ipsec communication unless - * explicitly allowed by policy. 
- */ + dst = skb_dst(skb); + if (dst) { + struct dst_entry *iter; - rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION, - ASSOCIATION__SENDTO, ad); -out: - return rc; + for (iter = dst; iter != NULL; iter = iter->child) { + struct xfrm_state *x = iter->xfrm; + + if (x && selinux_authorizable_xfrm(x)) + return 0; + } + } + + /* This check even when there's no association involved is intended, + * according to Trent Jaeger, to make sure a process can't engage in + * non-IPsec communication unless explicitly allowed by policy. */ + return avc_has_perm(sk_sid, SECINITSID_UNLABELED, + SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad); } diff --git a/security/smack/smack.h b/security/smack/smack.h index 076b8e8a51a..364cc64fce7 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -177,9 +177,13 @@ struct smk_port_label { #define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */ /* - * Flag for transmute access + * Flags for untraditional access modes. + * It shouldn't be necessary to avoid conflicts with definitions + * in fs.h, but do so anyway. */ -#define MAY_TRANSMUTE 64 +#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */ +#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */ + /* * Just to make the common cases easier to deal with */ @@ -188,9 +192,9 @@ struct smk_port_label { #define MAY_NOT 0 /* - * Number of access types used by Smack (rwxat) + * Number of access types used by Smack (rwxatl) */ -#define SMK_NUM_ACCESS_TYPE 5 +#define SMK_NUM_ACCESS_TYPE 6 /* SMACK data */ struct smack_audit_data { diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index b3b59b1e93d..14293cd9b1e 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -84,6 +84,8 @@ int log_policy = SMACK_AUDIT_DENIED; * * Do the object check first because that is more * likely to differ. + * + * Allowing write access implies allowing locking. */ int smk_access_entry(char *subject_label, char *object_label, struct list_head *rule_list) @@ -99,6 +101,11 @@ int smk_access_entry(char *subject_label, char *object_label, } } + /* + * MAY_WRITE implies MAY_LOCK. 
+ */ + if ((may & MAY_WRITE) == MAY_WRITE) + may |= MAY_LOCK; return may; } @@ -245,6 +252,7 @@ out_audit: static inline void smack_str_from_perm(char *string, int access) { int i = 0; + if (access & MAY_READ) string[i++] = 'r'; if (access & MAY_WRITE) @@ -255,6 +263,8 @@ static inline void smack_str_from_perm(char *string, int access) string[i++] = 'a'; if (access & MAY_TRANSMUTE) string[i++] = 't'; + if (access & MAY_LOCK) + string[i++] = 'l'; string[i] = '\0'; } /** diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 8825375cc03..b0be893ad44 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -185,7 +185,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode) smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, ctp); - rc = smk_curacc(skp->smk_known, MAY_READWRITE, &ad); + rc = smk_curacc(skp->smk_known, mode, &ad); return rc; } @@ -1146,7 +1146,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd, * @file: the object * @cmd: unused * - * Returns 0 if current has write access, error code otherwise + * Returns 0 if current has lock access, error code otherwise */ static int smack_file_lock(struct file *file, unsigned int cmd) { @@ -1154,7 +1154,7 @@ static int smack_file_lock(struct file *file, unsigned int cmd) smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); - return smk_curacc(file->f_security, MAY_WRITE, &ad); + return smk_curacc(file->f_security, MAY_LOCK, &ad); } /** @@ -1178,8 +1178,13 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd, switch (cmd) { case F_GETLK: + break; case F_SETLK: case F_SETLKW: + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); + smk_ad_setfield_u_fs_path(&ad, file->f_path); + rc = smk_curacc(file->f_security, MAY_LOCK, &ad); + break; case F_SETOWN: case F_SETSIG: smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 80f4b4a4572..160aa08e3cd 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -139,7 +139,7 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION; * SMK_LOADLEN: Smack rule length */ #define SMK_OACCESS "rwxa" -#define SMK_ACCESS "rwxat" +#define SMK_ACCESS "rwxatl" #define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1) #define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1) #define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN) @@ -282,6 +282,10 @@ static int smk_perm_from_str(const char *string) case 'T': perm |= MAY_TRANSMUTE; break; + case 'l': + case 'L': + perm |= MAY_LOCK; + break; default: return perm; } @@ -452,7 +456,7 @@ static ssize_t smk_write_rules_list(struct file *file, const char __user *buf, /* * Minor hack for backward compatibility */ - if (count != SMK_OLOADLEN && count != SMK_LOADLEN) + if (count < SMK_OLOADLEN || count > SMK_LOADLEN) return -EINVAL; } else { if (count >= PAGE_SIZE) { @@ -592,6 +596,8 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max) seq_putc(s, 'a'); if (srp->smk_access & MAY_TRANSMUTE) seq_putc(s, 't'); + if (srp->smk_access & MAY_LOCK) + seq_putc(s, 'l'); seq_putc(s, '\n'); } diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c index 01aecc2b507..0d1c27e911b 100644 --- a/sound/ppc/keywest.c +++ b/sound/ppc/keywest.c @@ -65,7 +65,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter) * already bound. If not it means binding failed, and then there * is no point in keeping the device instantiated. 
*/ - if (!keywest_ctx->client->driver) { + if (!keywest_ctx->client->dev.driver) { i2c_unregister_device(keywest_ctx->client); keywest_ctx->client = NULL; return -ENODEV; @@ -76,7 +76,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter) * This is safe because i2c-core holds the core_lock mutex for us. */ list_add_tail(&keywest_ctx->client->detected, - &keywest_ctx->client->driver->clients); + &to_i2c_driver(keywest_ctx->client->dev.driver)->clients); return 0; } diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c index fa64cd85204..fb5d107f560 100644 --- a/sound/soc/davinci/davinci-pcm.c +++ b/sound/soc/davinci/davinci-pcm.c @@ -238,7 +238,7 @@ static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data) print_buf_info(prtd->ram_channel, "i ram_channel"); pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status); - if (unlikely(ch_status != DMA_COMPLETE)) + if (unlikely(ch_status != EDMA_DMA_COMPLETE)) return; if (snd_pcm_running(substream)) { diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 361f94f09b1..61e48852b9e 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c @@ -215,7 +215,7 @@ static int imx_wm8962_probe(struct platform_device *pdev) goto fail; } codec_dev = of_find_i2c_device_by_node(codec_np); - if (!codec_dev || !codec_dev->driver) { + if (!codec_dev || !codec_dev->dev.driver) { dev_err(&pdev->dev, "failed to find codec platform device\n"); ret = -EINVAL; goto fail; diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index fe702076ca4..9d77f13c2d2 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -2,7 +2,7 @@ * turbostat -- show CPU frequency and C-state residency * on modern Intel turbo-capable processors. * - * Copyright (c) 2012 Intel Corporation. + * Copyright (c) 2013 Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it @@ -47,6 +47,8 @@ unsigned int skip_c1; unsigned int do_nhm_cstates; unsigned int do_snb_cstates; unsigned int do_c8_c9_c10; +unsigned int do_slm_cstates; +unsigned int use_c1_residency_msr; unsigned int has_aperf; unsigned int has_epb; unsigned int units = 1000000000; /* Ghz etc */ @@ -81,6 +83,8 @@ double rapl_joule_counter_range; #define RAPL_DRAM (1 << 3) #define RAPL_PKG_PERF_STATUS (1 << 4) #define RAPL_DRAM_PERF_STATUS (1 << 5) +#define RAPL_PKG_POWER_INFO (1 << 6) +#define RAPL_CORE_POLICY (1 << 7) #define TJMAX_DEFAULT 100 #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) @@ -96,7 +100,7 @@ struct thread_data { unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; - unsigned long long c1; /* derived */ + unsigned long long c1; unsigned long long extra_msr64; unsigned long long extra_delta64; unsigned long long extra_msr32; @@ -266,7 +270,7 @@ void print_header(void) outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64); if (do_nhm_cstates) outp += sprintf(outp, " %%c1"); - if (do_nhm_cstates) + if (do_nhm_cstates && !do_slm_cstates) outp += sprintf(outp, " %%c3"); if (do_nhm_cstates) outp += sprintf(outp, " %%c6"); @@ -280,9 +284,9 @@ void print_header(void) if (do_snb_cstates) outp += sprintf(outp, " %%pc2"); - if (do_nhm_cstates) + if (do_nhm_cstates && !do_slm_cstates) outp += sprintf(outp, " %%pc3"); - if (do_nhm_cstates) + if (do_nhm_cstates && !do_slm_cstates) outp += sprintf(outp, " %%pc6"); if (do_snb_cstates) outp += sprintf(outp, " %%pc7"); @@ -480,7 +484,7 @@ int format_counters(struct thread_data *t, struct core_data *c, if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; - if (do_nhm_cstates) + if (do_nhm_cstates && !do_slm_cstates) outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc); if (do_nhm_cstates) outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc); @@ -499,9 +503,9 @@ int format_counters(struct thread_data *t, struct core_data *c, if (do_snb_cstates) outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc); - if (do_nhm_cstates) + if (do_nhm_cstates && !do_slm_cstates) outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc); - if (do_nhm_cstates) + if (do_nhm_cstates && !do_slm_cstates) outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc); if (do_snb_cstates) outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc); @@ -648,17 +652,24 @@ delta_thread(struct thread_data *new, struct thread_data *old, } - /* - * As counter collection is not atomic, - * it is possible for mperf's non-halted cycles + idle states - * to exceed TSC's all cycles: show c1 = 0% in that case. - */ - if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) - old->c1 = 0; - else { - /* normal case, derive c1 */ - old->c1 = old->tsc - old->mperf - core_delta->c3 + if (use_c1_residency_msr) { + /* + * Some models have a dedicated C1 residency MSR, + * which should be more accurate than the derivation below. + */ + } else { + /* + * As counter collection is not atomic, + * it is possible for mperf's non-halted cycles + idle states + * to exceed TSC's all cycles: show c1 = 0% in that case. 
+ */ + if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) + old->c1 = 0; + else { + /* normal case, derive c1 */ + old->c1 = old->tsc - old->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7; + } } if (old->mperf == 0) { @@ -872,13 +883,21 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64)) return -5; + if (use_c1_residency_msr) { + if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) + return -6; + } + /* collect core counters only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; - if (do_nhm_cstates) { + if (do_nhm_cstates && !do_slm_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; + } + + if (do_nhm_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; } @@ -898,7 +917,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; - if (do_nhm_cstates) { + if (do_nhm_cstates && !do_slm_cstates) { if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) return -9; if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) @@ -977,7 +996,7 @@ void print_verbose_header(void) ratio, bclk, ratio * bclk); get_msr(0, MSR_IA32_POWER_CTL, &msr); - fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E: %sabled)\n", + fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", msr, msr & 0x2 ? "EN" : "DIS"); if (!do_ivt_turbo_ratio_limit) @@ -1046,25 +1065,28 @@ print_nhm_turbo_ratio_limits: switch(msr & 0x7) { case 0: - fprintf(stderr, "pc0"); + fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0"); break; case 1: - fprintf(stderr, do_snb_cstates ? "pc2" : "pc0"); + fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0"); break; case 2: - fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3"); + fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3"); break; case 3: - fprintf(stderr, "pc6"); + fprintf(stderr, do_slm_cstates ? "invalid" : "pc6"); break; case 4: - fprintf(stderr, "pc7"); + fprintf(stderr, do_slm_cstates ? "pc4" : "pc7"); break; case 5: - fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid"); + fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid"); + break; + case 6: + fprintf(stderr, do_slm_cstates ? "pc6" : "invalid"); break; case 7: - fprintf(stderr, "unlimited"); + fprintf(stderr, do_slm_cstates ? 
"pc7" : "unlimited"); break; default: fprintf(stderr, "invalid"); @@ -1460,6 +1482,8 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model) case 0x3F: /* HSW */ case 0x45: /* HSW */ case 0x46: /* HSW */ + case 0x37: /* BYT */ + case 0x4D: /* AVN */ return 1; case 0x2E: /* Nehalem-EX Xeon - Beckton */ case 0x2F: /* Westmere-EX Xeon - Eagleton */ @@ -1532,14 +1556,33 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ +double get_tdp(model) +{ + unsigned long long msr; + + if (do_rapl & RAPL_PKG_POWER_INFO) + if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) + return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; + + switch (model) { + case 0x37: + case 0x4D: + return 30.0; + default: + return 135.0; + } +} + + /* * rapl_probe() * - * sets do_rapl + * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units */ void rapl_probe(unsigned int family, unsigned int model) { unsigned long long msr; + unsigned int time_unit; double tdp; if (!genuine_intel) @@ -1555,11 +1598,15 @@ void rapl_probe(unsigned int family, unsigned int model) case 0x3F: /* HSW */ case 0x45: /* HSW */ case 0x46: /* HSW */ - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX; + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; break; case 0x2D: case 0x3E: - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS; + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; + break; + case 0x37: /* BYT */ + case 0x4D: /* AVN */ + do_rapl = RAPL_PKG | RAPL_CORES ; break; default: return; @@ -1570,19 +1617,22 @@ void rapl_probe(unsigned int family, unsigned int model) return; rapl_power_units = 1.0 / (1 << (msr & 0xF)); - rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); - rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF)); + if (model == 0x37) + rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; + else + rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); - /* get TDP to determine energy counter range */ - if (get_msr(0, MSR_PKG_POWER_INFO, &msr)) - return; + time_unit = msr >> 16 & 0xF; + if (time_unit == 0) + time_unit = 0xA; - tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; + rapl_time_units = 1.0 / (1 << (time_unit)); - rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; + tdp = get_tdp(model); + rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; if (verbose) - fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range); + fprintf(stderr, "RAPL: %.0f sec. 
Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); return; } @@ -1668,7 +1718,6 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; int cpu; - double local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units; if (!do_rapl) return 0; @@ -1686,23 +1735,13 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) return -1; - local_rapl_power_units = 1.0 / (1 << (msr & 0xF)); - local_rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); - local_rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF)); - - if (local_rapl_power_units != rapl_power_units) - fprintf(stderr, "cpu%d, ERROR: Power units mis-match\n", cpu); - if (local_rapl_energy_units != rapl_energy_units) - fprintf(stderr, "cpu%d, ERROR: Energy units mis-match\n", cpu); - if (local_rapl_time_units != rapl_time_units) - fprintf(stderr, "cpu%d, ERROR: Time units mis-match\n", cpu); - if (verbose) { fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, - local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units); + rapl_power_units, rapl_energy_units, rapl_time_units); } - if (do_rapl & RAPL_PKG) { + if (do_rapl & RAPL_PKG_POWER_INFO) { + if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) return -5; @@ -1714,6 +1753,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); + } + if (do_rapl & RAPL_PKG) { + if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) return -9; @@ -1749,12 +1791,16 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) print_power_limit_msr(cpu, msr, "DRAM Limit"); } - if (do_rapl & RAPL_CORES) { + if (do_rapl & RAPL_CORE_POLICY) { if (verbose) { if (get_msr(cpu, MSR_PP0_POLICY, &msr)) return -7; fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); + } + } + if (do_rapl & RAPL_CORES) { + if (verbose) { if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) return -9; @@ -1813,10 +1859,48 @@ int has_c8_c9_c10(unsigned int family, unsigned int model) } +int is_slm(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + switch (model) { + case 0x37: /* BYT */ + case 0x4D: /* AVN */ + return 1; + } + return 0; +} + +#define SLM_BCLK_FREQS 5 +double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; + +double slm_bclk(void) +{ + unsigned long long msr = 3; + unsigned int i; + double freq; + + if (get_msr(0, MSR_FSB_FREQ, &msr)) + fprintf(stderr, "SLM BCLK: unknown\n"); + + i = msr & 0xf; + if (i >= SLM_BCLK_FREQS) { + fprintf(stderr, "SLM BCLK[%d] invalid\n", i); + msr = 3; + } + freq = slm_freq_table[i]; + + fprintf(stderr, "SLM BCLK: %.1f Mhz\n", freq); + + return freq; +} + double discover_bclk(unsigned int family, unsigned int model) { if (is_snb(family, model)) return 100.00; + else if (is_slm(family, model)) + return slm_bclk(); else return 133.33; } @@ -1873,7 +1957,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, target_c_local); - if (target_c_local < 85 || target_c_local > 120) + if (target_c_local < 85 || target_c_local > 127) goto guess; tcc_activation_temp = target_c_local; @@ -1970,6 +2054,7 @@ void check_cpuid() do_smi = do_nhm_cstates; do_snb_cstates = is_snb(family, model); 
do_c8_c9_c10 = has_c8_c9_c10(family, model); + do_slm_cstates = is_slm(family, model); bclk = discover_bclk(family, model); do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); @@ -2331,7 +2416,7 @@ int main(int argc, char **argv) cmdline(argc, argv); if (verbose) - fprintf(stderr, "turbostat v3.4 April 17, 2013" + fprintf(stderr, "turbostat v3.5 April 26, 2013" " - Len Brown <lenb@kernel.org>\n"); turbostat_init();
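
A note on the Smack hunks above: the new 'l' access mode makes locking a distinct permission, while any rule that grants write still implicitly grants lock (the MAY_WRITE-implies-MAY_LOCK step added to smk_access_entry()). Below is a minimal, self-contained C sketch of that bit handling only; the MAY_* values are local stand-ins (the lock value mirrors the one added to smack.h) and none of the kernel's rule-list walking is reproduced.

#include <stdio.h>

/* Local stand-ins for the access bits; MAY_LOCK mirrors the value the
 * patch adds to smack.h, the others are illustrative only. */
#define MAY_READ	0x00000004
#define MAY_WRITE	0x00000002
#define MAY_LOCK	0x00002000

/* Mirrors the post-processing added to smk_access_entry(): a rule that
 * allows write implicitly allows lock as well. */
static int smack_rule_access(int rule_bits)
{
	if ((rule_bits & MAY_WRITE) == MAY_WRITE)
		rule_bits |= MAY_LOCK;
	return rule_bits;
}

int main(void)
{
	int rw_rule = MAY_READ | MAY_WRITE;
	int ro_rule = MAY_READ;

	printf("rw rule grants lock: %s\n",
	       (smack_rule_access(rw_rule) & MAY_LOCK) ? "yes" : "no");
	printf("ro rule grants lock: %s\n",
	       (smack_rule_access(ro_rule) & MAY_LOCK) ? "yes" : "no");
	return 0;
}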
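
On the turbostat side, the patch prefers a dedicated C1 residency MSR where one exists (use_c1_residency_msr) and otherwise keeps deriving C1 as whatever TSC time is left after non-halted cycles and the deeper core C-states, clamped to zero when the non-atomic counter reads overshoot. A standalone sketch of that fallback arithmetic, using made-up counter deltas instead of real MSR reads:

#include <stdio.h>

/* Fallback C1 derivation: time not spent executing (mperf) and not spent
 * in C3/C6/C7 is attributed to C1.  Because the counters are not sampled
 * atomically, their sum can exceed the TSC delta; report 0 in that case. */
static unsigned long long derive_c1(unsigned long long tsc,
				    unsigned long long mperf,
				    unsigned long long c3,
				    unsigned long long c6,
				    unsigned long long c7)
{
	if (mperf + c3 + c6 + c7 > tsc)
		return 0;
	return tsc - mperf - c3 - c6 - c7;
}

int main(void)
{
	/* hypothetical per-interval counter deltas, not real MSR reads */
	unsigned long long tsc = 1000000, mperf = 400000;
	unsigned long long c3 = 100000, c6 = 300000, c7 = 0;
	unsigned long long c1 = derive_c1(tsc, mperf, c3, c6, c7);

	printf("%%c1 = %.2f\n", 100.0 * c1 / tsc);
	return 0;
}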
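
The reworked rapl_probe() likewise decodes MSR_RAPL_POWER_UNIT once and caches the units, with two quirks visible in the diff: on model 0x37 (Bay Trail) the energy-unit field is interpreted in microjoules rather than as a power-of-two divisor, and a time-unit field of zero is treated as 0xA. A sketch of that decoding, fed a hypothetical raw MSR value rather than an rdmsr():

#include <stdio.h>

struct rapl_units {
	double power;	/* Watts  */
	double energy;	/* Joules */
	double time;	/* sec.   */
};

/* Decode MSR_RAPL_POWER_UNIT the way the patched rapl_probe() does;
 * 'byt' selects the Bay Trail (model 0x37) energy-unit interpretation. */
static struct rapl_units decode_rapl_units(unsigned long long msr, int byt)
{
	struct rapl_units u;
	unsigned int time_unit = (msr >> 16) & 0xF;

	u.power = 1.0 / (1ULL << (msr & 0xF));

	if (byt)
		u.energy = 1.0 * (1ULL << ((msr >> 8) & 0x1F)) / 1000000;
	else
		u.energy = 1.0 / (1ULL << ((msr >> 8) & 0x1F));

	if (time_unit == 0)
		time_unit = 0xA;
	u.time = 1.0 / (1ULL << time_unit);

	return u;
}

int main(void)
{
	/* hypothetical raw value: power=1/8 W, energy=1/65536 J, time=1/1024 s */
	struct rapl_units u = decode_rapl_units(0x000A1003ULL, 0);

	printf("%f Watts, %f Joules, %f sec.\n", u.power, u.energy, u.time);
	return 0;
}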